# Unless explicitly stated otherwise all files in this repository are licensed under the Apache License, Version 2.0.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2021 Datadog, Inc.
import re
from utils.tools import logger
def _spans_with_parent(traces, parent_ids):
if not isinstance(traces, list):
logger.error("Traces should be an array")
yield from []  # do not fail here, it's the schema's job
else:
for trace in traces:
for span in trace:
if span.get("parent_id") in parent_ids:
yield span
def get_root_spans(traces):
yield from _spans_with_parent(traces, (0, None))
def _get_rid_from_span(span):
if not isinstance(span, dict):
logger.error(f"Span should be an object, not {type(span)}")
return None
meta = span.get("meta", {})
user_agent = None
if span.get("type") == "rpc":
user_agent = meta.get("grpc.metadata.user-agent")
# java does not fill this tag; it uses the normal http tags
if not user_agent:
# code version
user_agent = meta.get("http.request.headers.user-agent")
if not user_agent: # try something for .NET
user_agent = meta.get("http_request_headers_user-agent")
if not user_agent: # last hope
user_agent = meta.get("http.useragent")
return get_rid_from_user_agent(user_agent)
def get_rid_from_user_agent(user_agent):
if not user_agent:
return None
match = re.search("rid/([A-Z]{36})", user_agent)
if not match:
return None
return match.group(1)
def get_spans_related_to_rid(traces, rid):
if not isinstance(traces, list):
logger.error("Traces should be an array")
yield from []  # do not fail here, it's the schema's job
else:
for trace in traces:
for span in trace:
if rid is None or rid == _get_rid_from_span(span):
yield span
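# --- Usage sketch (not part of the original module) ---
# Assumes a hypothetical trace payload shaped like the agent output: a list of
# traces, each trace being a list of span dicts. It shows how get_root_spans,
# _get_rid_from_span and get_spans_related_to_rid fit together.
if __name__ == "__main__":
    sample_traces = [
        [
            {"parent_id": None, "meta": {"http.useragent": "system_tests rid/" + "A" * 36}},
            {"parent_id": 1, "meta": {}},
        ]
    ]
    roots = list(get_root_spans(sample_traces))
    rid = _get_rid_from_span(roots[0])
    print(len(roots), rid, len(list(get_spans_related_to_rid(sample_traces, rid))))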
|
import versioneer
from setuptools import setup
setup_args = dict(
name='nbexamples',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
license='BSD',
platforms=['Jupyter Notebook'],
packages=[
'nbexamples'
],
include_package_data=True,
install_requires=[
'notebook>=4.2.0',
'nbconvert',
'nbformat'
]
)
if __name__ == '__main__':
setup(**setup_args)
|
# Searching Algorithm Visualizer #
from tkinter import *
from tkinter import messagebox
from PIL import Image,ImageTk
import time
class Initialization:
def __init__(self,root):
self.window= root
self.store=[10,20,30,40,50,60,70,80,90,100]
self.search_position_linear = -1
self.search_position_binary = -1
self.start_binary = 0
self.end_binary = 9
self.linear_indicator = 0
self.binary_indicator = 0
self.search_value_take = None
self.make_canvas = Canvas(self.window,bg="chocolate",width=900,height=600,relief=RAISED,bd=4)
self.make_canvas.pack()
heading = lambda: Label(self.make_canvas,text="Searching Algo Visualizer",bg="chocolate",fg="yellow",font=("Arial",25,"bold","italic")).place(x=250,y=15)
heading()
class Setup(Initialization):
def __init__(self, root):
super(Setup,self).__init__(root)
self.__search_and_index_label_indicate()
self.__two_array_maker()
self.__array_void_containers_and_index_maker()
self.make_arrow()
self.__make_correct_wrong()
self.__steps_label()
def __search_and_index_label_indicate(self):
Label(self.make_canvas, text="Linear\nSearch", font=("Arial",30,"bold"), bg="chocolate", fg="gold").place(x=40, y=150 + 50)
Label(self.make_canvas, text="Binary\nSearch", font=("Arial", 30, "bold"), bg="chocolate", fg="gold").place(x=40, y=150 + 50+250)
Label(self.make_canvas, text="Index no.", font=("Arial", 13, "bold"), bg="chocolate", fg="brown").place(x=70, y=150 + 10)
Label(self.make_canvas, text="Index no.", font=("Arial", 13, "bold"), bg="chocolate", fg="brown").place(x=70, y=150 + 10 + 250)
def __two_array_maker(self):
self.make_canvas.create_line(200, 150+50, 718+3, 150+50,width=3,fill="blue")
self.make_canvas.create_line(200, 206+50, 718+3, 206+50, width=3,fill="blue")
self.make_canvas.create_line(200, 450, 718 + 3, 450, width=3, fill="blue")
self.make_canvas.create_line(200, 506, 718 + 3, 506, width=3, fill="blue")
def __array_void_containers_and_index_maker(self):
start_x=201
Label(self.make_canvas,text="-1",font=("Arial",20,"bold","italic"),bg="chocolate",fg="blue").place(x=start_x-52,y=203-50)
Label(self.make_canvas,text="-1",font=("Arial",20,"bold","italic"),bg="chocolate",fg="blue").place(x=start_x-52,y=453-50)
for i in range(10):
self.make_canvas.create_rectangle(start_x,203,start_x+50,253,width=3,outline="#00FF00")
Label(self.make_canvas,text=(i+1)*10,font=("Arial",15,"bold","italic"),bg="chocolate",fg="yellow").place(x=start_x+10,y=203+10)
# Up array index maker
Label(self.make_canvas,text=i,font=("Arial",20,"bold","italic"),bg="chocolate",fg="blue").place(x=start_x+15,y=203-50)
self.make_canvas.create_rectangle(start_x,453,start_x+50,503,width=3,outline="#00FF00")
Label(self.make_canvas,text=(i+1)*10,font=("Arial",15,"bold","italic"),bg="chocolate",fg="yellow").place(x=start_x+10,y=453+10)
# Down array index maker
Label(self.make_canvas,text=i,font=("Arial",20,"bold","italic"),bg="chocolate",fg="blue").place(x=start_x+15,y=453-50)
start_x+=52
def make_arrow(self):
points_linear = (170,100+50, 170-20,(100+60)/2+50, 170-10,(100+60)/2+50, 170-10,60+50, 170+10,60+50, 170+10,(100+60)/2+50, 170+20,(100+60)/2+50)
self.linear_indicator = self.make_canvas.create_polygon(points_linear,width=3,fill="#0FFF0F",outline="black")
points_binary = (170,100+50+250, 170-20,(100+60)/2+50+250, 170-10,(100+60)/2+50+250, 170-10,60+50+250, 170+10,60+50+250, 170+10,(100+60)/2+50+250, 170+20,(100+60)/2+50+250)
self.binary_indicator = self.make_canvas.create_polygon(points_binary,width=3,fill="#0FFF0F",outline="black")
def __make_correct_wrong(self):
global img_correct, img_wrong
self.pic_correct_x=155
self.pic_wrong_x=155
img_correct = ImageTk.PhotoImage(Image.open("Images/correct.png").resize((30,30),Image.LANCZOS))  # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the equivalent filter
self.pic_correct = Label(self.make_canvas,image=img_correct,bg="chocolate")
img_wrong = ImageTk.PhotoImage(Image.open("Images/wrong.png").resize((30, 30), Image.LANCZOS))
self.pic_wrong = Label(self.make_canvas, image=img_wrong, bg="chocolate")
def __steps_label(self):
self.binary_step_counter = 0
self.linear_step_counter = 0
self.step_linear = Label(self.make_canvas,text="Steps: "+ str(self.linear_step_counter),font=("Arial",20,"bold"),bg="chocolate",fg="gold")
self.step_linear.place(x=750,y=210)
self.step_binary = Label(self.make_canvas, text="Steps: " + str(self.binary_step_counter), font=("Arial", 20, "bold"), bg="chocolate", fg="gold")
self.step_binary.place(x=750, y=210+250)
# According to the instruction change arrow position
def arrow_movement_controller(self,counter,direction,arrow_particular):
try:
num = 52 * counter
while num:
if direction == 0:
if arrow_particular == "linear":
self.make_canvas.move(self.linear_indicator, -1, 0)
else:
self.make_canvas.move(self.binary_indicator, -1, 0)
self.pic_wrong_x -= 1
self.pic_correct_x -= 1
else:
if arrow_particular == "linear":
self.make_canvas.move(self.linear_indicator, 1, 0)
else:
self.make_canvas.move(self.binary_indicator, 1, 0)
self.pic_wrong_x += 1
self.pic_correct_x += 1
self.window.update()
time.sleep(0.01)
num -= 1
except:
print("Force stop error")
class Functionality(Setup):
def __init__(self, root):
super(Functionality, self).__init__(root)
self.__introduction_btn()
def __introduction_btn(self):
self.search_val_btn = Button(self.window, text="Search a value", font=("Arial", 25, "bold", "italic"), bg="black", fg="green", relief=RAISED, bd=7, command=self.__entry_take_forward)
self.search_val_btn.place(x=50, y=618)
self.reset_btn = Button(self.window, text="Reset", font=("Arial", 25, "bold", "italic"), bg="black", fg="green", relief=RAISED, bd=7, command=self.__reset_all, state=DISABLED)
self.reset_btn.place(x=700, y=618)
# Take entry from user and forward
def __entry_take_forward(self):
self.search_value_take = Entry(self.window, font=("Arial", 25, "bold", "italic"), fg="green", relief=SUNKEN, bd=7, width=5)
self.search_value_take.place(x=350, y=630)
self.search_value_take.focus()
self.search_val_activation = Button(self.window, text="Search Value", font=("Arial", 18, "bold", "italic"), bg="gold", fg="green", relief=RAISED, bd=5, command=self.__give_searching_control)
self.search_val_activation.place(x=500, y=630)
# Hand control to the linear and binary search routines
def __give_searching_control(self):
try:
self.search_val_btn['state'] = DISABLED
self.search_val_activation['state'] = DISABLED
# Searching value filtering
try:
if int(self.search_value_take.get()):
pass
except:
messagebox.showerror("Input error","Please give a integer value to input")
self.search_val_btn['state'] = NORMAL
return
self.__linear_search_it()
# After linear search customization
if 9>=self.search_position_linear>=0:
self.pic_correct.place_forget()
self.linear_conformation = Label(self.make_canvas,text="Found",font=("Arial",20,"bold"),bg="chocolate",fg="brown")
self.linear_conformation.place(x=self.pic_correct_x-15,y=70)
else:
self.make_canvas.move(self.linear_indicator,-(52*2),0)
self.make_canvas.itemconfig(self.linear_indicator,fill="red")
self.window.update()
self.linear_conformation = Label(self.make_canvas, text="Searching value not found in this array", font=("Arial", 20, "bold"), bg="chocolate", fg="brown")
self.linear_conformation.place(x=self.pic_correct_x - 400, y=70)
# Default position set
self.pic_correct_x=155
self.pic_correct_y = 70+250
self.pic_wrong_x = 155
self.pic_wrong_y = 70+250
self.__binary_search_it()
# After binary search customization
if self.start_binary<=self.end_binary:
self.pic_correct.place_forget()
self.binary_conformation = Label(self.make_canvas,text="Found",font=("Arial",20,"bold"),bg="chocolate",fg="brown")
self.binary_conformation.place(x=self.pic_correct_x-15,y=70+250)
else:
self.make_canvas.itemconfig(self.binary_indicator,fill="Red")
self.binary_conformation = Label(self.make_canvas, text="Searching value not found", font=("Arial", 20, "bold"), bg="chocolate", fg="brown")
self.binary_conformation.place(x=self.pic_correct_x - 200, y=70+250)
self.reset_btn['state'] = NORMAL
except:
print("Force stop error")
# Linear Search Process Control
def __linear_search_it(self):
try:
if self.search_position_linear>9:
messagebox.showerror("Not found","Searching value not found")
# Initial Condition
elif self.search_position_linear == -1:
self.arrow_movement_controller(1,1,"linear")
self.search_position_linear += 1
self.linear_step_counter += 1
self.step_linear['text'] = "Steps: "+ str(self.linear_step_counter)
self.__linear_search_it()
# Value found condition
elif self.store[self.search_position_linear] == int(self.search_value_take.get()):
self.pic_correct.place(x=self.pic_correct_x, y=70)
self.window.update()
time.sleep(0.5)
else:
# Pic Show and forget
self.pic_wrong.place(x=self.pic_correct_x, y=70)
self.window.update()
time.sleep(0.5)
self.pic_wrong.place_forget()
# Arrow Movement Control
self.search_position_linear+=1
self.arrow_movement_controller(1,1,"linear")
# Steps Update
if self.linear_step_counter<10:
self.linear_step_counter += 1
self.step_linear['text'] = "Steps: " + str(self.linear_step_counter)
self.__linear_search_it()
except:
print("Force stop error")
def __binary_search_it(self):
try:
if self.start_binary<=self.end_binary:
middle = int((self.start_binary + self.end_binary) / 2)
if self.search_position_binary == -1:# Initial Condition
self.arrow_movement_controller(middle + 1, 1,"binary")
elif self.search_position_binary < middle:
self.arrow_movement_controller(middle - self.search_position_binary, 1,"binary")
else:
self.arrow_movement_controller(self.search_position_binary - middle, 0,"binary")
self.search_position_binary = middle
# Steps Update
self.binary_step_counter += 1
self.step_binary['text'] = "Steps: " + str(self.binary_step_counter)
# Value found condition
if self.store[middle] == int(self.search_value_take.get()):
self.pic_correct.place(x=self.pic_correct_x, y=70 + 250)
self.window.update()
else:
# Pic Show and forget
self.pic_wrong.place(x=self.pic_correct_x, y=70 + 250)
self.window.update()
time.sleep(0.5)
self.pic_wrong.place_forget()
# Narrow the search range
if int(self.search_value_take.get()) > self.store[middle]:
self.start_binary = middle + 1
else:
self.end_binary = middle - 1
self.__binary_search_it()
else:
messagebox.showerror("Not found","Searching value not found")
except:
print("Force stop error")
def __reset_all(self):
# Button state customization
self.reset_btn['state'] = DISABLED
self.search_val_btn['state'] = NORMAL
self.search_val_activation['state'] = NORMAL
# Default set
self.pic_correct_x = self.pic_wrong_x = 155
self.pic_correct_y = self.pic_wrong_y = 70
self.make_canvas.delete(self.linear_indicator)
self.make_canvas.delete(self.binary_indicator)
self.make_arrow()
self.linear_conformation.place_forget()
self.binary_conformation.place_forget()
self.search_position_linear = -1
self.search_position_binary = -1
self.start_binary = 0
self.end_binary = 9
self.linear_step_counter = 0
self.binary_step_counter = 0
self.step_linear['text'] = "Steps: " + str(self.linear_step_counter)
self.step_binary['text'] = "Steps: " + str(self.binary_step_counter)
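# --- Plain-logic sketch (not part of the visualizer) ---
# Restates the range-narrowing that __binary_search_it animates: keep start/end
# bounds, probe the middle cell, then discard the half that cannot contain the
# target. The probe list mirrors the cells the arrow visits.
def binary_search_steps(store, target):
    start, end, probes = 0, len(store) - 1, []
    while start <= end:
        middle = (start + end) // 2
        probes.append(middle)  # the arrow would move to this cell next
        if store[middle] == target:
            return middle, probes
        if target > store[middle]:
            start = middle + 1
        else:
            end = middle - 1
    return -1, probes
# e.g. binary_search_steps([10, 20, 30, 40, 50, 60, 70, 80, 90, 100], 70) -> (6, [4, 7, 5, 6])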
if __name__ == '__main__':
window = Tk()
window.geometry("900x700")
window.maxsize(900,700)
window.minsize(900,700)
window.title("Searching Visualizer")
window.config(bg="orange")
window.iconbitmap("Images/search_icon.ico")
Functionality(window)
window.mainloop()
|
import argparse
import json
import logging
import os
from flask import Flask, request, jsonify, abort
from flask_restful import Api
parser = argparse.ArgumentParser(description='Multimedia probe analysis.')
parser.add_argument(
'-j',
dest='json',
type=str,
help='JSON template for instances in server')
parser.add_argument(
'-d',
dest='debug',
const=True,
default=False,
nargs='?',
help='Debug mode to restart server if any change in the code')
parser.add_argument(
'-q',
dest='quiet',
const=True,
default=False,
nargs='?',
help='Quiet mode to remove received request from terminal')
parser.add_argument(
'-s',
dest='server',
type=str,
default='0.0.0.0',
help='Name of the host server')
parser.add_argument(
'-p',
dest='port',
type=int,
default=5000,
help='Host server port')
args = parser.parse_args()
api_name = 'API'
DATA = []
if args.json == 'eve':
DATA = json.load(open('Data/data_eve.json', 'r'))
api_name = '5G-EVE Rest API'
elif args.json == 'energy':
DATA = json.load(open('Data/data_energy.json', 'r'))
api_name = '5G-Energy Rest API'
# Define API
app = Flask(api_name)
app.config['JSON_SORT_KEYS'] = False  # Prevent sorting of response data and keys
api = Api(app)
os.environ['FLASK_ENV'] = "development"
# Remove log requests in the terminal
if args.quiet:
log = logging.getLogger('werkzeug')
log.disabled = True
def start_api_server():
app.run(debug=args.debug, host=args.server, port=args.port)
# curl http://localhost:5000/api/probe
@app.route('/api/probe', methods=['GET'])
def get():
return jsonify(DATA)
# curl -X POST -H "Content-Type: application/json" -d @Data/data_eve.json http://localhost:5000/api/probe
@app.route('/api/probe', methods=['POST'])
def post():
# Force JSON to avoid conflicts
data_request = request.get_json(force=True)
try:
# Loop through existing data to check IDs. Create a new
# instance only if the ID is not already present, abort otherwise
if len(DATA) > 0:
if 'id' in DATA[0]:
ids = list(item['id'] for item in data_request)
for id_item in ids:
if id_item in list(item['id'] for item in DATA):
abort(409, 'A conflict happened while processing the request. Check if your data was previously created.')
for item_request in data_request:
DATA.append(item_request)
elif 'uuid' in DATA[0]:
uuids = list(item['uuid'] for item in data_request)
for uuid in uuids:
if uuid in list(item['uuid'] for item in DATA):
abort(409, 'A conflict happened while processing the request. Check if your data was previously created.')
for item_request in data_request:
DATA.append(item_request)
else:
for item in data_request:
DATA.append(item)
except KeyError:
abort(500, 'The server encountered an internal error and was unable to complete your request. Verify the sending of the same information as on the server')
return jsonify(data_request), 201
# curl -X PUT -H "Content-Type: application/json" -d @Data/data_eve.json http://localhost:5000/api/probe
@app.route('/api/probe', methods=['PUT'])
def put():
# Force JSON to avoid conflicts
data_request = request.get_json(force=True)
try:
# Loop through the data to check IDs. Abort if any
# requested ID does not match an ID in the data
if len(DATA) == len(data_request):
if 'uuid' in DATA[0]:
uuids = list(item['uuid'] for item in data_request)
for uuid in uuids:
if uuid not in list(item['uuid'] for item in DATA):
abort(409, 'A conflict happened while processing the request. Check if your data was previously created.')
for item in DATA:
for item_request in data_request:
if item_request['uuid'] == item['uuid']:
for property_item in item:
item[property_item] = item_request[property_item]
elif 'id' in DATA[0]:
ids = list(item['id'] for item in data_request)
for id_item in ids:
if id_item not in list(item['id'] for item in DATA):
abort(409, 'A conflict happened while processing the request. Check if your data was previously created.')
for item in DATA:
for item_request in data_request:
if item_request['id'] == item['id']:
for property_item in item:
item[property_item] = item_request[property_item]
else:
abort(500, 'The server encountered an internal error and was unable to complete your request. The properties of the request data must be the same as on the server.')
else:
abort(400, 'The browser (or proxy) sent a request that this server could not understand. Your data request length should match data in the server.')
except KeyError:
abort(500, 'The server encountered an internal error and was unable to complete your request. The properties of the request data must be the same as on the server.')
return jsonify(data_request), 200
# curl http://localhost:5000/api/probe -X DELETE
@app.route('/api/probe', methods=['DELETE'])
def delete():
# Clear all data
if len(DATA) > 0:
DATA.clear()
else:
abort(409, 'A conflict happened while processing the request. There is no data to delete on the server.')
return '', 204
# curl http://localhost:5000/api/probe/1
@app.route('/api/probe/<item_id>', methods=['GET'])
def get_id(item_id):
# Create an empty list for our results
results = []
# Loop through the data and match results that fit the requested ID.
# Abort if there is no ID in JSON data
try:
if len(DATA) > 0:
if 'id' in DATA[0]:
for item in DATA:
if item['id'] == int(item_id):
results.append(item)
elif 'uuid' in DATA[0]:
for item in DATA:
if item['uuid'] == item_id:
results.append(item)
if not results:
abort(404)
except KeyError:
abort(500, 'The server encountered an internal error and was unable to complete your request. The properties of the request data must be the same as on the server.')
return jsonify(results)
# curl -X PUT -H "Content-Type: application/json" -d @Data/data_energy.json http://localhost:5000/api/probe/1
@app.route('/api/probe/<item_id>', methods=['PUT'])
def put_id(item_id):
# Force JSON to avoid conflicts
data_request = request.get_json(force=True)
try:
# ID path must match ID from data request
if 'id' in data_request[0]:
if int(item_id) != data_request[0]['id']:
abort(409, 'A conflict happened while processing the request. Requested URL ID does not correspond with provided ID in data request.')
elif 'uuid' in data_request[0]:
if str(item_id) != data_request[0]['uuid']:
abort(409, 'A conflict happened while processing the request. Requested URL ID does not correspond with provided ID in data request.')
else:
abort(500, 'The server encountered an internal error and was unable to complete your request. The properties of the request data must be the same as on the server.')
# Allow only one instance, as it is individual request
if len(data_request) > 1:
abort(400, 'The browser (or proxy) sent a request that this server could not understand.')
# Loop through the data to check the ID. Abort if the
# requested ID does not match any existing data
if len(DATA) > 0:
if 'uuid' in DATA[0]:
if str(item_id) not in list(item['uuid'] for item in DATA):
abort(409, 'A conflict happened while processing the request. Check if your data was previously created.')
for item in DATA:
for item_request in data_request:
if item_request['uuid'] == item['uuid']:
for property_item in item:
item[property_item] = item_request[property_item]
elif 'id' in DATA[0]:
if int(item_id) not in list(item['id'] for item in DATA):
abort(409, 'A conflict happened while processing the request. Check if your data was previously created.')
for item in DATA:
for item_request in data_request:
if item_request['id'] == item['id']:
for property_item in item:
item[property_item] = item_request[property_item]
else:
abort(500, 'The server encountered an internal error and was unable to complete your request. The properties of the request data must be the same as on the server.')
# If no local data, use POST request to create new instance
else:
abort(400, 'The browser (or proxy) sent a request that this server could not understand.')
except KeyError:
abort(500, 'The server encountered an internal error and was unable to complete your request. The properties of the request data must be the same as on the server.')
return jsonify(data_request), 200
# curl http://localhost:5000/api/probe/1 -X DELETE
@app.route('/api/probe/<item_id>', methods=['DELETE'])
def delete_id(item_id):
# Create an empty list for our results
results = []
if len(DATA) > 0:
if 'id' in DATA[0]:
for item in DATA:
if item['id'] == int(item_id):
DATA.remove(item)
results.append(item)
if 'uuid' in DATA[0]:
for item in DATA:
if item['uuid'] == item_id:
DATA.remove(item)
results.append(item)
if not results:
abort(409, 'A conflict happened while processing the request. There is no data to delete on the server.')
return '', 204
# curl http://localhost:5000/api/probe/shutdown
@app.route('/api/probe/shutdown', methods=['GET', 'POST'])
def shutdown_server():
shutdown = request.environ.get('werkzeug.server.shutdown')
if shutdown is None:
raise RuntimeError('The function is unavailable')
else:
shutdown()
return 'Shutting down the server...\n'
if __name__ == '__main__':
start_api_server()
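# --- Client-side sketch (not part of the server above) ---
# Assumes the server is already running on the default host/port and that the
# loaded JSON template keys its items by "id"; the item payload is made up.
# It mirrors the curl examples in the route comments using the requests library.
def demo_client(base_url='http://localhost:5000/api/probe'):
    import requests  # third-party HTTP client, used only by this demo
    requests.post(base_url, json=[{'id': 1, 'name': 'probe-1'}])  # create one item (201)
    print(requests.get(base_url).json())                          # list everything
    print(requests.get(base_url + '/1').json())                   # fetch it by id
    requests.delete(base_url)                                     # clear the data (204)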
|
# -*- coding: utf-8 -*-
import sys
import json
FLAT = False
SEPARATOR = ">"
def dumps(data):
if FLAT:
return json.dumps(data)
return json.dumps(data, indent=4, sort_keys=True)
def output(data, fh=sys.stdout):
"""Output data, if it is not string, then serialized to JSON.
Input:
- data: if str or unicode, output as is. If other, serialize to JSON
- fh: file handle for output, defaults to sys.stdout
"""
if FLAT and data == SEPARATOR:
return
if isinstance(data, str):  # Python 3: the old 'unicode' check is no longer needed
fh.write(data)
fh.write("\n")
return
fh.write(dumps(data))
fh.write("\n")
return
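# --- Tiny demonstration (not part of the original module) ---
# A string passes through unchanged; anything else is serialized via dumps(),
# pretty-printed and key-sorted while FLAT stays False.
if __name__ == "__main__":
    output("plain text")
    output({"b": 2, "a": 1})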
|
from inferencer import Inferencer, Inferencer_Params
from tfcore.interfaces.IPipeline_Inferencer import IPipeline_Inferencer_Params, IPipeline_Inferencer
from tfcore.utilities.preprocessing import Preprocessing
import gflags
import os
import sys
import imageio
import numpy as np
def get_filename(idx, filename='', decimals=5):
# Zero-pad idx to (decimals + 1) digits and append the '.png' extension,
# keeping any prefix passed in via `filename`.
return '{}{:0{width}d}.png'.format(filename, idx, width=decimals + 1)
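# e.g. get_filename(7, 'mask_') -> 'mask_000007.png'; get_filename(123456) -> '123456.png'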
class Pipeline_Inferencer_Params(IPipeline_Inferencer_Params):
def __init__(self,
data_dir_y,
data_dir_x=''):
super().__init__(data_dir_y=data_dir_y, data_dir_x=data_dir_x)
class Pipeline_Inferencer(IPipeline_Inferencer):
def __init__(self, inferencer, params, pre_processing):
super().__init__(inferencer, params, pre_processing)
def get_element(self, idx):
try:
img_x = imageio.imread(self.files_x[idx])
img_y = self.files_x[idx].count('good')
except FileNotFoundError:
raise FileNotFoundError(' [!] File not found in data-set x')
if self.pre_processing is not None:
img_x, _ = self.pre_processing.run(img_x, None)
return img_x, np.asarray(img_y)
# flags = tf.app.flags
flags = gflags.FLAGS
gflags.DEFINE_string("dataset", "../Data/", "Dataset path")
gflags.DEFINE_string("outdir", "../Data/predictions/softmax", "Output path")
gflags.DEFINE_string("model_dir", "../softmax", "Model directory")
def main():
flags(sys.argv)
model_params = Inferencer_Params(image_size=256,
model_path=flags.model_dir)
model_inferencer = Inferencer(model_params)
pipeline_params = Pipeline_Inferencer_Params(data_dir_x=os.path.join(flags.dataset, 'test_X'),
data_dir_y=None)
pipeline = Pipeline_Inferencer(inferencer=model_inferencer, params=pipeline_params, pre_processing=None)
count = 0
first_pass = True
while first_pass or img_out is not None:
if first_pass:
first_pass = False
if not os.path.exists(flags.outdir):
os.makedirs(flags.outdir)
img_out = pipeline.run()
if img_out is not None:
filename = get_filename(count, 'mask_')
imageio.imwrite(os.path.join(flags.outdir, filename), img_out)
print(' [*] save file ' + filename)
count += 1
if __name__ == "__main__":
main()
|
"""
Created on 21 Aug 2013
@author: Anna
"""
import math
import numpy
import random
from .Globals import G
class Allocation:
def __init__(self, itemList, week, altRoutes, excBuffer):
self.week = week
self.altRoutes = altRoutes
self.itemList = itemList
self.excBuffer = excBuffer
def Run(self):
for CurrentMA in self.itemList:
# call the allocation methods based on the step (e.g. allocation on same route or allocation on alternative routes)
if self.altRoutes == 1:
self.alternativeRoutes(CurrentMA)
else:
self.allocationStd(CurrentMA)
# put items in output buffer (temporary buffer for excess units to be allocated)
if CurrentMA.qty > 0:
self.excBuffer.append(CurrentMA)
# allocate item on its own route
def allocationStd(self, MA):
sufficient = True # flag that shows if we have sufficient capacity
# read the capacity that the MA requires
requiredCapacity = {}
for x in G.RouteDict[MA.MAid]["route"]:
requiredCapacity[x] = G.RouteDict[MA.MAid]["route"][x] * MA.qty
# read the remaining capacity for the given week and subtract the required from it
remainingCapacity = {}
for bottleneck in G.CurrentCapacityDict:
remainingCapacity[bottleneck] = (
G.CurrentCapacityDict[bottleneck][self.week]
- requiredCapacity[bottleneck]
)
# if we dropped below zero then the capacity is not sufficient
if remainingCapacity[bottleneck] < 0:
sufficient = False
# check if there is sufficient capacity to process the order
if sufficient:
# update remaining capacity
allocableQty = MA.qty
if MA.qty >= G.minPackingSize:
for bottleneck in G.CurrentCapacityDict:
G.CurrentCapacityDict[bottleneck][self.week] = remainingCapacity[
bottleneck
]
# if the capacity available is not sufficient, the max allocable qty is derived
else:
# calculate max qty allocable
# excessUnits = [0 for i in range(len(requiredCapacity))]
excessUnits = {}
excess = 0
for bottleneck in remainingCapacity:
if (
requiredCapacity[bottleneck] > 0
and remainingCapacity[bottleneck] < 0
):
excessUnits = (
remainingCapacity[bottleneck]
/ G.RouteDict[MA.MAid]["route"][bottleneck]
)
if math.ceil(math.fabs(excessUnits)) > excess:
excess = math.ceil(math.fabs(excessUnits))
# update remaining capacity
assert excess <= MA.qty or MA.qty < G.minPackingSize
allocableQty = MA.qty - excess
if allocableQty >= G.minPackingSize:
# rCap = numpy.array(G.currentCapacity[self.week]) - numpy.multiply(allocableQty,G.route[MA.MAid])
for bottleneck in G.CurrentCapacityDict:
G.CurrentCapacityDict[bottleneck][self.week] -= (
allocableQty * G.RouteDict[MA.MAid]["route"][bottleneck]
)
# update attributes/variables affected by allocation
if allocableQty >= G.minPackingSize:
MA.qty -= allocableQty
MA.minQty = max([0, MA.minQty - allocableQty])
# update allocation output variable
# distinguish case of FutureDemand from PPOSdemand
if MA.future == 1:
G.AllocationFuture[G.replication].append(
[MA.orderID, MA.MAid, allocableQty, self.week + 1]
)
G.FutureLateness[G.replication] += (
max([0, self.week - MA.originalWeek]) * allocableQty
)
G.FutureEarliness[G.replication] += (
max([0, MA.originalWeek - self.week]) * allocableQty
)
else:
G.AllocationPPOS[G.replication].append(
[MA.orderID, MA.MAid, allocableQty, self.week + 1]
)
G.PPOSLateness[G.replication] += (
max([0, self.week - MA.originalWeek]) * allocableQty
)
G.PPOSEarliness[G.replication] += (
max([0, MA.originalWeek - self.week]) * allocableQty
)
def alternativeRoutes(self, MA):
sufficient = False # flag that shows if we have sufficient capacity
# identify MAs with the same SP as the MA investigated
alternativeMADict = (
{}
) # FIXME: the PPOS attribute can be used instead for the current MA
# loop through the MAinfo
for alternativeMA in G.RouteDict:
# if it is the same MA do not consider it
if alternativeMA == MA.MAid:
continue
# if the alternative MA is of the same SP add it to the list
PPOS = G.RouteDict[alternativeMA]["PPOS"]
SP = G.RouteDict[alternativeMA]["SP"]
if PPOS == MA.PPOSid and SP == MA.SPid:
alternativeMADict[alternativeMA] = G.RouteDict[alternativeMA]
# calculate max number of units for each alternative MA
maxUnits = {}
for alternativeMA in alternativeMADict:
MAunits = []
for routeElement in alternativeMADict[alternativeMA]["route"]:
units = alternativeMADict[alternativeMA]["route"][routeElement]
if units != 0:
MAunits.append(
G.CurrentCapacityDict[routeElement][self.week] / units
)
sufficient = True
maxUnits[alternativeMA] = math.floor(min(MAunits))
# choose MA with max number of units
if maxUnits and sufficient:
maxU = 0
maxID = []
for MAid in maxUnits:
if maxUnits[MAid] > maxU:
maxU = maxUnits[MAid]
maxID = [MAid]
elif maxUnits[MAid] == maxU:
maxID.append(MAid)
# choose MA randomly among those with max number of units
chosenMAId = random.choice(maxID)
allocableQty = min([maxU, MA.qty])
if allocableQty >= G.minPackingSize:
for bottleneck in G.CurrentCapacityDict:
G.CurrentCapacityDict[bottleneck][self.week] -= (
allocableQty * G.RouteDict[chosenMAId]["route"][bottleneck]
)
# update attributes/variables affected by allocation
MA.qty -= allocableQty
MA.minQty = max([0, MA.minQty - allocableQty])
# update allocation output variable
# distinguish case of FutureDemand from PPOSdemand
if MA.future == 1:
G.AllocationFuture[G.replication].append(
[MA.orderID, chosenMAId, allocableQty, self.week + 1]
)
G.FutureLateness[G.replication] += (
max([0, self.week - MA.originalWeek]) * allocableQty
)
G.FutureEarliness[G.replication] += (
max([0, MA.originalWeek - self.week]) * allocableQty
)
else:
G.AllocationPPOS[G.replication].append(
[MA.orderID, chosenMAId, allocableQty, self.week + 1]
)
G.PPOSLateness[G.replication] += (
max([0, self.week - MA.originalWeek]) * allocableQty
)
G.PPOSEarliness[G.replication] += (
max([0, MA.originalWeek - self.week]) * allocableQty
)
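# --- Illustrative arithmetic (not part of the Allocation class) ---
# With made-up numbers, this spells out the excess-units calculation used in
# allocationStd: when a bottleneck would go negative, the excess is the ceiling
# of |remaining capacity| / per-unit requirement, and only qty - excess fits.
def _max_allocable_example(qty=100, per_unit=2.0, available=150.0):
    remaining = available - per_unit * qty                # -50.0 -> capacity insufficient
    excess = math.ceil(math.fabs(remaining) / per_unit)   # 25 units cannot be placed
    return qty - excess                                   # 75 units are allocable this week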
|
from configs import Level, LEVEL_MAP, PROCESS_METRICS_FIELDS
from db.QueryBuilder import get_level_refactorings, get_level_stable
from refactoring_statistics.plot_utils import box_plot_seaborn_simple
from refactoring_statistics.query_utils import retrieve_columns, get_last_refactored_instance_all
from utils.log import log_init, log_close, log
import time
import datetime
import pandas as pd
from pathlib import Path
from os import path
from scipy import stats
# metrics
CLASS_METRICS_Fields = ["classCbo",
"classLcom",
"classLCC",
"classTCC",
"classRfc",
"classWmc"]
CLASS_ATTRIBUTES_QTY_Fields = ["classUniqueWordsQty", "classNumberOfMethods", "classStringLiteralsQty",
"classNumberOfPublicFields", "classVariablesQty", "classLoc"]
METRIC_SETS = {"CLASS_METRICS_Fields": CLASS_METRICS_Fields, "CLASS_ATTRIBUTES_QTY_Fields": CLASS_ATTRIBUTES_QTY_Fields, "PO_METRIC_SETS": PROCESS_METRICS_FIELDS}.items()
REFACTORING_SAMPLES = 100000
STABLE_Ks = [15, 20, 25, 30, 35, 40, 45, 50, 60, 70, 80, 90, 100]
STABLE_SAMPLES = 50000
REFACTORING_LEVELS = [Level.Class, Level.Method, Level.Variable, Level.Field, Level.Other]
STABLE_LEVELS = [Level.Class, Level.Method, Level.Variable, Level.Field]
def compute_statistics(metric_data, level, metric, extra_field=""):
stat, p = stats.shapiro(metric_data)
alpha = 0.05
is_non_normal = p < alpha
skew = stats.skew(metric_data.to_numpy())[0]
extra_field_name = 'refactoring_name'
if len(extra_field) <= 3:
extra_field_name = 'k'
return {
f"{extra_field_name}": extra_field,
'level': str(level),
'metric': metric,
'skew': skew,
'mean': metric_data.mean().iloc[0],
'std': metric_data.std().iloc[0],
'min': metric_data.min().iloc[0],
'5%': metric_data.quantile(.05).iloc[0],
'25%': metric_data.quantile(.25).iloc[0],
'50%': metric_data.quantile(.50).iloc[0],
'75%': metric_data.quantile(.75).iloc[0],
'95%': metric_data.quantile(.95).iloc[0],
'max': metric_data.max().iloc[0],
'Shapiro-Wilk-test': f"Statistics={stat}, p={p}",
'non-normal_distribution': is_non_normal
}
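# --- Toy illustration (not part of the original script) ---
# compute_statistics expects a single-column DataFrame; the sample values and
# the refactoring name below are made up.
def _compute_statistics_demo():
    demo = pd.DataFrame({"classCbo": [1, 2, 2, 3, 5, 8, 13, 21]})
    return compute_statistics(demo, Level.Class, "classCbo", extra_field="Extract Method")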
def refactoring_statistics(dataset, save_dir, levels, metrics, file_descriptor, refactorings=False):
statistics = pd.DataFrame(
columns=['refactoring_name', 'level', 'metric', 'descriptive_statistics', 'skew', 'Shapiro-Wilk-test',
'non-normal_distribution'])
for level in levels:
statistics_path = f"{save_dir}{file_descriptor}{str(level)}_{dataset}.csv"
if not path.exists(statistics_path):
for metric in metrics:
if refactorings:
for refactoring_name in LEVEL_MAP[level]:
metric_data = retrieve_columns(get_level_refactorings(int(level), refactoring_name, dataset), [metric], samples=REFACTORING_SAMPLES)
statistics = statistics.append(compute_statistics(metric_data, level, metric, extra_field=refactoring_name), ignore_index=True)
else:
for k in STABLE_Ks:
metric_data = retrieve_columns(get_level_stable(int(level), k, dataset), metrics, STABLE_SAMPLES)
statistics = statistics.append(compute_statistics(metric_data, level, metric, extra_field=f"{k}"), ignore_index=True)
statistics.to_csv(statistics_path, index=False, header=True)
log(f"Collected all statistics for {str(level)} and stored them at: {statistics_path}.")
else:
statistics = statistics.append(pd.read_csv(statistics_path), ignore_index=True)
grouped = statistics.groupby(["metric", "level"], as_index=False).mean()
excel_path = f"{save_dir}{file_descriptor}_{dataset}.xlsx"
grouped.to_excel(excel_path, index=False)
return statistics
SAVE_DIR = f"results/Distribution/Statistics/"
log_init(f"{SAVE_DIR}feature_statistics_{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt")
start_time = time.time()
Path(path.dirname(SAVE_DIR)).mkdir(parents=True, exist_ok=True)
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
# for metric_description, metrics in METRIC_SETS:
# statistics = pd.DataFrame()
# metrics_data = pd.DataFrame()
# for metric in metrics:
# metrics = get_last_refactored_instance_all([metric], REFACTORING_SAMPLES * 5)
# statistics_metric = compute_statistics(metrics, Level.NONE, metric, extra_field="all")
# statistics = statistics.append(statistics_metric, ignore_index=True)
# metrics_data = metrics_data.append(metrics)
# log(f"Extract {metric}")
#
# if metric_description == "CLASS_METRICS_Fields":
# yticks=[1, 2.5, 3.5, 5, 7.5, 10, 15, 20, 25, 35, 50, 100, 150, 250, 500, 650, 1000]
# elif metric_description == "CLASS_ATTRIBUTES_QTY_Fields":
# yticks=[0.5, 0.75, 1, 2.5, 3.5, 5, 7.5, 10, 15, 20, 25, 35, 50, 75, 100, 150, 200]
# else:
# yticks=[0.1, 0.15, 0.25, 0.5, 0.75, 1, 1.5, 2.0, 2.5, 3.5, 5, 6, 7.5, 9, 10, 15, 20]
#
# fig_path = f"{SAVE_DIR}last_refactored_class_{metric_description}_plot.svg"
# box_plot_seaborn_simple(metrics_data, f"{metric_description}", fig_path, "log", yticks=yticks)
#
# grouped = statistics.groupby(["metric", "level"], as_index=False).mean()
# excel_path = f"{SAVE_DIR}last_refactored_class_{metric_description}.xlsx"
# grouped.to_excel(excel_path, index=False)
# log(f"Stored statistics for {metric_description}")
for refactorings in [True, False]:
for metric_description, metrics in METRIC_SETS:
log(f"{refactorings} {metric_description}")
if refactorings:
refactoring_statistics("", SAVE_DIR, REFACTORING_LEVELS, metrics, f"refactoring_{metric_description}", refactorings)
else:
refactoring_statistics("", SAVE_DIR, STABLE_LEVELS, metrics, f"stable_{metric_description}", refactorings)
log('Generating Statistics took %s seconds.' % (time.time() - start_time))
log_close()
exit()
|
"""Simple HTML5 document builder."""
__version__ = '0.1.0'
__author__ = 'skitschy'
class HTML5Builder:
"""Simple HTML5 generator class.
The following code generates a blank Japanese HTML5 document.
>>> tag = HTML5Builder()
>>> doc = tag.html([tag.head(''), tag.body('')], lang='ja')
>>> str(tag.doctype + str(doc))
'<!DOCTYPE html><html lang="ja"><head></head><body></body></html>'
"""
doctype = '<!DOCTYPE html>'
"""HTML5 Doctype string."""
class __ElementBuilder(object):
def __init__(self, name):
self.name = name
def __call__(self, children=[], **kwargs):
if 'cls' in kwargs:
kwargs['class'] = kwargs.pop('cls')
return HTML5Element(self.name, children, kwargs)
def __getattr__(self, name):
"""Return a callable element builder.
This special method is called
through ``self.name(children, **kwargs)``.
This pseudo-method creates a :class:`HTML5Element`.
The pseudo-method name specifies the name of a generating element.
The argument ``children`` and the keyword arguments are
passed to the :class:`HTML5Element` initializer
as ``children`` and ``attrs`` arguments, respectively.
In the keyword arguments, ``cls`` can be used instead of ``class``.
>>> tag = HTML5Builder()
>>> tag.a('anchor text', href='target.html')
HTML5Element('a', ['anchor text'], {'href': 'target.html'})
>>> tag.img(src='image.png')
HTML5Element('img', [], {'src': 'image.png'})
>>> tag.div('', cls='divclass')
HTML5Element('div', [''], {'class': 'divclass'})
"""
return self.__ElementBuilder(name)
class HTML5Element:
"""Simple HTML5 element class.
An instance is usually created through :class:`HTML5Builder`.
Args:
name (str): The element name.
children (sequence): The sequence of element children.
attrs (mapping): The mapping of element attributes.
If ``children`` is a string or non-sequence,
the argument is the child of the instance.
If ``children`` is a sequence,
the elements of the sequence are the children of the instance.
Attributes:
name (str): The element name.
child (:obj:`list`): The list of element children.
attrs (:obj:`dict`): The dictionary of element attributes.
"""
def __init__(self, name, children, attrs):
self.name = str(name)
if isinstance(children, str) or not hasattr(children, '__iter__'):
self.child = [children]
else:
self.child = list(children)
self.attrs = dict(attrs)
def __getitem__(self, key):
"""Syntax sugar for ``self.attr[key]``."""
if key in self.attrs:
return self.attrs[key]
else:
raise KeyError(key)
def __setitem__(self, key, value):
"""Syntax sugar for ``self.attr[key]=``."""
self.attrs[key] = value
def __repr__(self):
return 'HTML5Element({!r}, {!r}, {!r})'.format(
self.name, self.child, self.attrs)
def __str__(self):
"""Output a string of the outer HTML.
For each child, :func:`str` is recursively called.
>>> str(HTML5Element('span', 'inner', {}))
'<span>inner</span>'
>>> str(HTML5Element('div', '', {'class': 'divclass'}))
'<div class="divclass"></div>'
>>> str(HTML5Element('a',
... [HTML5Element('img', [], {'src': 'image.png'}), 'anchor text'],
... {'href': 'target.html'}))
'<a href="target.html"><img src="image.png">anchor text</a>'
"""
if self.attrs:
attrstr = ' '.join(
[''] + ['{}="{}"'.format(k, v) for k, v in self.attrs.items()])
else:
attrstr = ''
if self.child:
inner = ''.join([str(child) for child in self.child])
return '<{0}{1}>{2}</{0}>'.format(self.name, attrstr, inner)
else:
return '<{0}{1}>'.format(self.name, attrstr)
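# --- Composition sketch (not from the original docstrings) ---
# Nests elements built with HTML5Builder and prepends the doctype, mirroring
# the class-level example; the tag names and attributes are arbitrary.
if __name__ == '__main__':
    tag = HTML5Builder()
    page = tag.html(
        [tag.head(tag.title('Demo')), tag.body(tag.p('Hello', cls='greeting'))],
        lang='en')
    print(HTML5Builder.doctype + str(page))
    # <!DOCTYPE html><html lang="en"><head><title>Demo</title></head><body><p class="greeting">Hello</p></body></html>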
|
# 1.7.2 Week 1, Block 7, Exercise 2
# Import
from turtle import *
from random import randint
# Drawing settings
speed(10)
bgcolor('black')
x = 1
while x < 400:
r = randint(0, 255)
g = randint(0, 255)
b = randint(0, 255)
colormode(255)
pencolor(r, g, b)
fd(50 + x)
rt(90.911)
x += 1
exitonclick()
|
# -*- coding: utf-8 -*-
#!/bin/env python3
# 194-67-90-149
import os, sys
import time
import io
import getopt
import subprocess
import signal
import yaml
import requests
os.system('cls')
re = "\033[1;31m"
gr = "\033[1;32m"
cy="\033[1;36m"
logo = (f"""
_ __ {re}___ __{cy}
____(_)______ ____/ /__ _____{re}/ _ )___ / /_{cy}
/ __/ / __/ _ `/ _ / _ `/___{re}/ _ / _ \/ __/{cy}
\__/_/\__/\_,_/\_,_/\_,_/ {re}/____/\___/\__/{cy}
----------Telegram-Bot-PhotoHosting-----------
""")
re = "\033[1;31m"
gr = "\033[1;32m"
cy = "\033[1;36m"
print(logo)
print(f"""
{gr}+-+-+-+-+-+ +-+-+-+-+{cy}
{gr}|T|o|k|e|n| |B|o|t|a|{cy}
{gr}+-+-+-+-+-+ +-+-+-+-+{cy}
""")
token = input('⏩⏩⏩ ')
os.system('cls')
print(logo)
message_bot = 'Start a conversation with me'
#message_bot = input(f"{re}\nText for the /start helper button: ")
os.system('cls')
data = {'bot': {'token': token,
'parse_mode': 'html'}, 'executor': {'skip_updates':
True}, 'modules': ['handlers.main', 'handlers.errors'],
'log_ignore': ['aiogram', 'asyncio', 'aiogram.Middleware'],
'commands': {'start': message_bot}}
with io.open('config.yaml', 'w', encoding='utf8') as outfile:
yaml.dump(data, outfile, default_flow_style=False, allow_unicode=True)
MethodGetMe = (f'''https://api.telegram.org/bot{token}/GetMe''')
response = requests.post(MethodGetMe)
tttm = response.json()
id_us = (tttm['result']['id'])
first_name = (tttm['result']['first_name'])
username = (tttm['result']['username'])
os.system('del git/')
os.system('git init')
os.system('git add .')
os.system('git config --global user.email "you@example.com"')
os.system('git config --global user.name "Your Name"')
os.system('git commit -m "den" ')
os.system('heroku create')
os.system('git remote -v')
time.sleep(2)
os.system('git push heroku')
time.sleep(3)
os.system('heroku ps:scale worker=1')
print(logo)
print(f"""
---------------------------------
🆔 Your id: {id_us}
---------------------------------
👤 Bot name: {first_name}
---------------------------------
🗣 username: {username}
---------------------------------
🌐 https://t.me/{username}
---------------------------------
******* Support: @Satanasat ******
""")
input("Bot started")
|
import datetime
import unittest
from base import option
class TestOptionsClass(unittest.TestCase):
def testOptionClassCreation(self):
"""Tests than an exception is raised when class is instantiated."""
with self.assertRaisesRegex(TypeError, "Cannot instantiate abstract class."):
option.Option(underlyingTicker='SPY', strikePrice=250, delta=0.3, expirationDateTime=datetime.datetime.now())
if __name__ == '__main__':
unittest.main()
|
import time
import struct
try:
# Try to import the Python 3.x enum module
from enum import IntEnum
except ImportError:
# If we're on Python 2.x we need to define
# a dummy replacement
class IntEnum:
pass
# <pep8 compliant>
LOG_READ_TIME = False
LOG_WRITE_TIME = False
LOG_ANIM_HEADER = False
LOG_ANIM_BONES = False
LOG_ANIM_BONE_MODIFIERS = False
LOG_ANIM_BONES_KEYS = False
LOG_ANIM_NOTES = False
class SEANIM_TYPE(IntEnum):
SEANIM_TYPE_ABSOLUTE = 0
SEANIM_TYPE_ADDITIVE = 1
SEANIM_TYPE_RELATIVE = 2
SEANIM_TYPE_DELTA = 3
class SEANIM_PRESENCE_FLAGS(IntEnum):
# These describe what type of keyframe data is present for the bones
SEANIM_BONE_LOC = 1 << 0
SEANIM_BONE_ROT = 1 << 1
SEANIM_BONE_SCALE = 1 << 2
# If any of the above flags are set, then bone keyframe data is present,
# thus this comparing against this mask will return true
SEANIM_PRESENCE_BONE = 1 << 0 | 1 << 1 | 1 << 2
SEANIM_PRESENCE_NOTE = 1 << 6 # The file contains notetrack data
SEANIM_PRESENCE_CUSTOM = 1 << 7 # The file contains a custom data block
class SEANIM_PROPERTY_FLAGS(IntEnum):
SEANIM_PRECISION_HIGH = 1 << 0
class SEANIM_FLAGS(IntEnum):
SEANIM_LOOPED = 1 << 0
class Info(object):
__slots__ = ('version', 'magic')
def __init__(self, file=None):
self.version = 1
self.magic = b'SEAnim'
if file is not None:
self.load(file)
def load(self, file):
bytes = file.read(8)
data = struct.unpack('6ch', bytes)
magic = b''
for i in range(6):
magic += data[i]
version = data[6]
assert magic == self.magic
assert version == self.version
def save(self, file):
bytes = self.magic
bytes += struct.pack('h', self.version)
file.write(bytes)
class Header(object):
__slots__ = (
'animType', 'animFlags',
'dataPresenceFlags', 'dataPropertyFlags',
'framerate', 'frameCount',
'boneCount', 'boneAnimModifierCount',
'noteCount'
)
def __init__(self, file=None):
self.animType = SEANIM_TYPE.SEANIM_TYPE_RELATIVE # Relative is default
self.animFlags = 0x0
self.dataPresenceFlags = 0x0
self.dataPropertyFlags = 0x0
self.framerate = 0
self.frameCount = 0
self.boneCount = 0
self.boneAnimModifierCount = 0
self.noteCount = 0
if file is not None:
self.load(file)
def load(self, file):
bytes = file.read(2)
data = struct.unpack('h', bytes)
headerSize = data[0]
bytes = file.read(headerSize - 2)
# The '=' prefix tells struct to ignore C packing/alignment rules
data = struct.unpack('=6BfII4BI', bytes)
self.animType = data[0]
self.animFlags = data[1]
self.dataPresenceFlags = data[2]
self.dataPropertyFlags = data[3]
# reserved = data[4]
# reserved = data[5]
self.framerate = data[6]
self.frameCount = data[7]
self.boneCount = data[8]
self.boneAnimModifierCount = data[9]
# reserved = data[10]
# reserved = data[11]
# reserved = data[12]
self.noteCount = data[13]
def save(self, file):
bytes = struct.pack('=6BfII4BI',
self.animType, self.animFlags,
self.dataPresenceFlags, self.dataPropertyFlags,
0, 0,
self.framerate,
self.frameCount, self.boneCount,
self.boneAnimModifierCount, 0, 0, 0,
self.noteCount)
size = struct.pack('h', len(bytes) + 2)
file.write(size)
file.write(bytes)
class Frame_t(object):
"""
The Frame_t class is only ever used to get the size
and format character used by frame indices in a given seanim file
"""
__slots__ = ('size', 'char')
def __init__(self, header):
if header.frameCount <= 0xFF:
self.size = 1
self.char = 'B'
elif header.frameCount <= 0xFFFF:
self.size = 2
self.char = 'H'
else: # if header.frameCount <= 0xFFFFFFFF:
self.size = 4
self.char = 'I'
class Bone_t(object):
"""
The Bone_t class is only ever used to get the size
and format character used by frame indices in a given seanim file
"""
__slots__ = ('size', 'char')
def __init__(self, header):
if header.boneCount <= 0xFF:
self.size = 1
self.char = 'B'
elif header.boneCount <= 0xFFFF:
self.size = 2
self.char = 'H'
else: # if header.boneCount <= 0xFFFFFFFF:
self.size = 4
self.char = 'I'
class Precision_t(object):
"""
The Precision_t class is only ever used to get the size
and format character used by vec3_t, quat_t, etc. in a given seanim file
"""
__slots__ = ('size', 'char')
def __init__(self, header):
if (header.dataPropertyFlags &
SEANIM_PROPERTY_FLAGS.SEANIM_PRECISION_HIGH):
self.size = 8
self.char = 'd'
else:
self.size = 4
self.char = 'f'
class KeyFrame(object):
"""
A small class used for holding keyframe data
"""
__slots__ = ('frame', 'data')
def __init__(self, frame, data):
self.frame = frame
self.data = data
class Bone(object):
__slots__ = (
'name', 'flags',
'locKeyCount', 'rotKeyCount', 'scaleKeyCount',
'posKeys', 'rotKeys', 'scaleKeys',
'useModifier', 'modifier'
)
def __init__(self, file=None):
self.name = ""
self.flags = 0x0
self.locKeyCount = 0
self.rotKeyCount = 0
self.scaleKeyCount = 0
self.posKeys = []
self.rotKeys = []
self.scaleKeys = []
self.useModifier = False
self.modifier = 0
if file is not None:
self.load(file)
def load(self, file):
bytes = b''
b = file.read(1)
while not b == b'\x00':
bytes += b
b = file.read(1)
self.name = bytes.decode("utf-8")
def loadData(self, file, frame_t, precision_t,
useLoc=False, useRot=False, useScale=False):
# Read the flags for the bone
bytes = file.read(1)
data = struct.unpack("B", bytes)
self.flags = data[0]
# Load the position keyframes if they are present
if useLoc:
bytes = file.read(frame_t.size)
data = struct.unpack('%c' % frame_t.char, bytes)
self.locKeyCount = data[0]
for _ in range(self.locKeyCount):
bytes = file.read(frame_t.size + 3 * precision_t.size)
data = struct.unpack('=%c3%c' %
(frame_t.char, precision_t.char), bytes)
frame = data[0]
pos = (data[1], data[2], data[3])
self.posKeys.append(KeyFrame(frame, pos))
# Load the rotation keyframes if they are present
if useRot:
bytes = file.read(frame_t.size)
data = struct.unpack('%c' % frame_t.char, bytes)
self.rotKeyCount = data[0]
for _ in range(self.rotKeyCount):
bytes = file.read(frame_t.size + 4 * precision_t.size)
data = struct.unpack('=%c4%c' %
(frame_t.char, precision_t.char), bytes)
frame = data[0]
# Load the quaternion as XYZW
quat = (data[1], data[2], data[3], data[4])
self.rotKeys.append(KeyFrame(frame, quat))
# Load the scale keyframes if they are present
if useScale:
bytes = file.read(frame_t.size)
data = struct.unpack('%c' % frame_t.char, bytes)
self.scaleKeyCount = data[0]
for _ in range(self.scaleKeyCount):
bytes = file.read(frame_t.size + 3 * precision_t.size)
data = struct.unpack('=%c3%c' %
(frame_t.char, precision_t.char), bytes)
frame = data[0]
scale = (data[1], data[2], data[3])
self.scaleKeys.append(KeyFrame(frame, scale))
def save(self, file, frame_t, bone_t, precision_t,
useLoc=False, useRot=False, useScale=False):
bytes = struct.pack("B", self.flags)
file.write(bytes)
if useLoc:
bytes = struct.pack('%c' % frame_t.char, len(self.posKeys))
file.write(bytes)
for key in self.posKeys:
bytes = struct.pack('=%c3%c' %
(frame_t.char, precision_t.char),
key.frame,
key.data[0], key.data[1], key.data[2])
file.write(bytes)
if useRot:
bytes = struct.pack('%c' % frame_t.char, len(self.rotKeys))
file.write(bytes)
for key in self.rotKeys:
bytes = struct.pack('=%c4%c' %
(frame_t.char, precision_t.char),
key.frame,
key.data[0], key.data[1],
key.data[2], key.data[3])
file.write(bytes)
if useScale:
bytes = struct.pack('%c' % frame_t.char, len(self.scaleKeys))
file.write(bytes)
for key in self.scaleKeys:
bytes = struct.pack('=%c3%c' %
(frame_t.char, precision_t.char),
key.frame,
key.data[0], key.data[1], key.data[2])
file.write(bytes)
class Note(object):
__slots__ = ('frame', 'name')
def __init__(self, file=None, frame_t=None):
self.frame = -1
self.name = ""
if file is not None:
self.load(file, frame_t)
def load(self, file, frame_t):
bytes = file.read(frame_t.size)
data = struct.unpack('%c' % frame_t.char, bytes)
self.frame = data[0]
bytes = b''
b = file.read(1)
while not b == b'\x00':
bytes += b
b = file.read(1)
self.name = bytes.decode("utf-8")
def save(self, file, frame_t):
bytes = struct.pack('%c' % frame_t.char, self.frame)
file.write(bytes)
bytes = struct.pack('%ds' % (len(self.name) + 1), self.name.encode())
file.write(bytes)
class Anim(object):
__slots__ = ('__info', 'info', 'header', 'bones',
'boneAnimModifiers', 'notes')
def __init__(self, path=None):
self.__info = Info()
self.header = Header()
self.bones = []
self.boneAnimModifiers = []
self.notes = []
if path is not None:
self.load(path)
# Update the header flags based on the presence of certain keyframe /
# notetrack data
def update_metadata(self, high_precision=False, looping=False):
anim_locKeyCount = 0
anim_rotKeyCount = 0
anim_scaleKeyCount = 0
header = self.header
header.boneCount = len(self.bones)
dataPresenceFlags = header.dataPresenceFlags
dataPropertyFlags = header.dataPropertyFlags
max_frame_index = 0
for bone in self.bones:
bone.locKeyCount = len(bone.posKeys)
bone.rotKeyCount = len(bone.rotKeys)
bone.scaleKeyCount = len(bone.scaleKeys)
anim_locKeyCount += bone.locKeyCount
anim_rotKeyCount += bone.rotKeyCount
anim_scaleKeyCount += bone.scaleKeyCount
for key in bone.posKeys:
max_frame_index = max(max_frame_index, key.frame)
for key in bone.rotKeys:
max_frame_index = max(max_frame_index, key.frame)
for key in bone.scaleKeys:
max_frame_index = max(max_frame_index, key.frame)
if anim_locKeyCount:
dataPresenceFlags |= SEANIM_PRESENCE_FLAGS.SEANIM_BONE_LOC
if anim_rotKeyCount:
dataPresenceFlags |= SEANIM_PRESENCE_FLAGS.SEANIM_BONE_ROT
if anim_scaleKeyCount:
dataPresenceFlags |= SEANIM_PRESENCE_FLAGS.SEANIM_BONE_SCALE
for note in self.notes:
max_frame_index = max(max_frame_index, note.frame)
header.noteCount = len(self.notes)
if header.noteCount:
dataPresenceFlags |= SEANIM_PRESENCE_FLAGS.SEANIM_PRESENCE_NOTE
if high_precision:
dataPropertyFlags |= SEANIM_PROPERTY_FLAGS.SEANIM_PRECISION_HIGH
if looping:
header.animFlags |= SEANIM_FLAGS.SEANIM_LOOPED
header.dataPresenceFlags = dataPresenceFlags
header.dataPropertyFlags = dataPropertyFlags
# FrameCount represents the length of the animation in frames
# and since all animations start at frame 0 - we simply grab
# the max frame number (from keys / notes / etc.) and add 1 to it
header.frameCount = max_frame_index + 1
def load(self, path):
if LOG_READ_TIME:
time_start = time.time()
print("Loading: '%s'" % path)
try:
file = open(path, "rb")
except IOError:
print("Could not open file for reading:\n %s" % path)
return
self.info = Info(file)
self.header = Header(file)
self.boneAnimModifiers = []
# Init the frame_t, bone_t and precision_t info
frame_t = Frame_t(self.header)
bone_t = Bone_t(self.header)
precision_t = Precision_t(self.header)
dataPresenceFlags = self.header.dataPresenceFlags
if LOG_ANIM_HEADER:
print("Magic: %s" % self.info.magic)
print("Version: %d" % self.info.version)
print("AnimType: %d" % self.header.animType)
print("AnimFlags: %d" % self.header.animFlags)
print("PresenceFlags: %d" % dataPresenceFlags)
print("PropertyFlags: %d" % self.header.dataPropertyFlags)
print("FrameRate: %f" % self.header.framerate)
print("FrameCount: %d" % self.header.frameCount)
print("BoneCount: %d" % self.header.boneCount)
print("NoteCount: %d" % self.header.noteCount)
print("BoneModifierCount: %d" % self.header.boneAnimModifierCount)
print("Frame_t Size: %d" % frame_t.size)
print("Frame_t Char: '%s'" % frame_t.char)
self.bones = []
if dataPresenceFlags & SEANIM_PRESENCE_FLAGS.SEANIM_PRESENCE_BONE:
useLoc = dataPresenceFlags & SEANIM_PRESENCE_FLAGS.SEANIM_BONE_LOC
useRot = dataPresenceFlags & SEANIM_PRESENCE_FLAGS.SEANIM_BONE_ROT
useScale = (dataPresenceFlags &
SEANIM_PRESENCE_FLAGS.SEANIM_BONE_SCALE)
for i in range(self.header.boneCount):
if LOG_ANIM_BONES:
print("Loading Name for Bone[%d]" % i)
self.bones.append(Bone(file))
for i in range(self.header.boneAnimModifierCount):
bytes = file.read(bone_t.size + 1)
data = struct.unpack("%cB" % bone_t.char, bytes)
index = data[0]
self.bones[index].useModifier = True
self.bones[index].modifier = data[1]
self.boneAnimModifiers.append(self.bones[index])
if LOG_ANIM_BONE_MODIFIERS:
print("Loaded Modifier %d for '%s" %
(index, self.bones[index].name))
for i in range(self.header.boneCount):
if LOG_ANIM_BONES:
print("Loading Data For Bone[%d] '%s'" % (
i, self.bones[i].name))
self.bones[i].loadData(
file, frame_t, precision_t, useLoc, useRot, useScale)
if LOG_ANIM_BONES_KEYS:
for key in self.bones[i].posKeys:
print("%s LOC %d %s" %
(self.bones[i].name, key.frame, key.data))
for key in self.bones[i].rotKeys:
print("%s ROT %d %s" %
(self.bones[i].name, key.frame, key.data))
for key in self.bones[i].scaleKeys:
print("%s SCALE %d %s" %
(self.bones[i].name, key.frame, key.data))
self.notes = []
if (self.header.dataPresenceFlags &
SEANIM_PRESENCE_FLAGS.SEANIM_PRESENCE_NOTE):
for i in range(self.header.noteCount):
note = Note(file, frame_t)
self.notes.append(note)
if LOG_ANIM_NOTES:
print("Loaded Note[%d]:" % i)
print(" Frame %d: %s" % (note.frame, note.name))
file.close()
if LOG_READ_TIME:
time_end = time.time()
time_elapsed = time_end - time_start
print("Done! - Completed in %ss" % time_elapsed)
def save(self, filepath="", high_precision=False, looping=False):
if LOG_WRITE_TIME:
time_start = time.time()
print("Saving: '%s'" % filepath)
try:
file = open(filepath, "wb")
except IOError:
print("Could not open file for writing:\n %s" % filepath)
return
# Update the header flags, based on the presence of different keyframe
# types
self.update_metadata(high_precision, looping)
self.__info.save(file)
self.header.save(file)
for bone in self.bones:
bytes = struct.pack(
'%ds' % (len(bone.name) + 1), bone.name.encode())
file.write(bytes)
dataPresenceFlags = self.header.dataPresenceFlags
useLoc = dataPresenceFlags & SEANIM_PRESENCE_FLAGS.SEANIM_BONE_LOC
useRot = dataPresenceFlags & SEANIM_PRESENCE_FLAGS.SEANIM_BONE_ROT
useScale = dataPresenceFlags & SEANIM_PRESENCE_FLAGS.SEANIM_BONE_SCALE
frame_t = Frame_t(self.header)
bone_t = Bone_t(self.header)
precision_t = Precision_t(self.header)
for index, bone in enumerate(self.bones):
if bone.useModifier:
bytes = struct.pack('%cB' % bone_t.char, index, bone.modifier)
file.write(bytes)
for bone in self.bones:
bone.save(file, frame_t, bone_t, precision_t,
useLoc, useRot, useScale)
if dataPresenceFlags & SEANIM_PRESENCE_FLAGS.SEANIM_PRESENCE_NOTE:
for note in self.notes:
note.save(file, frame_t)
file.close()
if LOG_WRITE_TIME:
time_end = time.time()
time_elapsed = time_end - time_start
print("Done! - Completed in %ss" % time_elapsed)
|
# Copyright (C) 2019-2021 HERE Europe B.V.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
# License-Filename: LICENSE
"""Module for testing API exceptions."""
import pytest
import requests
from xyzspaces.exceptions import ApiError
def test_exception_requests_invalid(api):
    """Raise an exception via requests for an invalid endpoint response."""
with pytest.raises(ApiError) as execinfo:
url = f"{api.xyzconfig.config['url']}/hub/invalid"
resp = requests.get(url)
raise ApiError(resp)
resp = execinfo.value.args[0]
assert resp.status_code == 404
assert resp.reason == "Not Found"
def test_exception_requests_invalid_str(api):
    """Test that the raised exception's string follows the expected pattern."""
with pytest.raises(ApiError) as execinfo:
url = f"{api.xyzconfig.config['url']}/hub/invalid"
resp = requests.get(url)
raise ApiError(resp)
assert str(execinfo.value).startswith('404, Not Found, {"type":"error",')
def test_exception_response_invalid(api):
"""Raise exception via API as response for invalid endpoint."""
with pytest.raises(ApiError) as execinfo:
api.get(path="/hub/invalid")
resp = execinfo.value.args[0]
assert resp.status_code == 404
assert resp.reason == "Not Found"
assert resp.json()["message"] == "The requested resource does not exist."
|
from minpiler.std import L, M, inline
@inline
def f(x):
y = x
M.print_flush(y)
f(L.message1)
# > printflush message1
|
import matplotlib.pyplot as plt
import pyDeltaRCM
with pyDeltaRCM.shared_tools._docs_temp_directory() as output_dir:
delta = pyDeltaRCM.DeltaModel(out_dir=output_dir)
for _t in range(0, 5):
delta.update()
delta.finalize()
fig, ax = plt.subplots()
ax.imshow(delta.bed_elevation, vmax=-3)
plt.show()
|
from db.tables import materials
def load():
return materials.read()
|
import psycopg2
import numpy as np
from numpy import convolve
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from psycopg2 import Error
from DataProcessing import StockData
import displayData
import sys
def pullColumnAll(cursor, ticker, column_name):
"""
Retrieves specific column data for the given parameter (column_name)
Arguments:
cursor: cursor object for the database
ticker: ticker for which we are collecting data
column_name: specific column we want data from
Returns:
column data for the selected column name and the dates associated
"""
DATE_TABLE_QUERY = ('SELECT date FROM {}'.format(ticker))
DATA_TABLE_QUERY = ('SELECT {} FROM {}'.format(column_name, ticker))
print("Your query: ", '\'', DATE_TABLE_QUERY, '\'', sep="")
print("Your query: ", '\'', DATA_TABLE_QUERY, '\'', sep="")
cursor.execute(DATE_TABLE_QUERY)
column_dates = cursor.fetchall()
cursor.execute(DATA_TABLE_QUERY)
column_data = cursor.fetchall()
dates = [d for d in column_dates]
data = [d for d in column_data]
return dates, data
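
# A minimal usage sketch (not from the original script). It assumes a
# hypothetical table named "aapl" with "date" and "close" columns exists in the
# connected database.
def example_pull_all(cursor):
    dates, closes = pullColumnAll(cursor, 'aapl', 'close')
    print("fetched {} rows for aapl.close".format(len(dates)))
    return dates, closes
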
def pullColumnRange(cursor, ticker, column_name, start, end):
"""
    Retrieves specific column data over an interval from start to end
    NOTE: end must be a later date than start, and in timestamp form,
i.e. start := '2010-08-08 00:00'
end := '2011-08-08 00:00'
Arguments:
cursor: cursor obj for database
ticker: ticker for which we are collecting
column_name: indicator
start: start interval
end: end interval
Returns:
Column data for the given indicator as a list
Length of this list = end-start
"""
    try:  # Trust the user and query from the start date to the end date
        DATE_TABLE_QUERY = 'SELECT date FROM {} WHERE date >= %s AND date < %s'.format(ticker)
        DATA_TABLE_QUERY = 'SELECT {} FROM {} WHERE date >= %s AND date < %s'.format(column_name, ticker)
        cursor.execute(DATE_TABLE_QUERY, (start, end))
        column_dates = cursor.fetchall()
        cursor.execute(DATA_TABLE_QUERY, (start, end))
        column_data = cursor.fetchall()
    except (Exception, psycopg2.DatabaseError):
        cursor.connection.rollback()
        try:  # Fall back: from the start date to the end of the available data
            DATE_TABLE_QUERY = 'SELECT date FROM {} WHERE date >= %s'.format(ticker)
            DATA_TABLE_QUERY = 'SELECT {} FROM {} WHERE date >= %s'.format(column_name, ticker)
            cursor.execute(DATE_TABLE_QUERY, (start,))
            column_dates = cursor.fetchall()
            cursor.execute(DATA_TABLE_QUERY, (start,))
            column_data = cursor.fetchall()
        except (Exception, psycopg2.DatabaseError):
            cursor.connection.rollback()
            try:  # Last resort: return every row that is available
                return pullColumnAll(cursor, ticker, column_name)
            except (Exception, psycopg2.DatabaseError):
                print("Unknown error, aborting program!")
                print("Your query: ", '\'', DATA_TABLE_QUERY, '\'', sep="")
                print("Your query: ", '\'', DATE_TABLE_QUERY, '\'', sep="")
                return [], []
    dates = [d for d in column_dates]
    data = [d for d in column_data]
    return dates, data
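
# A minimal usage sketch for the range variant (not from the original script).
# It assumes the same hypothetical "aapl" table; the start/end strings follow
# the timestamp format documented in the docstring above.
def example_pull_range(cursor):
    dates, closes = pullColumnRange(cursor, 'aapl', 'close',
                                    '2010-08-08 00:00', '2011-08-08 00:00')
    return dates, closes
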
def MainWrapper(company_ticker, data_type):
connection = None
try:
# Connect to the PostgreSQL Server & Databse
print('Connecting to the PostgreSQL database...')
connection = psycopg2.connect(host='206.189.181.163',
database='rcos',
user='rcos',
password='hedgehogs_rcos')
# Create a Cursor & Print Version
cursor = connection.cursor()
# Print PostgreSQL Version
        cursor.execute('SELECT version();')
        record = cursor.fetchone()
        print('You are connected to - ', record, '\n')
print("COLUMN NAME OPTIONS:")
print("\tdate\n\tlow\n\thigh\n\tvolume\n\tclose\n\topen\n")
if(len(sys.argv) != 3):
print("NOT ENOUGH ARGUMENTS")
# Example for Ticker AAPL
ticker = company_ticker
column_name = data_type
print(ticker)
print(column_name)
dates, data = pullColumnAll(cursor, ticker, column_name)
print("data size: {}\ndates size: {}".format(len(data), len(dates)))
# Example Moving Averages for APPL
window_sma = 50
window_ema = 20
AAPL = StockData(ticker, column_name, dates, data)
print("\nChecking ticker", AAPL.getTicker(), "for column:", AAPL.getIndicator())
print("SMA:", AAPL.simpleMA(window_sma))
print("\nEMA:", AAPL.expMA(window_ema))
print("")
dates, vals = pullColumnAll(cursor, ticker, column_name)
displayData.plotValues(dates, vals, column_name, ticker)
# Test Accessor Methods
#print("Maximum value:", AAPL.getMax())
#print("Median value:", AAPL.getMedian())
#print("Time range:", AAPL.getTimeRange())
except (Exception, psycopg2.DatabaseError) as error:
print("Error while connecting to PostgreSQL =>", error)
finally:
#Closing Database Connection
if (connection):
cursor.close()
connection.close()
print("PostgreSQL connection is closed.")
# MainWrapper(sys.argv[1], sys.argv[2])
################################################################################
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# author: owefsad@huoxian.cn
# datetime: 2021/7/1 下午3:02
# project: dongtai-engine
from django.db import models
from dongtai.models.agent import IastAgent
from dongtai.utils.settings import get_managed
class IastReplayQueue(models.Model):
agent = models.ForeignKey(IastAgent, models.DO_NOTHING, blank=True, null=True)
relation_id = models.IntegerField(blank=True, null=True)
state = models.IntegerField(blank=True, null=True)
count = models.IntegerField(blank=True, null=True)
result = models.IntegerField(blank=True, null=True)
create_time = models.IntegerField(blank=True, null=True)
update_time = models.IntegerField(blank=True, null=True)
verify_time = models.IntegerField(blank=True, null=True)
uri = models.CharField(max_length=2000, blank=True, null=True)
method = models.CharField(max_length=10, blank=True, null=True)
scheme = models.CharField(max_length=10, blank=True, null=True)
header = models.CharField(max_length=4000, blank=True, null=True)
params = models.CharField(max_length=2000, blank=True, null=True)
body = models.CharField(max_length=4000, blank=True, null=True)
replay_type = models.IntegerField(blank=True, null=True)
class Meta:
managed = get_managed()
db_table = 'iast_replay_queue'
ordering = ('-replay_type',)
|
"""Check if userbot alive. If you change these, you become the gayest gay such that even the gay world will disown you."""
#IMG CREDITS: @WhySooSerious
import asyncio
from telethon import events
from uniborg.util import admin_cmd
from userbot import ALIVE_NAME
from telethon.tl.types import ChannelParticipantsAdmins
DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else "CrackBot"
PM_IMG = "https://telegra.ph/file/3feaa0629756e0e45a2b7.jpg"
pm_caption = "**ᴄʀᴀᴄᴋʙᴏᴛ ᴏғғɪᴄɪᴀʟ**\n\n"
pm_caption += "**Yes Master, Am Alive And Systems Are Working Perfectly As It Should Be...**\n\n"
pm_caption += "✘ About My System ✘\n\n"
pm_caption += "➾ **Telethon Version** : 6.0.9\n➾ **Python** : 3.7.4\n"
pm_caption += "➾ **DataBase** : Functioning\n"
pm_caption += "➾ **Bot Creator** : [Crackexy](https://t.me/Crackexy)\n"
pm_caption += "➾ **Crackbot Version** : 3.0\n\n"
pm_caption += f"➾ **My Master** : {DEFAULTUSER}\n"
#@command(outgoing=True, pattern="^.alive$")
@borg.on(admin_cmd(pattern=r"alive"))
async def amireallyalive(alive):
    """ For .alive command, check if the bot is running. """
    await alive.delete()
    await borg.send_file(alive.chat_id, PM_IMG, caption=pm_caption)
|
def LoadParameters(name):
if name == 'q50_4_3_14_params':
from parameters.q50_4_3_14_params import GetQ50Params
return GetQ50Params()
elif name == 'q50_3_7_14_params':
from parameters.q50_3_7_14_params import GetQ50Params
return GetQ50Params()
elif name == 'q50_11_20_14_params':
from parameters.q50_11_20_14_params import GetQ50Params
return GetQ50Params()
else:
raise Exception("Parameter file " + name + " not found")
|
# @copyright@
# Copyright (c) 2006 - 2018 Teradata
# All rights reserved. Stacki(r) v5.x stacki.com
# https://github.com/Teradata/stacki/blob/master/LICENSE.txt
# @copyright@
import stack.commands
class Command(stack.commands.remove.environment.command):
"""
Remove an attribute for an Environment.
<arg type='string' name='environment' repeat='1' optional='1'>
One or more Environment specifications (e.g., 'test').
</arg>
<param type='string' name='attr' optional='0'>
The attribute name that should be removed.
</param>
<example cmd='remove environment attr test attr=sge'>
	Removes the attribute sge for the test environment machines.
</example>
"""
def run(self, params, args):
self.command('set.attr', self._argv + [ 'scope=environment', 'value=' ])
return self.rc
|
from keras_batchflow.base.batch_shapers import BatchShaper
from keras_batchflow.base.batch_shapers import IEncoderAdaptor
from keras_batchflow.base.batch_shapers import NumpyEncoderAdaptor
from keras_batchflow.base.batch_shapers import PandasEncoderAdaptor
|
# VISA full-time master's / Ph.D. interview question.
# Example input: money = 50, items_per_bundle = 20, bundle_cost = 12
def find_bundles(money, items_per_bundle, bundle_cost):
    total_money_spent = 0
    total_items = 0
    if money == 0:
        return 0
    while money - total_money_spent >= bundle_cost:
        total_items += items_per_bundle
        total_money_spent += bundle_cost
    total_money_left = money - total_money_spent
    print('total bundled items are', total_items)
    print('money left', total_money_left)
    return total_items


money = 50
items_per_bundle = 20
bundle_cost = 12
find_bundles(money, items_per_bundle, bundle_cost)
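
# A quick arithmetic check of the example above (assuming the intent is to buy
# whole bundles by integer division and keep the remainder as change):
# 50 // 12 = 4 bundles -> 4 * 20 = 80 items, with 50 - 4 * 12 = 2 left over.
assert 50 // 12 == 4 and 4 * 20 == 80 and 50 - 4 * 12 == 2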
|
import os
# Rules for bracket based off of https://www.boydsbets.com/whats-the-farthest-each-seed-has-gone-in-the-tournament/
# Rule 1) Only pick from the top 3 seeds to win tournament
# Rule 2) Best for seeds 4-7 is final four
# Rule 3) Best for seeds 9-11 is elite eight
# Rule 4) Best for seeds 12-16 is sweet sixteen
# Odds Nested Dictionary
# Odds are round, then seed: oddsDict[2][12] is the odds of a 12th seed winning in the 2nd round
# Odds are from https://www.betfirm.com/seeds-national-championship-odds/
oddsDict = {
# seed: {64teams: r1pct, 32teams: r2pct, sweet16: r3pct, elite8: r4pct, final4: r5pct, finals: r6pct}
1: {1: 0.993, 2: 0.860, 3: 0.813, 4: 0.590, 5: 0.610, 6: 0.640},
2: {1: 0.938, 2: 0.675, 3: 0.714, 4: 0.462, 5: 0.433, 6: 0.385},
3: {1: 0.847, 2: 0.615, 3: 0.493, 4: 0.459, 5: 0.647, 6: 0.364},
4: {1: 0.785, 2: 0.593, 3: 0.313, 4: 0.619, 5: 0.231, 6: 0.000},
5: {1: 0.646, 2: 0.527, 3: 0.200, 4: 0.780, 5: 0.428, 6: 0.000},
6: {1: 0.625, 2: 0.478, 3: 0.349, 4: 0.200, 5: 0.667, 6: 0.000},
7: {1: 0.604, 2: 0.322, 3: 0.357, 4: 0.300, 5: 0.000, 6: 0.000},
8: {1: 0.493, 2: 0.197, 3: 0.571, 4: 0.600, 5: 0.000, 6: 0.000},
9: {1: 0.507, 2: 0.096, 3: 0.571, 4: 0.250, 5: 0.000, 6: 0.000},
10: {1: 0.396, 2: 0.403, 3: 0.347, 4: 0.125, 5: 0.000, 6: 0.000},
11: {1: 0.375, 2: 0.444, 3: 0.375, 4: 0.556, 5: 0.000, 6: 0.000},
12: {1: 0.354, 2: 0.431, 3: 0.091, 4: 0.000, 5: 0.000, 6: 0.000},
13: {1: 0.215, 2: 0.194, 3: 0.000, 4: 0.000, 5: 0.000, 6: 0.000},
14: {1: 0.153, 2: 0.100, 3: 0.000, 4: 0.000, 5: 0.000, 6: 0.000},
15: {1: 0.063, 2: 0.222, 3: 0.000, 4: 0.000, 5: 0.000, 6: 0.000},
16: {1: 0.007, 2: 0.000, 3: 0.000, 4: 0.000, 5: 0.000, 6: 0.000}
}
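
# A minimal helper sketch (not part of the original file). It reads oddsDict as
# documented above and treats each round's percentage as conditional on the seed
# having reached that round, which is an assumption the source table does not state.
def survival_probability(seed, through_round):
    """Estimated chance that `seed` wins every round up to `through_round` (1-6)."""
    p = 1.0
    for rnd in range(1, through_round + 1):
        p *= oddsDict[seed][rnd]
    return p

# Example: a 12 seed reaching the Sweet Sixteen (winning rounds 1 and 2):
# survival_probability(12, 2) == 0.354 * 0.431, roughly 0.153
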
def main():
    # Do something here
    pass


if __name__ == '__main__':
    main()
|
import requests
from bs4 import BeautifulSoup
headers = {
    'User-Agent': 'Mozilla/5.0',
    'Accept-Language': 'lang=AR-DZ',
}
s = requests.Session()
url = 'http://www.mae.gov.dz/select_language.aspx?language=ar&file=default_ar.aspx'
r = s.get(url)#, headers=headers)
url = 'http://www.mae.gov.dz/news_article/6396.aspx'
r = s.get(url)#, headers=headers)
soup = BeautifulSoup(r.content, "lxml")
print(soup.get_text())
|
dlga_start:close_window = 0
dlga_member_chat:close_window = 1
dlga_event_triggered:close_window = 2
dlga_start:hideout_warder_1 = 3
dlga_start:hideout_torturer_1 = 4
dlga_start:brothel_manager = 5
dlga_start:nakepit_manager = 6
dlga_start:nakepit_manager = 7
dlga_start:close_window.1 = 8
dlga_start:bath_manager = 9
dlga_start:hideout_chef_1 = 10
dlga_start:close_window.2 = 11
dlga_start:close_window.3 = 12
dlga_start:cunt_view_self = 13
dlga_cunt_view_self:close_window = 14
dlga_start:fugitive_1 = 15
dlga_start:temp_talker = 16
dlga_start:mq_94_1 = 17
dlga_start:mq_95_1 = 18
dlga_start:mainq_20_1 = 19
dlga_start:mainq_48_1 = 20
dlga_start:sister_talk = 21
dlga_start:close_window.4 = 22
dlga_start:female_ply_raped_1 = 23
dlga_start:close_window.5 = 24
dlga_start:close_window.6 = 25
dlga_start:close_window.7 = 26
dlga_start:close_window.8 = 27
dlga_start:lady_slave_prison = 28
dlga_start:close_window.9 = 29
dlga_start:close_window.10 = 30
dlga_start:close_window.11 = 31
dlga_start:close_window.12 = 32
dlga_start:close_window.13 = 33
dlga_start:lord_captured_1 = 34
dlga_start:lord_ntr_1 = 35
dlga_start:lady_capture_1 = 36
dlga_start:close_window.14 = 37
dlga_start:religionist_talk = 38
dlga_start:mq_46_1 = 39
dlga_start:lady_rescue_1 = 40
dlga_start:close_window.15 = 41
dlga_start:lady_talk = 42
dlga_start:lady_talk = 43
dlga_start:lady_talk.1 = 44
dlga_start:attack_innocent_1 = 45
dlga_start:close_window.16 = 46
dlga_start:close_window.17 = 47
dlga_start:temp_commander_talk = 48
dlga_start:lord_misdoubt = 49
dlga_start:lord_prisoner_talk = 50
dlga_start:lord_talk = 51
dlga_start:close_window.18 = 52
dlga_start:lord_talk.1 = 53
dlga_start:lord_propose_to_ply_1 = 54
dlga_start:lord_talk.2 = 55
dlga_start:caravan_master_talk = 56
dlga_start:tavern_mer_rec_1 = 57
dlga_start:prison_guard_talk = 58
dlga_start:close_window.19 = 59
dlga_start:close_window.20 = 60
dlga_start:close_window.21 = 61
dlga_start:close_window.22 = 62
dlga_start:close_window.23 = 63
dlga_start:close_window.24 = 64
dlga_start:orphan_talk = 65
dlga_start:member_talk = 66
dlga_start:member_talk.1 = 67
dlga_start:standby_talk = 68
dlga_start:close_window.25 = 69
dlga_start:ex_lady_rejoin = 70
dlga_start:tavernkeeper_talk = 71
dlga_start:prostitute_talk = 72
dlga_start:bard_talk = 73
dlga_start:traveler_talk = 74
dlga_start:ransom_talk = 75
dlga_start:walker_talk = 76
dlga_start:close_window.26 = 77
dlga_start:close_window.27 = 78
dlga_start:goods_merchant_talk = 79
dlga_start:close_window.28 = 80
dlga_start:academy_pre_talk = 81
dlga_start:insult_woman_mst_talk_1 = 82
dlga_insult_woman_mst_talk_1:insult_woman_mst_talk_2 = 83
dlga_insult_woman_mst_talk_2:insult_woman_mst_talk_3 = 84
dlga_insult_woman_mst_talk_3:insult_woman_mst_talk_4 = 85
dlga_insult_woman_mst_talk_4:insult_woman_mst_talk_5 = 86
dlga_insult_woman_mst_talk_5:close_window = 87
dlga_insult_woman_mst_talk_1:insult_woman_gold_1 = 88
dlga_insult_woman_gold_1:close_window = 89
dlga_tavernkeeper_talk:tavernkeeper_qst_insult_tavern_master = 90
dlga_tavernkeeper_qst_insult_tavern_master:close_window = 91
dlga_start:close_window.29 = 92
dlga_start:woman_player_prosti_talk_1 = 93
dlga_woman_player_prosti_talk_1:woman_player_prosti_talk_2 = 94
dlga_woman_player_prosti_talk_2:woman_player_prosti_talk_3 = 95
dlga_woman_player_prosti_talk_3:woman_player_prosti_talk_4 = 96
dlga_woman_player_prosti_talk_4:woman_player_prosti_talk_5 = 97
dlga_woman_player_prosti_talk_5:woman_player_prosti_talk_6 = 98
dlga_woman_player_prosti_talk_6:woman_player_prosti_talk_7 = 99
dlga_woman_player_prosti_talk_7:close_window = 100
dlga_start:gay_noble_man_mst_talk_1 = 101
dlga_gay_noble_man_mst_talk_1:gay_noble_man_mst_talk_2 = 102
dlga_gay_noble_man_mst_talk_2:close_window = 103
dlga_gay_noble_man_mst_talk_1:gay_noble_man_mst_out = 104
dlga_gay_noble_man_mst_out:close_window = 105
dlga_start:mq_37_1 = 106
dlga_mq_37_1:mq_37_2 = 107
dlga_mq_37_2:mq_37_3 = 108
dlga_mq_37_3:mq_37_4 = 109
dlga_mq_37_4:mq_37_5 = 110
dlga_mq_37_5:mq_37_6 = 111
dlga_mq_37_6:mq_37_7 = 112
dlga_mq_37_7:mq_37_8 = 113
dlga_mq_37_8:mq_37_9 = 114
dlga_mq_37_9:close_window = 115
dlga_start:pope_talk = 116
dlga_start:pope_talk.1 = 117
dlga_start:mq_30_1 = 118
dlga_mq_30_1:mq_30_2 = 119
dlga_mq_30_2:mq_30_3 = 120
dlga_mq_30_3:mq_30_4 = 121
dlga_mq_30_4:mq_30_5 = 122
dlga_mq_30_5:mq_30_6 = 123
dlga_mq_30_6:mq_30_7 = 124
dlga_mq_30_7:mq_30_8 = 125
dlga_mq_30_8:close_window = 126
dlga_start:ex_lady_01_join_1 = 127
dlga_start:close_window.30 = 128
dlga_ex_lady_01_join_1:ex_lady_01_join_2 = 129
dlga_ex_lady_01_join_1:close_window = 130
dlga_ex_lady_01_join_2:ex_lady_01_join_3 = 131
dlga_ex_lady_01_join_3:ex_lady_01_join_4 = 132
dlga_ex_lady_01_join_4:ex_lady_01_join_5 = 133
dlga_ex_lady_01_join_5:ex_lady_01_join_6 = 134
dlga_ex_lady_01_join_5:close_window = 135
dlga_ex_lady_01_join_6:ex_lady_01_join_7 = 136
dlga_ex_lady_01_join_7:ex_lady_01_join_8 = 137
dlga_ex_lady_01_join_8:ex_lady_01_join_9 = 138
dlga_ex_lady_01_join_9:close_window = 139
dlga_start:ex_lady_02_join_1 = 140
dlga_start:close_window.31 = 141
dlga_ex_lady_02_join_1:ex_lady_02_join_2 = 142
dlga_ex_lady_02_join_1:close_window = 143
dlga_ex_lady_02_join_2:ex_lady_02_join_3 = 144
dlga_ex_lady_02_join_3:ex_lady_02_join_4 = 145
dlga_ex_lady_02_join_4:ex_lady_02_join_5 = 146
dlga_ex_lady_02_join_5:ex_lady_02_join_6 = 147
dlga_ex_lady_02_join_5:close_window = 148
dlga_ex_lady_02_join_6:ex_lady_02_join_7 = 149
dlga_ex_lady_02_join_7:ex_lady_02_join_8 = 150
dlga_ex_lady_02_join_8:ex_lady_02_join_9 = 151
dlga_ex_lady_02_join_9:close_window = 152
dlga_start:ex_lady_03_join_1 = 153
dlga_start:close_window.32 = 154
dlga_ex_lady_03_join_1:ex_lady_03_join_2 = 155
dlga_ex_lady_03_join_1:close_window = 156
dlga_ex_lady_03_join_2:ex_lady_03_join_3 = 157
dlga_ex_lady_03_join_3:ex_lady_03_join_4 = 158
dlga_ex_lady_03_join_4:ex_lady_03_join_5 = 159
dlga_ex_lady_03_join_5:ex_lady_03_join_6 = 160
dlga_ex_lady_03_join_5:close_window = 161
dlga_ex_lady_03_join_6:ex_lady_03_join_7 = 162
dlga_ex_lady_03_join_7:ex_lady_03_join_8 = 163
dlga_ex_lady_03_join_8:ex_lady_03_join_9 = 164
dlga_ex_lady_03_join_9:close_window = 165
dlga_start:ex_lady_04_join_1 = 166
dlga_start:close_window.33 = 167
dlga_ex_lady_04_join_1:ex_lady_04_join_2 = 168
dlga_ex_lady_04_join_1:close_window = 169
dlga_ex_lady_04_join_2:ex_lady_04_join_3 = 170
dlga_ex_lady_04_join_3:ex_lady_04_join_4 = 171
dlga_ex_lady_04_join_4:ex_lady_04_join_5 = 172
dlga_ex_lady_04_join_5:ex_lady_04_join_6 = 173
dlga_ex_lady_04_join_5:close_window = 174
dlga_ex_lady_04_join_6:ex_lady_04_join_7 = 175
dlga_ex_lady_04_join_7:ex_lady_04_join_8 = 176
dlga_ex_lady_04_join_8:ex_lady_04_join_9 = 177
dlga_ex_lady_04_join_9:close_window = 178
dlga_start:ex_lady_05_join_1 = 179
dlga_start:close_window.34 = 180
dlga_ex_lady_05_join_1:ex_lady_05_join_2 = 181
dlga_ex_lady_05_join_1:close_window = 182
dlga_ex_lady_05_join_2:ex_lady_05_join_3 = 183
dlga_ex_lady_05_join_3:ex_lady_05_join_4 = 184
dlga_ex_lady_05_join_4:ex_lady_05_join_5 = 185
dlga_ex_lady_05_join_5:ex_lady_05_join_6 = 186
dlga_ex_lady_05_join_5:close_window = 187
dlga_ex_lady_05_join_6:ex_lady_05_join_7 = 188
dlga_ex_lady_05_join_7:ex_lady_05_join_8 = 189
dlga_ex_lady_05_join_8:ex_lady_05_join_9 = 190
dlga_ex_lady_05_join_9:close_window = 191
dlga_start:ex_lady_06_join_1 = 192
dlga_start:close_window.35 = 193
dlga_ex_lady_06_join_1:ex_lady_06_join_2 = 194
dlga_ex_lady_06_join_1:close_window = 195
dlga_ex_lady_06_join_2:ex_lady_06_join_3 = 196
dlga_ex_lady_06_join_3:ex_lady_06_join_4 = 197
dlga_ex_lady_06_join_4:ex_lady_06_join_5 = 198
dlga_ex_lady_06_join_5:ex_lady_06_join_6 = 199
dlga_ex_lady_06_join_5:close_window = 200
dlga_ex_lady_06_join_6:ex_lady_06_join_7 = 201
dlga_ex_lady_06_join_7:ex_lady_06_join_8 = 202
dlga_ex_lady_06_join_8:ex_lady_06_join_9 = 203
dlga_ex_lady_06_join_9:close_window = 204
dlga_start:ex_lady_07_join_1 = 205
dlga_start:close_window.36 = 206
dlga_ex_lady_07_join_1:ex_lady_07_join_2 = 207
dlga_ex_lady_07_join_1:close_window = 208
dlga_ex_lady_07_join_2:ex_lady_07_join_3 = 209
dlga_ex_lady_07_join_3:ex_lady_07_join_4 = 210
dlga_ex_lady_07_join_4:ex_lady_07_join_5 = 211
dlga_ex_lady_07_join_5:ex_lady_07_join_6 = 212
dlga_ex_lady_07_join_5:close_window = 213
dlga_ex_lady_07_join_6:ex_lady_07_join_7 = 214
dlga_ex_lady_07_join_7:ex_lady_07_join_8 = 215
dlga_ex_lady_07_join_8:ex_lady_07_join_9 = 216
dlga_ex_lady_07_join_9:close_window = 217
dlga_start:ex_lady_08_join_1 = 218
dlga_start:close_window.37 = 219
dlga_ex_lady_08_join_1:ex_lady_08_join_2 = 220
dlga_ex_lady_08_join_1:close_window = 221
dlga_ex_lady_08_join_2:ex_lady_08_join_3 = 222
dlga_ex_lady_08_join_3:ex_lady_08_join_4 = 223
dlga_ex_lady_08_join_4:ex_lady_08_join_5 = 224
dlga_ex_lady_08_join_5:ex_lady_08_join_6 = 225
dlga_ex_lady_08_join_5:close_window = 226
dlga_ex_lady_08_join_6:ex_lady_08_join_7 = 227
dlga_ex_lady_08_join_7:ex_lady_08_join_8 = 228
dlga_ex_lady_08_join_8:ex_lady_08_join_9 = 229
dlga_ex_lady_08_join_9:close_window = 230
dlga_start:ex_lady_09_join_1 = 231
dlga_start:close_window.38 = 232
dlga_ex_lady_09_join_1:ex_lady_09_join_2 = 233
dlga_ex_lady_09_join_1:close_window = 234
dlga_ex_lady_09_join_2:ex_lady_09_join_3 = 235
dlga_ex_lady_09_join_3:ex_lady_09_join_4 = 236
dlga_ex_lady_09_join_4:ex_lady_09_join_5 = 237
dlga_ex_lady_09_join_5:ex_lady_09_join_6 = 238
dlga_ex_lady_09_join_5:close_window = 239
dlga_ex_lady_09_join_6:ex_lady_09_join_7 = 240
dlga_ex_lady_09_join_7:ex_lady_09_join_8 = 241
dlga_ex_lady_09_join_8:ex_lady_09_join_9 = 242
dlga_ex_lady_09_join_9:close_window = 243
dlga_start:ex_lady_10_join_1 = 244
dlga_start:close_window.39 = 245
dlga_ex_lady_10_join_1:ex_lady_10_join_2 = 246
dlga_ex_lady_10_join_1:close_window = 247
dlga_ex_lady_10_join_2:ex_lady_10_join_3 = 248
dlga_ex_lady_10_join_3:ex_lady_10_join_4 = 249
dlga_ex_lady_10_join_4:ex_lady_10_join_5 = 250
dlga_ex_lady_10_join_5:ex_lady_10_join_6 = 251
dlga_ex_lady_10_join_5:close_window = 252
dlga_ex_lady_10_join_6:ex_lady_10_join_7 = 253
dlga_ex_lady_10_join_7:ex_lady_10_join_8 = 254
dlga_ex_lady_10_join_8:ex_lady_10_join_9 = 255
dlga_ex_lady_10_join_9:close_window = 256
dlga_start:ex_lady_11_join_1 = 257
dlga_start:close_window.40 = 258
dlga_ex_lady_11_join_1:ex_lady_11_join_2 = 259
dlga_ex_lady_11_join_1:close_window = 260
dlga_ex_lady_11_join_2:ex_lady_11_join_3 = 261
dlga_ex_lady_11_join_3:ex_lady_11_join_4 = 262
dlga_ex_lady_11_join_4:ex_lady_11_join_5 = 263
dlga_ex_lady_11_join_5:ex_lady_11_join_6 = 264
dlga_ex_lady_11_join_5:close_window = 265
dlga_ex_lady_11_join_6:ex_lady_11_join_7 = 266
dlga_ex_lady_11_join_7:ex_lady_11_join_8 = 267
dlga_ex_lady_11_join_8:ex_lady_11_join_9 = 268
dlga_ex_lady_11_join_9:close_window = 269
dlga_start:ex_lady_12_join_1 = 270
dlga_ex_lady_12_join_1:ex_lady_12_join_2 = 271
dlga_ex_lady_12_join_2:ex_lady_12_join_3 = 272
dlga_ex_lady_12_join_3:ex_lady_12_join_4 = 273
dlga_ex_lady_12_join_4:ex_lady_12_join_5 = 274
dlga_ex_lady_12_join_5:ex_lady_12_join_6 = 275
dlga_ex_lady_12_join_5:close_window = 276
dlga_ex_lady_12_join_6:close_window = 277
dlga_start:ex_lady_13_join_1 = 278
dlga_start:close_window.41 = 279
dlga_ex_lady_13_join_1:ex_lady_13_join_2 = 280
dlga_ex_lady_13_join_1:close_window = 281
dlga_ex_lady_13_join_2:ex_lady_13_join_3 = 282
dlga_ex_lady_13_join_3:ex_lady_13_join_4 = 283
dlga_ex_lady_13_join_4:ex_lady_13_join_5 = 284
dlga_ex_lady_13_join_5:ex_lady_13_join_5_1 = 285
dlga_ex_lady_13_join_5:close_window = 286
dlga_ex_lady_13_join_5_1:ex_lady_13_join_5_2 = 287
dlga_ex_lady_13_join_5_2:ex_lady_13_join_5_3 = 288
dlga_ex_lady_13_join_5_3:ex_lady_13_join_6 = 289
dlga_ex_lady_13_join_6:ex_lady_13_join_9 = 290
dlga_ex_lady_13_join_9:close_window = 291
dlga_start:ex_lady_14_join_1 = 292
dlga_start:close_window.42 = 293
dlga_ex_lady_14_join_1:ex_lady_14_join_2 = 294
dlga_ex_lady_14_join_1:close_window = 295
dlga_ex_lady_14_join_2:ex_lady_14_join_3 = 296
dlga_ex_lady_14_join_3:ex_lady_14_join_4 = 297
dlga_ex_lady_14_join_4:ex_lady_14_join_5 = 298
dlga_ex_lady_14_join_5:ex_lady_14_join_6 = 299
dlga_ex_lady_14_join_5:close_window = 300
dlga_ex_lady_14_join_6:ex_lady_14_join_7 = 301
dlga_ex_lady_14_join_7:ex_lady_14_join_8 = 302
dlga_ex_lady_14_join_8:ex_lady_14_join_9 = 303
dlga_ex_lady_14_join_9:close_window = 304
dlga_start:ex_lady_15_join_1 = 305
dlga_start:close_window.43 = 306
dlga_ex_lady_15_join_1:ex_lady_15_join_2 = 307
dlga_ex_lady_15_join_1:close_window = 308
dlga_ex_lady_15_join_2:ex_lady_15_join_3 = 309
dlga_ex_lady_15_join_3:ex_lady_15_join_4 = 310
dlga_ex_lady_15_join_4:ex_lady_15_join_5 = 311
dlga_ex_lady_15_join_5:ex_lady_15_join_6 = 312
dlga_ex_lady_15_join_5:close_window = 313
dlga_ex_lady_15_join_6:ex_lady_15_join_7 = 314
dlga_ex_lady_15_join_7:ex_lady_15_join_8 = 315
dlga_ex_lady_15_join_8:ex_lady_15_join_9 = 316
dlga_ex_lady_15_join_9:close_window = 317
dlga_start:ex_lady_16_join_1 = 318
dlga_start:close_window.44 = 319
dlga_ex_lady_16_join_1:ex_lady_16_join_2 = 320
dlga_ex_lady_16_join_1:close_window = 321
dlga_ex_lady_16_join_2:ex_lady_16_join_3 = 322
dlga_ex_lady_16_join_3:ex_lady_16_join_4 = 323
dlga_ex_lady_16_join_4:ex_lady_16_join_5 = 324
dlga_ex_lady_16_join_5:ex_lady_16_join_6 = 325
dlga_ex_lady_16_join_5:close_window = 326
dlga_ex_lady_16_join_6:ex_lady_16_join_7 = 327
dlga_ex_lady_16_join_7:ex_lady_16_join_8 = 328
dlga_ex_lady_16_join_8:ex_lady_16_join_9 = 329
dlga_ex_lady_16_join_9:close_window = 330
dlga_start:ex_lady_17_join_1 = 331
dlga_start:close_window.45 = 332
dlga_ex_lady_17_join_1:ex_lady_17_join_2 = 333
dlga_ex_lady_17_join_1:close_window = 334
dlga_ex_lady_17_join_2:ex_lady_17_join_3 = 335
dlga_ex_lady_17_join_3:ex_lady_17_join_4 = 336
dlga_ex_lady_17_join_4:ex_lady_17_join_5 = 337
dlga_ex_lady_17_join_5:ex_lady_17_join_6 = 338
dlga_ex_lady_17_join_5:close_window = 339
dlga_ex_lady_17_join_6:ex_lady_17_join_7 = 340
dlga_ex_lady_17_join_7:ex_lady_17_join_8 = 341
dlga_ex_lady_17_join_8:ex_lady_17_join_9 = 342
dlga_ex_lady_17_join_9:close_window = 343
dlga_start:ex_lady_18_join_1 = 344
dlga_start:close_window.46 = 345
dlga_ex_lady_18_join_1:ex_lady_18_join_2 = 346
dlga_ex_lady_18_join_1:close_window = 347
dlga_ex_lady_18_join_2:ex_lady_18_join_3 = 348
dlga_ex_lady_18_join_3:ex_lady_18_join_4 = 349
dlga_ex_lady_18_join_4:ex_lady_18_join_5 = 350
dlga_ex_lady_18_join_5:ex_lady_18_join_6 = 351
dlga_ex_lady_18_join_5:close_window = 352
dlga_ex_lady_18_join_6:ex_lady_18_join_7 = 353
dlga_ex_lady_18_join_7:ex_lady_18_join_8 = 354
dlga_ex_lady_18_join_8:ex_lady_18_join_9 = 355
dlga_ex_lady_18_join_9:close_window = 356
dlga_ex_lady_rejoin:ex_lady_rejoin_2 = 357
dlga_ex_lady_rejoin_2:close_window = 358
dlga_ex_lady_rejoin_2:close_window.1 = 359
dlga_ex_lady_rejoin:close_window = 360
dlga_start:npc1_join_1 = 361
dlga_start:close_window.47 = 362
dlga_npc1_join_1:npc1_join_2 = 363
dlga_npc1_join_1:close_window = 364
dlga_npc1_join_2:npc1_join_3 = 365
dlga_npc1_join_3:npc1_join_4 = 366
dlga_npc1_join_4:npc1_join_5 = 367
dlga_npc1_join_5:npc1_join_6 = 368
dlga_npc1_join_5:close_window = 369
dlga_npc1_join_6:npc1_join_7 = 370
dlga_npc1_join_7:npc1_join_8 = 371
dlga_npc1_join_8:close_window = 372
dlga_npc1_join_8:close_window.1 = 373
dlga_start:npc2_join_1 = 374
dlga_start:close_window.48 = 375
dlga_npc2_join_1:npc2_join_2 = 376
dlga_npc2_join_1:close_window = 377
dlga_npc2_join_2:npc2_join_3 = 378
dlga_npc2_join_3:npc2_join_4 = 379
dlga_npc2_join_4:npc2_join_5 = 380
dlga_npc2_join_5:npc2_join_6 = 381
dlga_npc2_join_5:close_window = 382
dlga_npc2_join_6:npc2_join_7 = 383
dlga_npc2_join_7:npc2_join_8 = 384
dlga_npc2_join_8:close_window = 385
dlga_npc2_join_8:close_window.1 = 386
dlga_start:npc3_join_1 = 387
dlga_start:close_window.49 = 388
dlga_npc3_join_1:npc3_join_2 = 389
dlga_npc3_join_1:close_window = 390
dlga_npc3_join_2:npc3_join_3 = 391
dlga_npc3_join_3:npc3_join_4 = 392
dlga_npc3_join_4:npc3_join_5 = 393
dlga_npc3_join_5:npc3_join_6 = 394
dlga_npc3_join_5:close_window = 395
dlga_npc3_join_6:npc3_join_7 = 396
dlga_npc3_join_7:npc3_join_8 = 397
dlga_npc3_join_8:close_window = 398
dlga_npc3_join_8:close_window.1 = 399
dlga_start:npc4_join_1 = 400
dlga_start:close_window.50 = 401
dlga_npc4_join_1:npc4_join_2 = 402
dlga_npc4_join_1:close_window = 403
dlga_npc4_join_2:npc4_join_3 = 404
dlga_npc4_join_3:npc4_join_4 = 405
dlga_npc4_join_4:npc4_join_5 = 406
dlga_npc4_join_5:npc4_join_6 = 407
dlga_npc4_join_5:close_window = 408
dlga_npc4_join_6:npc4_join_7 = 409
dlga_npc4_join_7:npc4_join_8 = 410
dlga_npc4_join_8:close_window = 411
dlga_npc4_join_8:close_window.1 = 412
dlga_start:npc5_join_1 = 413
dlga_start:close_window.51 = 414
dlga_npc5_join_1:npc5_join_2 = 415
dlga_npc5_join_1:close_window = 416
dlga_npc5_join_2:npc5_join_3 = 417
dlga_npc5_join_3:npc5_join_4 = 418
dlga_npc5_join_4:npc5_join_5 = 419
dlga_npc5_join_5:npc5_join_6 = 420
dlga_npc5_join_5:close_window = 421
dlga_npc5_join_6:npc5_join_7 = 422
dlga_npc5_join_7:npc5_join_8 = 423
dlga_npc5_join_8:close_window = 424
dlga_npc5_join_8:close_window.1 = 425
dlga_start:npc6_join_1 = 426
dlga_start:close_window.52 = 427
dlga_npc6_join_1:npc6_join_2 = 428
dlga_npc6_join_1:close_window = 429
dlga_npc6_join_2:npc6_join_3 = 430
dlga_npc6_join_3:npc6_join_4 = 431
dlga_npc6_join_4:npc6_join_5 = 432
dlga_npc6_join_5:npc6_join_6 = 433
dlga_npc6_join_5:close_window = 434
dlga_npc6_join_6:npc6_join_7 = 435
dlga_npc6_join_7:npc6_join_8 = 436
dlga_npc6_join_8:close_window = 437
dlga_npc6_join_8:close_window.1 = 438
dlga_start:npc7_join_1 = 439
dlga_start:close_window.53 = 440
dlga_npc7_join_1:npc7_join_2 = 441
dlga_npc7_join_1:close_window = 442
dlga_npc7_join_2:npc7_join_3 = 443
dlga_npc7_join_3:npc7_join_4 = 444
dlga_npc7_join_4:npc7_join_5 = 445
dlga_npc7_join_5:npc7_join_6 = 446
dlga_npc7_join_5:close_window = 447
dlga_npc7_join_6:npc7_join_7 = 448
dlga_npc7_join_7:npc7_join_8 = 449
dlga_npc7_join_8:close_window = 450
dlga_npc7_join_8:close_window.1 = 451
dlga_start:npc9_join_1 = 452
dlga_start:close_window.54 = 453
dlga_npc9_join_1:npc9_join_2 = 454
dlga_npc9_join_1:close_window = 455
dlga_npc9_join_2:npc9_join_3 = 456
dlga_npc9_join_3:npc9_join_4 = 457
dlga_npc9_join_4:npc9_join_5 = 458
dlga_npc9_join_5:npc9_join_6 = 459
dlga_npc9_join_5:close_window = 460
dlga_npc9_join_6:npc9_join_7 = 461
dlga_npc9_join_7:npc9_join_8 = 462
dlga_npc9_join_8:close_window = 463
dlga_npc9_join_8:close_window.1 = 464
dlga_start:npc11_join_1 = 465
dlga_start:close_window.55 = 466
dlga_npc11_join_1:npc11_join_2 = 467
dlga_npc11_join_1:close_window = 468
dlga_npc11_join_2:npc11_join_3 = 469
dlga_npc11_join_3:npc11_join_4 = 470
dlga_npc11_join_4:npc11_join_5 = 471
dlga_npc11_join_5:npc11_join_6 = 472
dlga_npc11_join_5:close_window = 473
dlga_npc11_join_6:npc11_join_7 = 474
dlga_npc11_join_7:npc11_join_8 = 475
dlga_npc11_join_8:close_window = 476
dlga_npc11_join_8:close_window.1 = 477
dlga_start:npc12_join_1 = 478
dlga_start:close_window.56 = 479
dlga_npc12_join_1:npc12_join_2 = 480
dlga_npc12_join_1:close_window = 481
dlga_npc12_join_2:npc12_join_3 = 482
dlga_npc12_join_3:npc12_join_4 = 483
dlga_npc12_join_4:npc12_join_5 = 484
dlga_npc12_join_5:npc12_join_6 = 485
dlga_npc12_join_5:close_window = 486
dlga_npc12_join_6:npc12_join_7 = 487
dlga_npc12_join_7:npc12_join_8 = 488
dlga_npc12_join_8:close_window = 489
dlga_npc12_join_8:close_window.1 = 490
dlga_start:npc13_join_1 = 491
dlga_npc13_join_1:npc13_join_2 = 492
dlga_npc13_join_1:close_window = 493
dlga_npc13_join_2:npc13_join_3 = 494
dlga_npc13_join_3:npc13_join_4 = 495
dlga_npc13_join_4:npc13_join_5 = 496
dlga_npc13_join_5:npc13_join_6 = 497
dlga_npc13_join_5:close_window = 498
dlga_npc13_join_6:npc13_join_7 = 499
dlga_npc13_join_7:npc13_join_8 = 500
dlga_npc13_join_8:close_window = 501
dlga_npc13_join_8:close_window.1 = 502
dlga_start:npc14_join_1 = 503
dlga_npc14_join_1:npc14_join_2 = 504
dlga_npc14_join_1:close_window = 505
dlga_npc14_join_2:npc14_join_3 = 506
dlga_npc14_join_3:npc14_join_4 = 507
dlga_npc14_join_4:npc14_join_5 = 508
dlga_npc14_join_5:npc14_join_6 = 509
dlga_npc14_join_5:close_window = 510
dlga_npc14_join_6:npc14_join_7 = 511
dlga_npc14_join_7:npc14_join_8 = 512
dlga_npc14_join_8:close_window = 513
dlga_npc14_join_8:close_window.1 = 514
dlga_start:npc16_join_1 = 515
dlga_npc16_join_1:npc16_join_2 = 516
dlga_npc16_join_1:close_window = 517
dlga_npc16_join_2:npc16_join_3 = 518
dlga_npc16_join_3:npc16_join_4 = 519
dlga_npc16_join_4:npc16_join_5 = 520
dlga_npc16_join_5:npc16_join_6 = 521
dlga_npc16_join_5:close_window = 522
dlga_npc16_join_6:npc16_join_7 = 523
dlga_npc16_join_7:npc16_join_8 = 524
dlga_npc16_join_8:close_window = 525
dlga_npc16_join_8:close_window.1 = 526
dlga_start:npc19_join_1 = 527
dlga_start:close_window.57 = 528
dlga_npc19_join_1:npc19_join_2 = 529
dlga_npc19_join_1:close_window = 530
dlga_npc19_join_2:npc19_join_3 = 531
dlga_npc19_join_3:npc19_join_4 = 532
dlga_npc19_join_4:npc19_join_5 = 533
dlga_npc19_join_5:npc19_join_6 = 534
dlga_npc19_join_5:close_window = 535
dlga_npc19_join_6:npc19_join_7 = 536
dlga_npc19_join_7:npc19_join_8 = 537
dlga_npc19_join_8:close_window = 538
dlga_npc19_join_8:close_window.1 = 539
dlga_start:npc21_join_1 = 540
dlga_start:close_window.58 = 541
dlga_npc21_join_1:npc21_join_2 = 542
dlga_npc21_join_1:close_window = 543
dlga_npc21_join_2:npc21_join_3 = 544
dlga_npc21_join_3:npc21_join_4 = 545
dlga_npc21_join_4:npc21_join_5 = 546
dlga_npc21_join_5:npc21_join_6 = 547
dlga_npc21_join_5:close_window = 548
dlga_npc21_join_6:npc21_join_7 = 549
dlga_npc21_join_7:npc21_join_8 = 550
dlga_npc21_join_8:close_window = 551
dlga_npc21_join_8:close_window.1 = 552
dlga_start:npc_23_join_1 = 553
dlga_start:close_window.59 = 554
dlga_npc_23_join_1:npc_23_join_2 = 555
dlga_npc_23_join_1:close_window = 556
dlga_npc_23_join_2:npc_23_join_3 = 557
dlga_npc_23_join_3:npc_23_join_4 = 558
dlga_npc_23_join_4:npc_23_join_5 = 559
dlga_npc_23_join_5:npc_23_join_6 = 560
dlga_npc_23_join_5:close_window = 561
dlga_npc_23_join_6:npc_23_join_7 = 562
dlga_npc_23_join_7:npc_23_join_8 = 563
dlga_npc_23_join_8:close_window = 564
dlga_npc_23_join_8:close_window.1 = 565
dlga_start:npc_24_join_1 = 566
dlga_start:close_window.60 = 567
dlga_npc_24_join_1:npc_24_join_2 = 568
dlga_npc_24_join_1:close_window = 569
dlga_npc_24_join_2:npc_24_join_3 = 570
dlga_npc_24_join_3:npc_24_join_4 = 571
dlga_npc_24_join_4:npc_24_join_5 = 572
dlga_npc_24_join_5:npc_24_join_6 = 573
dlga_npc_24_join_5:close_window = 574
dlga_npc_24_join_6:npc_24_join_7 = 575
dlga_npc_24_join_7:npc_24_join_8 = 576
dlga_npc_24_join_8:close_window = 577
dlga_npc_24_join_8:close_window.1 = 578
dlga_start:npc_26_join_1 = 579
dlga_start:close_window.61 = 580
dlga_npc_26_join_1:npc_26_join_2 = 581
dlga_npc_26_join_1:close_window = 582
dlga_npc_26_join_2:npc_26_join_3 = 583
dlga_npc_26_join_3:npc_26_join_4 = 584
dlga_npc_26_join_4:npc_26_join_5 = 585
dlga_npc_26_join_5:npc_26_join_6 = 586
dlga_npc_26_join_5:close_window = 587
dlga_npc_26_join_6:npc_26_join_7 = 588
dlga_npc_26_join_7:npc_26_join_8 = 589
dlga_npc_26_join_8:close_window = 590
dlga_npc_26_join_8:close_window.1 = 591
dlga_start:close_window.62 = 592
dlga_do_member_trade:member_talk = 593
dlga_member_talk:view_member_char_requested = 594
dlga_view_member_char_requested:do_member_view_char = 595
dlga_do_member_view_char:member_talk = 596
dlga_member_talk:spouse_join = 597
dlga_member_talk:member_comp_relation_talk = 598
dlga_member_comp_relation_talk:close_window = 599
dlga_member_comp_relation_talk:close_window.1 = 600
dlga_member_comp_relation_talk:close_window.2 = 601
dlga_member_comp_relation_talk:close_window.3 = 602
dlga_member_comp_relation_talk:close_window.4 = 603
dlga_member_talk:member_bonus_explain = 604
dlga_member_bonus_explain:member_talk = 605
dlga_member_bonus_explain:member_talk.1 = 606
dlga_member_bonus_explain:member_talk.2 = 607
dlga_member_bonus_explain:member_talk.3 = 608
dlga_member_bonus_explain:member_talk.4 = 609
dlga_member_bonus_explain:member_talk.5 = 610
dlga_member_bonus_explain:member_talk.6 = 611
dlga_member_bonus_explain:member_talk.7 = 612
dlga_member_bonus_explain:member_talk.8 = 613
dlga_member_bonus_explain:member_talk.9 = 614
dlga_member_bonus_explain:member_talk.10 = 615
dlga_member_bonus_explain:member_talk.11 = 616
dlga_member_bonus_explain:member_talk.12 = 617
dlga_member_bonus_explain:member_talk.13 = 618
dlga_member_bonus_explain:member_talk.14 = 619
dlga_member_bonus_explain:member_talk.15 = 620
dlga_member_bonus_explain:member_talk.16 = 621
dlga_member_bonus_explain:member_talk.17 = 622
dlga_member_bonus_explain:member_talk.18 = 623
dlga_member_bonus_explain:member_talk.19 = 624
dlga_member_bonus_explain:member_talk.20 = 625
dlga_member_bonus_explain:member_talk.21 = 626
dlga_member_bonus_explain:member_talk.22 = 627
dlga_member_bonus_explain:member_talk.23 = 628
dlga_member_bonus_explain:member_talk.24 = 629
dlga_member_bonus_explain:member_talk.25 = 630
dlga_member_bonus_explain:member_talk.26 = 631
dlga_member_bonus_explain:member_talk.27 = 632
dlga_member_bonus_explain:member_talk.28 = 633
dlga_member_bonus_explain:member_talk.29 = 634
dlga_member_bonus_explain:member_talk.30 = 635
dlga_member_bonus_explain:member_talk.31 = 636
dlga_member_bonus_explain:member_talk.32 = 637
dlga_member_bonus_explain:member_talk.33 = 638
dlga_member_talk:member_guard_1 = 639
dlga_member_guard_1:close_window = 640
dlga_member_talk:member_eyecandy_1 = 641
dlga_member_eyecandy_1:close_window = 642
dlga_member_talk:marry_for_man_player_1 = 643
dlga_marry_for_man_player_1:marry_for_man_player_2 = 644
dlga_marry_for_man_player_2:member_marryme = 645
dlga_marry_for_man_player_2:close_window = 646
dlga_member_marryme:close_window = 647
dlga_member_marryme:member_marryme_2 = 648
dlga_member_marryme_2:member_marryme_3 = 649
dlga_member_marryme_3:member_marryme_4 = 650
dlga_member_marryme_4:member_marryme_5 = 651
dlga_member_marryme_4:close_window = 652
dlga_member_marryme_5:close_window = 653
dlga_member_talk:dance_camp_comp = 654
dlga_dance_camp_comp:dance_camp_comp_choose = 655
dlga_dance_camp_comp_choose:dance_camp_comp = 656
dlga_dance_camp_comp_choose:dance_camp_comp.1 = 657
dlga_dance_camp_comp_choose:dance_camp_comp.2 = 658
dlga_dance_camp_comp_choose:dance_camp_comp.3 = 659
dlga_dance_camp_comp_choose:dance_camp_comp.4 = 660
dlga_dance_camp_comp_choose:dance_camp_comp.5 = 661
dlga_dance_camp_comp_choose:close_window = 662
dlga_dance_camp_comp_choose:close_window.1 = 663
dlga_member_talk:qua_dance_q = 664
dlga_qua_dance_q:qua_dance_a = 665
dlga_qua_dance_a:qua_dance_q = 666
dlga_qua_dance_a:qua_dance_q.1 = 667
dlga_qua_dance_a:qua_dance_q.2 = 668
dlga_qua_dance_a:qua_dance_q.3 = 669
dlga_qua_dance_a:qua_dance_q.4 = 670
dlga_qua_dance_a:qua_dance_q.5 = 671
dlga_qua_dance_a:close_window = 672
dlga_qua_dance_a:close_window.1 = 673
dlga_member_talk:member_blowjob_fe_plyr_1 = 674
dlga_member_blowjob_fe_plyr_1:member_blowjob_fe_plyr_2 = 675
dlga_member_blowjob_fe_plyr_2:member_blowjob_fe_plyr_3 = 676
dlga_member_blowjob_fe_plyr_3:member_blowjob_fe_plyr_4 = 677
dlga_member_blowjob_fe_plyr_4:member_blowjob_fe_plyr_5 = 678
dlga_member_blowjob_fe_plyr_5:member_blowjob_fe_plyr_6 = 679
dlga_member_blowjob_fe_plyr_6:close_window = 680
dlga_member_blowjob_fe_plyr_6:member_talk = 681
dlga_member_talk:member_blowjob = 682
dlga_member_blowjob:member_blowjob2 = 683
dlga_member_blowjob:member_blowjob2.1 = 684
dlga_member_blowjob2:member_blowjob3 = 685
dlga_member_blowjob3:member_blowjob_face = 686
dlga_member_blowjob3:member_blowjob_mouth = 687
dlga_member_blowjob_face:member_talk = 688
dlga_member_blowjob_mouth:member_talk = 689
dlga_member_blowjob:member_talk = 690
dlga_member_talk:fuck_camp_comp_prepre = 691
dlga_fuck_camp_comp_prepre:close_window = 692
dlga_fuck_camp_comp_prepre:close_window.1 = 693
dlga_member_talk:man_ply_camp_three_some = 694
dlga_man_ply_camp_three_some:close_window = 695
dlga_man_ply_camp_three_some:close_window.1 = 696
dlga_member_talk:man_ply_suggest_threesome = 697
dlga_man_ply_suggest_threesome:close_window = 698
dlga_man_ply_suggest_threesome:close_window.1 = 699
dlga_member_talk:qua_fuckchoose_tent = 700
dlga_qua_fuckchoose_tent:close_window = 701
dlga_member_talk:fuck_camp_comp_for_girl_pre = 702
dlga_fuck_camp_comp_for_girl_pre:close_window = 703
dlga_member_talk:girl_ply_camp_three_some = 704
dlga_girl_ply_camp_three_some:close_window = 705
dlga_member_talk:girl_ply_suggest_threesome = 706
dlga_girl_ply_suggest_threesome:close_window = 707
dlga_member_talk:member_sexofficer_a = 708
dlga_member_sexofficer_a:member_talk = 709
dlga_member_sexofficer_a:member_talk.1 = 710
dlga_member_sexofficer_a:member_talk.2 = 711
dlga_member_talk:member_ass_agree = 712
dlga_member_ass_agree:member_ass_agree_a = 713
dlga_member_ass_agree_a:member_ass_agree_b = 714
dlga_member_ass_agree_a:member_talk = 715
dlga_member_ass_agree_b:member_talk = 716
dlga_member_talk:member_ass_stops = 717
dlga_member_ass_stops:member_talk = 718
dlga_member_talk:member_cunt_view = 719
dlga_member_cunt_view:member_cunt_mess_1 = 720
dlga_member_cunt_mess_1:member_cunt_mess_2 = 721
dlga_member_cunt_mess_2:member_talk = 722
dlga_member_talk:member_separate = 723
dlga_member_separate:member_separate_confirm = 724
dlga_member_separate_confirm:member_separate_yes = 725
dlga_member_separate_confirm:do_member_trade = 726
dlga_member_separate_yes:close_window = 727
dlga_member_talk:spouse_leave = 728
dlga_spouse_leave:spouse_leave_confirm = 729
dlga_spouse_leave_confirm:close_window = 730
dlga_spouse_leave_confirm:do_member_trade = 731
dlga_member_talk:hore_leave = 732
dlga_hore_leave:hore_leave_confirm = 733
dlga_hore_leave_confirm:close_window = 734
dlga_hore_leave_confirm:do_member_trade = 735
dlga_member_talk:close_window = 736
dlga_prostitute_talk:prostitute_join = 737
dlga_prostitute_join:close_window = 738
dlga_prostitute_talk:prostitute_blow_job_1 = 739
dlga_prostitute_blow_job_1:prostitute_blow_job_1_2 = 740
dlga_prostitute_blow_job_1_2:prostitute_blow_job_2 = 741
dlga_prostitute_blow_job_2:prostitute_blow_job_3_face = 742
dlga_prostitute_blow_job_2:prostitute_blow_job_3_mouth = 743
dlga_prostitute_blow_job_3_face:prostitute_talk = 744
dlga_prostitute_blow_job_3_mouth:prostitute_talk = 745
dlga_prostitute_talk:qua_fuckchoose = 746
dlga_prostitute_talk:qua_dance_q = 747
dlga_qua_fuckchoose:qua_fuckchoose_style = 748
dlga_qua_fuckchoose_style:close_window = 749
dlga_qua_fuckchoose_style:close_window.1 = 750
dlga_prostitute_talk:close_window = 751
dlga_standby_talk:standby_join = 752
dlga_standby_join:close_window = 753
dlga_standby_talk:close_window = 754
dlga_tavernkeeper_talk:mq_75_1 = 755
dlga_mq_75_1:mq_75_2 = 756
dlga_mq_75_2:close_window = 757
dlga_mq_75_1:close_window = 758
dlga_mq_75_1:tavernkeeper_talk = 759
dlga_tavernkeeper_talk:mq_1_1 = 760
dlga_mq_1_1:mq_1_2 = 761
dlga_mq_1_2:mq_1_3 = 762
dlga_mq_1_3:mq_1_4 = 763
dlga_mq_1_4:mq_1_5 = 764
dlga_mq_1_5:mq_1_6 = 765
dlga_mq_1_5:close_window = 766
dlga_mq_1_6:close_window = 767
dlga_tavernkeeper_talk:tavernkeeper_quest = 768
dlga_tavernkeeper_talk:tavernkeeper_quest_cancel = 769
dlga_tavernkeeper_quest_cancel:close_window = 770
dlga_tavernkeeper_talk:tavernkeeper_alcohol_end = 771
dlga_tavernkeeper_alcohol_end:close_window = 772
dlga_tavernkeeper_talk:tavernkeeper_headhunt_end = 773
dlga_tavernkeeper_headhunt_end:close_window = 774
dlga_tavernkeeper_talk:tavernkeeper_buy_drinks_troops = 775
dlga_tavernkeeper_buy_drinks_troops:tavernkeeper_buy_drinks_troops_2 = 776
dlga_tavernkeeper_buy_drinks_troops_2:tavernkeeper_buy_drinks_troops_end = 777
dlga_tavernkeeper_buy_drinks_troops_end:tavernkeeper_pretalk = 778
dlga_tavernkeeper_buy_drinks_troops_2:tavernkeeper_pretalk = 779
dlga_tavernkeeper_talk:close_window = 780
dlga_tavernkeeper_pretalk:tavernkeeper_talk = 781
dlga_walker_talk:walker_mq_4_1 = 782
dlga_walker_mq_4_1:close_window = 783
dlga_walker_mq_4_1:walker_mq_4_2 = 784
dlga_walker_mq_4_2:close_window = 785
dlga_walker_mq_4_1:close_window.1 = 786
dlga_walker_talk:try_to_kidnap_sys = 787
dlga_try_to_kidnap_sys:try_to_kidnap_sys_persue = 788
dlga_try_to_kidnap_sys_persue:try_to_kidnap_sys_conclud = 789
dlga_try_to_kidnap_sys_persue:close_window = 790
dlga_try_to_kidnap_sys_conclud:close_window = 791
dlga_try_to_kidnap_sys_conclud:close_window.1 = 792
dlga_walker_talk:walker_try_prosti_1 = 793
dlga_walker_try_prosti_1:walker_try_prosti_2 = 794
dlga_walker_try_prosti_2:walker_try_prosti_blowjob_1 = 795
dlga_walker_try_prosti_blowjob_1:walker_try_prosti_blowjob_2 = 796
dlga_walker_try_prosti_blowjob_2:walker_try_prosti_blowjob_3 = 797
dlga_walker_try_prosti_blowjob_3:walker_try_prosti_blowjob_4 = 798
dlga_walker_try_prosti_blowjob_4:walker_try_prosti_blowjob_5 = 799
dlga_walker_try_prosti_blowjob_4:walker_try_prosti_blowjob_5.1 = 800
dlga_walker_try_prosti_blowjob_5:walker_talk = 801
dlga_walker_try_prosti_blowjob_1:close_window = 802
dlga_walker_try_prosti_2:walker_try_prosti_fuck_1 = 803
dlga_walker_try_prosti_fuck_1:close_window = 804
dlga_walker_try_prosti_fuck_1:close_window.1 = 805
dlga_walker_try_prosti_2:close_window = 806
dlga_walker_talk:walker_fem_pl_prosti_1 = 807
dlga_walker_fem_pl_prosti_1:walker_fem_pl_prosti_2 = 808
dlga_walker_fem_pl_prosti_2:walker_fem_pl_prosti_blow_job_1 = 809
dlga_walker_fem_pl_prosti_blow_job_1:walker_fem_pl_prosti_blow_job_2 = 810
dlga_walker_fem_pl_prosti_blow_job_2:walker_fem_pl_prosti_blow_job_3 = 811
dlga_walker_fem_pl_prosti_blow_job_3:walker_fem_pl_prosti_blow_job_4 = 812
dlga_walker_fem_pl_prosti_blow_job_4:walker_fem_pl_prosti_blow_job_5 = 813
dlga_walker_fem_pl_prosti_blow_job_5:walker_talk = 814
dlga_walker_fem_pl_prosti_blow_job_1:close_window = 815
dlga_walker_fem_pl_prosti_2:walker_fem_pl_try_prosti_fuck_1 = 816
dlga_walker_fem_pl_try_prosti_fuck_1:close_window = 817
dlga_walker_fem_pl_try_prosti_fuck_1:close_window.1 = 818
dlga_walker_fem_pl_prosti_2:close_window = 819
dlga_walker_talk:walker_talk_rumor = 820
dlga_walker_talk_rumor:walker_talk = 821
dlga_walker_talk:close_window = 822
dlga_goods_merchant_talk:try_to_steal_sys = 823
dlga_try_to_steal_sys:try_to_steal_sys_2 = 824
dlga_try_to_steal_sys_2:close_window = 825
dlga_try_to_steal_sys_2:close_window.1 = 826
dlga_try_to_steal_sys_2:close_window.2 = 827
dlga_goods_merchant_talk:close_window = 828
dlga_academy_pre_talk:academy_mq_78_1 = 829
dlga_academy_mq_78_1:academy_mq_78_2 = 830
dlga_academy_mq_78_2:academy_mq_78_3 = 831
dlga_academy_mq_78_3:academy_mq_78_4 = 832
dlga_academy_mq_78_4:academy_mq_78_5 = 833
dlga_academy_mq_78_5:close_window = 834
dlga_academy_mq_78_2:close_window = 835
dlga_academy_pre_talk:academy_mq_15_1 = 836
dlga_academy_mq_15_1:academy_mq_15_2 = 837
dlga_academy_mq_15_2:academy_mq_15_3 = 838
dlga_academy_mq_15_3:academy_mq_15_4 = 839
dlga_academy_mq_15_4:academy_mq_15_5 = 840
dlga_academy_mq_15_5:academy_mq_15_6 = 841
dlga_academy_mq_15_6:academy_mq_15_7 = 842
dlga_academy_mq_15_7:academy_mq_15_8 = 843
dlga_academy_mq_15_8:close_window = 844
dlga_academy_pre_talk:academy_mq_12_1 = 845
dlga_academy_mq_12_1:academy_mq_12_2 = 846
dlga_academy_mq_12_2:academy_mq_12_3 = 847
dlga_academy_mq_12_3:academy_mq_12_4 = 848
dlga_academy_mq_12_4:close_window = 849
dlga_academy_pre_talk:close_window = 850
dlga_academy_pre_talk:close_window.1 = 851
dlga_academy_pre_talk:academy_mq_10_1 = 852
dlga_academy_mq_10_1:academy_mq_10_2 = 853
dlga_academy_mq_10_2:academy_mq_10_3 = 854
dlga_academy_mq_10_3:academy_mq_10_4 = 855
dlga_academy_mq_10_4:academy_mq_10_5 = 856
dlga_academy_mq_10_5:academy_mq_10_6 = 857
dlga_academy_mq_10_6:academy_mq_10_7 = 858
dlga_academy_mq_10_7:academy_mq_10_8 = 859
dlga_academy_mq_10_8:academy_mq_10_9 = 860
dlga_academy_mq_10_9:academy_mq_10_10 = 861
dlga_academy_mq_10_10:close_window = 862
dlga_academy_pre_talk:academy_fail = 863
dlga_academy_fail:academy_fail_2 = 864
dlga_academy_fail_2:close_window = 865
dlga_academy_pre_talk:academy_succ = 866
dlga_academy_pre_talk:close_window.2 = 867
dlga_academy_pre_talk:close_window.3 = 868
dlga_academy_pre_talk:academy_talk = 869
dlga_academy_talk:academy_ruin_1 = 870
dlga_academy_talk:close_window = 871
dlga_academy_ruin_1:academy_ruin_2 = 872
dlga_academy_ruin_1:close_window = 873
dlga_academy_ruin_2:close_window = 874
dlga_academy_ruin_2:academy_ruin_3 = 875
dlga_academy_ruin_3:academy_ruin_4 = 876
dlga_academy_ruin_4:academy_ruin_5 = 877
dlga_academy_ruin_4:close_window = 878
dlga_academy_ruin_5:academy_ruin_6 = 879
dlga_academy_ruin_6:academy_ruin_7 = 880
dlga_academy_ruin_6:close_window = 881
dlga_academy_ruin_7:close_window = 882
dlga_academy_succ:academy_succ_2 = 883
dlga_academy_succ_2:close_window = 884
dlga_lord_talk:pope_holywar = 885
dlga_lord_talk:mq_lord_87_1 = 886
dlga_mq_lord_87_1:mq_lord_87_2 = 887
dlga_mq_lord_87_2:close_window = 888
dlga_mq_lord_87_1:mq_lord_87_2.1 = 889
dlga_mq_lord_87_1:close_window = 890
dlga_lord_talk:mq_lord_82_1 = 891
dlga_mq_lord_82_1:mq_lord_82_2 = 892
dlga_mq_lord_82_2:mq_lord_82_3 = 893
dlga_mq_lord_82_3:mq_lord_82_4 = 894
dlga_mq_lord_82_4:close_window = 895
dlga_mq_lord_82_3:mq_lord_82_end_1 = 896
dlga_mq_lord_82_end_1:mq_lord_82_end_2 = 897
dlga_mq_lord_82_end_2:mq_lord_82_end_3 = 898
dlga_mq_lord_82_end_3:close_window = 899
dlga_lord_talk:mq_lord_77_1 = 900
dlga_mq_lord_77_1:mq_lord_77_2 = 901
dlga_mq_lord_77_2:mq_lord_77_3 = 902
dlga_mq_lord_77_3:mq_lord_77_end = 903
dlga_mq_lord_77_end:close_window = 904
dlga_mq_lord_77_3:mq_lord_77_4 = 905
dlga_mq_lord_77_4:close_window = 906
dlga_lord_talk:mq_lord_2_1 = 907
dlga_mq_lord_2_1:mq_lord_2_2 = 908
dlga_mq_lord_2_2:close_window = 909
dlga_mq_lord_2_2:mq_lord_2_3 = 910
dlga_mq_lord_2_3:mq_lord_2_4 = 911
dlga_mq_lord_2_4:close_window = 912
dlga_lord_talk:player_ambush_to_neutral_1 = 913
dlga_lord_talk:lord_quest_cancel = 914
dlga_lord_quest_cancel:lord_talk = 915
dlga_lord_talk:lady_duel_req_end_1 = 916
dlga_lady_duel_req_end_1:lady_duel_req_end_2 = 917
dlga_lady_duel_req_end_2:lady_duel_req_end_3 = 918
dlga_lady_duel_req_end_3:close_window = 919
dlga_lord_talk:lord_deliver_mess_end = 920
dlga_lord_deliver_mess_end:lord_talk = 921
dlga_lord_talk:lord_enemydefeat_end = 922
dlga_lord_enemydefeat_end:lord_talk = 923
dlga_lord_talk:lord_warmonger_end = 924
dlga_lord_warmonger_end:lord_talk = 925
dlga_lord_talk:lord_quest = 926
dlga_lord_talk:lord_relation_religion_talk = 927
dlga_lord_relation_religion_talk:lord_relation_religion_diff = 928
dlga_lord_relation_religion_diff:close_window = 929
dlga_lord_relation_religion_talk:close_window = 930
dlga_lord_relation_religion_talk:lord_relation_religion_tal_2 = 931
dlga_lord_relation_religion_tal_2:close_window = 932
dlga_lord_talk:lord_bribe = 933
dlga_lord_bribe:lord_talk = 934
dlga_lord_bribe:lord_talk.1 = 935
dlga_lord_bribe:lord_talk.2 = 936
dlga_lord_bribe:lord_talk.3 = 937
dlga_lord_talk:member_comp_relation_talk = 938
dlga_lord_talk:lord_talk_duel_request_1 = 939
dlga_lord_talk_duel_request_1:close_window = 940
dlga_lord_talk_duel_request_1:close_window.1 = 941
dlga_lord_talk_duel_request_1:close_window.2 = 942
dlga_lord_talk:lord_husband_fuck_talk = 943
dlga_lord_husband_fuck_talk:close_window = 944
dlga_lord_talk:lord_husband_blowjob_1 = 945
dlga_lord_husband_blowjob_1:lord_husband_blowjob_2 = 946
dlga_lord_husband_blowjob_2:lord_husband_blowjob_3 = 947
dlga_lord_husband_blowjob_3:lord_husband_blowjob_4 = 948
dlga_lord_husband_blowjob_4:lord_husband_blowjob_5 = 949
dlga_lord_husband_blowjob_5:lord_husband_blowjob_6 = 950
dlga_lord_husband_blowjob_6:lord_talk = 951
dlga_lord_talk:lord_for_girl_sex_try = 952
dlga_lord_for_girl_sex_try:lord_talk = 953
dlga_lord_for_girl_sex_try:lord_for_girl_sex_choose = 954
dlga_lord_for_girl_sex_choose:lord_blowjob_1 = 955
dlga_lord_blowjob_1:lord_blowjob_2 = 956
dlga_lord_blowjob_1:close_window = 957
dlga_lord_blowjob_2:lord_blowjob_3 = 958
dlga_lord_blowjob_3:lord_blowjob_4 = 959
dlga_lord_blowjob_4:lord_blowjob_5 = 960
dlga_lord_blowjob_5:lord_blowjob_6 = 961
dlga_lord_blowjob_6:lord_talk = 962
dlga_lord_for_girl_sex_choose:lord_for_girl_sex_relation = 963
dlga_lord_for_girl_sex_relation:close_window = 964
dlga_lord_for_girl_sex_relation:close_window.1 = 965
dlga_lord_for_girl_sex_choose:lord_bribe_blowjob_1 = 966
dlga_lord_bribe_blowjob_1:lord_bribe_blowjob_2 = 967
dlga_lord_bribe_blowjob_1:close_window = 968
dlga_lord_bribe_blowjob_2:lord_bribe_blowjob_3 = 969
dlga_lord_bribe_blowjob_3:lord_bribe_blowjob_4 = 970
dlga_lord_bribe_blowjob_4:lord_bribe_blowjob_5 = 971
dlga_lord_bribe_blowjob_5:lord_bribe_blowjob_6 = 972
dlga_lord_bribe_blowjob_6:lord_talk = 973
dlga_lord_for_girl_sex_choose:lord_for_girl_sex_get_money = 974
dlga_lord_for_girl_sex_get_money:close_window = 975
dlga_lord_for_girl_sex_get_money:close_window.1 = 976
dlga_lord_for_girl_sex_choose:close_window = 977
dlga_lord_talk:lord_comp_sex_trade = 978
dlga_lord_comp_sex_trade:lord_talk = 979
dlga_lord_comp_sex_trade:lord_comp_sex_trade_choose = 980
dlga_lord_comp_sex_trade_choose:lord_comp_sex_trade_choos_2 = 981
dlga_lord_comp_sex_trade_choose:close_window = 982
dlga_lord_comp_sex_trade_choos_2:close_window = 983
dlga_lord_comp_sex_trade_choos_2:close_window = 984
dlga_lord_comp_sex_trade_choos_2:close_window = 985
dlga_lord_comp_sex_trade_choos_2:close_window = 986
dlga_lord_comp_sex_trade_choos_2:close_window = 987
dlga_lord_comp_sex_trade_choos_2:close_window.1 = 988
dlga_lord_talk:spouse_talk_divorce_random = 989
dlga_lord_talk:lord_coup_1 = 990
dlga_lord_talk:lord_supporter_1 = 991
dlga_lord_supporter_1:lord_supporter_2 = 992
dlga_lord_supporter_2:lord_supporter_yes = 993
dlga_lord_supporter_yes:lord_talk = 994
dlga_lord_supporter_2:lord_supporter_no = 995
dlga_lord_supporter_no:lord_talk = 996
dlga_lord_coup_1:lord_talk = 997
dlga_lord_coup_1:lord_coup_2 = 998
dlga_lord_coup_2:lord_coup_persue_civil = 999
dlga_lord_coup_persue_civil:lord_coup_persue_exposed = 1000
dlga_lord_coup_persue_civil:lord_talk = 1001
dlga_lord_coup_persue_civil:lord_talk.1 = 1002
dlga_lord_coup_persue_civil:lord_talk.2 = 1003
dlga_lord_coup_2:lord_coup_persue_ambition = 1004
dlga_lord_coup_persue_ambition:lord_coup_persue_exposed = 1005
dlga_lord_coup_persue_ambition:lord_talk = 1006
dlga_lord_coup_persue_ambition:lord_talk.1 = 1007
dlga_lord_coup_persue_ambition:lord_talk.2 = 1008
dlga_lord_coup_2:lord_coup_persue_hate = 1009
dlga_lord_coup_persue_hate:lord_coup_persue_exposed = 1010
dlga_lord_coup_persue_hate:lord_talk = 1011
dlga_lord_coup_persue_hate:lord_talk.1 = 1012
dlga_lord_coup_persue_hate:lord_talk.2 = 1013
dlga_lord_coup_2:close_window = 1014
dlga_lord_coup_persue_exposed:close_window = 1015
dlga_lord_talk:lord_talk_mercenary_1 = 1016
dlga_lord_talk_mercenary_1:close_window = 1017
dlga_lord_talk_mercenary_1:close_window.1 = 1018
dlga_lord_talk_mercenary_1:close_window.2 = 1019
dlga_lord_talk_mercenary_1:close_window.3 = 1020
dlga_lord_talk_mercenary_1:close_window.4 = 1021
dlga_lord_talk_mercenary_1:lord_talk_mercenary_2 = 1022
dlga_lord_talk_mercenary_2:lord_talk_mercenary_3 = 1023
dlga_lord_talk_mercenary_3:lord_talk_mercenary_4 = 1024
dlga_lord_talk_mercenary_3:close_window = 1025
dlga_lord_talk_mercenary_4:close_window = 1026
dlga_lord_talk:lord_ask_enter_service = 1027
dlga_lord_ask_enter_service:close_window = 1028
dlga_lord_ask_enter_service:close_window.1 = 1029
dlga_lord_ask_enter_service:close_window.2 = 1030
dlga_lord_ask_enter_service:close_window.3 = 1031
dlga_lord_ask_enter_service:close_window.4 = 1032
dlga_lord_ask_enter_service:close_window.5 = 1033
dlga_lord_ask_enter_service:close_window.6 = 1034
dlga_lord_ask_enter_service:close_window.7 = 1035
dlga_lord_ask_enter_service:join_faction = 1036
dlga_lord_ask_enter_service:join_faction = 1037
dlga_join_faction:join_faction_2 = 1038
dlga_join_faction_2:join_faction_3 = 1039
dlga_join_faction_2:close_window = 1040
dlga_join_faction_3:join_faction_4 = 1041
dlga_join_faction_4:close_window = 1042
dlga_lord_talk:lord_player_betray_1 = 1043
dlga_lord_player_betray_1:close_window = 1044
dlga_lord_player_betray_1:lord_player_betray_2 = 1045
dlga_lord_player_betray_2:lord_player_betray_3 = 1046
dlga_lord_player_betray_2:lord_player_betra_no = 1047
dlga_lord_player_betra_no:lord_talk = 1048
dlga_lord_player_betray_3:lord_player_betray_4 = 1049
dlga_lord_player_betray_4:lord_player_betray_5 = 1050
dlga_lord_player_betray_5:lord_player_betray_6 = 1051
dlga_lord_player_betray_6:close_window = 1052
dlga_lord_propose_to_ply_1:lord_propose_to_ply_2 = 1053
dlga_lord_propose_to_ply_2:lord_propose_to_ply_3 = 1054
dlga_lord_propose_to_ply_3:lord_propose_to_ply_4 = 1055
dlga_lord_propose_to_ply_4:lord_propose_to_ply_5 = 1056
dlga_lord_propose_to_ply_5:lord_propose_to_ply_6 = 1057
dlga_lord_propose_to_ply_6:close_window = 1058
dlga_marry_for_man_player_2:close_window = 1059
dlga_lord_talk:close_window = 1060
dlga_religionist_talk:convert_1 = 1061
dlga_convert_1:convert_2 = 1062
dlga_convert_2:convert_3 = 1063
dlga_convert_2:close_window = 1064
dlga_convert_3:convert_4 = 1065
dlga_convert_4:close_window = 1066
dlga_religionist_talk:close_window = 1067
dlga_lady_capture_1:lady_capture_free = 1068
dlga_lady_capture_free:close_window = 1069
dlga_lady_capture_1:lady_capture_ransom = 1070
dlga_lady_capture_ransom:close_window = 1071
dlga_lady_capture_1:lady_capture_bj_1 = 1072
dlga_lady_capture_bj_1:lady_capture_bj_2 = 1073
dlga_lady_capture_bj_2:lady_capture_bj_3 = 1074
dlga_lady_capture_bj_2:close_window = 1075
dlga_lady_capture_bj_3:lady_capture_bj_4 = 1076
dlga_lady_capture_bj_4:lady_capture_bj_5 = 1077
dlga_lady_capture_bj_5:lady_capture_bj_6 = 1078
dlga_lady_capture_bj_6:lady_capture_bj_6_face = 1079
dlga_lady_capture_bj_6:lady_capture_bj_6_mouth = 1080
dlga_lady_capture_bj_6_face:lady_capture_bj_7 = 1081
dlga_lady_capture_bj_6_mouth:lady_capture_bj_7 = 1082
dlga_lady_capture_bj_7:lady_capture_bj_8 = 1083
dlga_lady_capture_bj_8:close_window = 1084
dlga_lady_capture_1:lady_capture_slave_1 = 1085
dlga_lady_capture_slave_1:lady_capture_slave_2 = 1086
dlga_lady_capture_slave_2:lady_capture_slave_3 = 1087
dlga_lady_capture_slave_2:close_window = 1088
dlga_lady_capture_slave_3:close_window = 1089
dlga_lady_capture_1:lady_cap_marry_1 = 1090
dlga_lady_capture_1:lady_cap_marry_1.1 = 1091
dlga_lady_cap_marry_1:lady_cap_marry_2 = 1092
dlga_lady_cap_marry_2:lady_cap_marry_3 = 1093
dlga_lady_cap_marry_2:close_window = 1094
dlga_lady_cap_marry_3:lady_cap_marry_4 = 1095
dlga_lady_cap_marry_3:lady_cap_marry_4.1 = 1096
dlga_lady_cap_marry_4:close_window = 1097
dlga_lady_talk:lady_quest_cancel = 1098
dlga_lady_quest_cancel:lady_talk = 1099
dlga_lady_talk:lord_lady_rescue_lord_end = 1100
dlga_lord_lady_rescue_lord_end:lady_talk = 1101
dlga_lady_talk:lord_lady_duel_req_end = 1102
dlga_lord_lady_duel_req_end:lady_talk = 1103
dlga_lady_talk:lord_lady_beat_end = 1104
dlga_lord_lady_beat_end:lady_talk = 1105
dlga_lady_talk:lady_misdoubt_end_1 = 1106
dlga_lady_misdoubt_end_1:lady_talk = 1107
dlga_lady_talk:lady_misdoubt_end_2 = 1108
dlga_lady_misdoubt_end_2:lady_talk = 1109
dlga_lady_talk:lord_loveletter_end = 1110
dlga_lord_loveletter_end:lady_talk = 1111
dlga_lady_talk:lady_quest = 1112
dlga_lady_talk:lady_req_jailbreak_1 = 1113
dlga_lady_talk:lady_cleo_oral_skill_1 = 1114
dlga_lady_cleo_oral_skill_1:lady_cleo_oral_skill_2 = 1115
dlga_lady_cleo_oral_skill_2:lady_cleo_oral_skill_3 = 1116
dlga_lady_cleo_oral_skill_3:lady_talk = 1117
dlga_lady_cleo_oral_skill_3:lady_talk.1 = 1118
dlga_lady_talk:lady_diao_sex_skill_1 = 1119
dlga_lady_diao_sex_skill_1:lady_diao_sex_skill_2 = 1120
dlga_lady_diao_sex_skill_2:lady_diao_sex_skill_3 = 1121
dlga_lady_diao_sex_skill_3:lady_talk = 1122
dlga_lady_diao_sex_skill_3:lady_talk.1 = 1123
dlga_lady_talk:lady_backg_1 = 1124
dlga_lady_backg_1:lady_talk = 1125
dlga_lady_talk:lady_gift_1 = 1126
dlga_lady_gift_1:lady_gift_2 = 1127
dlga_lady_gift_2:close_window = 1128
dlga_lady_gift_2:lady_gift_3 = 1129
dlga_lady_gift_2:lady_gift_3 = 1130
dlga_lady_gift_2:lady_gift_3 = 1131
dlga_lady_gift_2:lady_gift_3 = 1132
dlga_lady_gift_2:lady_gift_3 = 1133
dlga_lady_gift_2:lady_gift_3 = 1134
dlga_lady_gift_2:lady_gift_3 = 1135
dlga_lady_gift_2:lady_gift_3 = 1136
dlga_lady_gift_2:lady_gift_3 = 1137
dlga_lady_gift_2:lady_gift_3 = 1138
dlga_lady_gift_2:lady_gift_3 = 1139
dlga_lady_gift_2:lady_gift_3 = 1140
dlga_lady_gift_2:lady_gift_3 = 1141
dlga_lady_gift_2:lady_gift_3 = 1142
dlga_lady_gift_2:lady_gift_3 = 1143
dlga_lady_gift_2:lady_gift_3 = 1144
dlga_lady_gift_3:lady_talk = 1145
dlga_lady_talk:member_comp_relation_talk = 1146
dlga_lady_talk:lady_virgin_attem = 1147
dlga_lady_talk:lady_spouse = 1148
dlga_lady_spouse:close_window = 1149
dlga_lady_virgin_attem:lady_talk = 1150
dlga_lady_virgin_attem:close_window = 1151
dlga_lady_virgin_attem:close_window.1 = 1152
dlga_lady_virgin_attem:close_window = 1153
dlga_lady_virgin_attem:close_window.2 = 1154
dlga_lady_virgin_attem:close_window.3 = 1155
dlga_lady_talk:spouse_talk_relation_minus_1 = 1156
dlga_spouse_talk_relation_minus_1:spouse_talk_relation_minus_2 = 1157
dlga_spouse_talk_relation_minus_2:spouse_talk_relation_minus_3 = 1158
dlga_spouse_talk_relation_minus_2:close_window = 1159
dlga_spouse_talk_relation_minus_3:spouse_talk_relation_minus_4 = 1160
dlga_spouse_talk_relation_minus_4:close_window = 1161
dlga_spouse_talk_relation_minus_4:close_window.1 = 1162
dlga_lady_talk:spouse_talk_divorce_random = 1163
dlga_spouse_talk_divorce_random:close_window = 1164
dlga_spouse_talk_divorce_random:spouse_talk_divorce_choose1 = 1165
dlga_spouse_talk_divorce_choose1:close_window = 1166
dlga_spouse_talk_divorce_choose1:close_window.1 = 1167
dlga_lady_talk:spouse_join = 1168
dlga_spouse_join:close_window = 1169
dlga_lady_talk:marry_for_man_player_1 = 1170
dlga_lady_talk:close_window = 1171
dlga_lady_slave_prison:lady_slave_suck = 1172
dlga_lady_slave_suck:lady_slave_suck_not = 1173
dlga_lady_slave_suck_not:lady_slave_whipped = 1174
dlga_lady_slave_whipped:lady_slave_suck_yes = 1175
dlga_lady_slave_suck_not:close_window = 1176
dlga_lady_slave_suck_yes:lady_slave_suck_bite = 1177
dlga_lady_slave_suck_yes:lady_slave_suck_yes_2 = 1178
dlga_lady_slave_suck_bite:lady_slave_suck_bite_1 = 1179
dlga_lady_slave_suck_bite_1:lady_slave_suck_bite_2 = 1180
dlga_lady_slave_suck_bite_2:close_window = 1181
dlga_lady_slave_suck_yes_2:lady_slave_suck_yes_3 = 1182
dlga_lady_slave_suck_yes_3:lady_slave_suck_yes_4 = 1183
dlga_lady_slave_suck_yes_4:lady_slave_suck_face = 1184
dlga_lady_slave_suck_yes_4:lady_slave_suck_mouth = 1185
dlga_lady_slave_suck_face:lady_slave_suck_after = 1186
dlga_lady_slave_suck_mouth:lady_slave_suck_after = 1187
dlga_lady_slave_suck_after:lady_slave_prison = 1188
dlga_lady_slave_suck:lady_slave_suck_yes = 1189
dlga_lady_slave_suck:lady_slave_suck_yes.1 = 1190
dlga_lady_slave_prison:lady_slave_fuck = 1191
dlga_lady_slave_fuck:lady_slave_fuck_no = 1192
dlga_lady_slave_fuck_no:lady_slave_fuck_whip = 1193
dlga_lady_slave_fuck_whip:close_window = 1194
dlga_lady_slave_fuck_no:close_window = 1195
dlga_lady_slave_fuck:lady_slave_fuck_yes = 1196
dlga_lady_slave_fuck:lady_slave_fuck_yes.1 = 1197
dlga_lady_slave_fuck_yes:close_window = 1198
dlga_lady_slave_prison:lady_slavmarry_1 = 1199
dlga_lady_slave_prison:lady_slavmarry_1.1 = 1200
dlga_lady_slavmarry_1:lady_slavmarry_2 = 1201
dlga_lady_slavmarry_1:lady_slavmarry_yes = 1202
dlga_lady_slavmarry_yes:close_window = 1203
dlga_lady_slavmarry_2:lady_slavmarry_3 = 1204
dlga_lady_slavmarry_2:close_window = 1205
dlga_lady_slavmarry_3:lady_slavmarry_4 = 1206
dlga_lady_slavmarry_3:lady_slavmarry_4.1 = 1207
dlga_lady_slavmarry_4:close_window = 1208
dlga_lady_slave_prison:lady_slave_prostitution = 1209
dlga_lady_slave_prostitution:close_window = 1210
dlga_lady_slave_prostitution:close_window.1 = 1211
dlga_lady_slave_prostitution:close_window.2 = 1212
dlga_lady_slave_prostitution:close_window.3 = 1213
dlga_lady_slave_prison:lady_slave_fightpit = 1214
dlga_lady_slave_fightpit:close_window = 1215
dlga_lady_slave_fightpit:close_window.1 = 1216
dlga_lady_slave_prostitution:close_window.4 = 1217
dlga_lady_slave_fightpit:close_window.2 = 1218
dlga_lady_slave_prison:lady_slave_torture = 1219
dlga_lady_slave_torture:close_window = 1220
dlga_lady_slave_torture:close_window.1 = 1221
dlga_lady_slave_torture:close_window.2 = 1222
dlga_lady_slave_prison:lady_slave_dancer = 1223
dlga_lady_slave_dancer:close_window = 1224
dlga_lady_slave_dancer:close_window.1 = 1225
dlga_lady_slave_dancer:close_window.2 = 1226
dlga_lady_slave_prison:lady_slave_prison_ransom_1 = 1227
dlga_lady_slave_prison_ransom_1:lady_slave_prison_ransom_2 = 1228
dlga_lady_slave_prison_ransom_2:lady_slave_prison_ransom_2a = 1229
dlga_lady_slave_prison_ransom_2a:close_window = 1230
dlga_lady_slave_prison_ransom_2:lady_slave_prison_ransom_2b = 1231
dlga_lady_slave_prison_ransom_2b:close_window = 1232
dlga_lady_slave_prison:lady_slave_free_1 = 1233
dlga_lady_slave_free_1:lady_slave_free_2 = 1234
dlga_lady_slave_free_2:lady_slave_free_2a = 1235
dlga_lady_slave_free_2a:close_window = 1236
dlga_lady_slave_free_2:lady_slave_free_2b = 1237
dlga_lady_slave_free_2b:close_window = 1238
dlga_lady_slave_prison:close_window = 1239
dlga_hideout_warder_1:hideout_warder_underwear = 1240
dlga_hideout_warder_underwear:close_window = 1241
dlga_hideout_warder_1:hideout_warder_nake = 1242
dlga_hideout_warder_nake:close_window = 1243
dlga_hideout_warder_1:hideout_warder_dance_notsame = 1244
dlga_hideout_warder_dance_notsame:close_window = 1245
dlga_hideout_warder_1:hideout_warder_dance_same = 1246
dlga_hideout_warder_dance_same:close_window = 1247
dlga_hideout_warder_1:close_window = 1248
dlga_hideout_torturer_1:hideout_torturer_2 = 1249
dlga_hideout_torturer_2:hideout_torturer_troop_info = 1250
dlga_hideout_torturer_troop_info:hideout_torturer_choose = 1251
dlga_hideout_torturer_choose:hideout_torturer_done = 1252
dlga_hideout_torturer_choose:hideout_torturer_done.1 = 1253
dlga_hideout_torturer_choose:hideout_torturer_done.2 = 1254
dlga_hideout_torturer_choose:hideout_torturer_done.3 = 1255
dlga_hideout_torturer_choose:hideout_torturer_done.4 = 1256
dlga_hideout_torturer_choose:hideout_torturer_done.5 = 1257
dlga_hideout_torturer_choose:hideout_torturer_done.6 = 1258
dlga_hideout_torturer_choose:hideout_torturer_1 = 1259
dlga_hideout_torturer_done:close_window = 1260
dlga_hideout_torturer_2:close_window = 1261
dlga_hideout_chef_1:hideout_chef_buy_drink = 1262
dlga_hideout_chef_buy_drink:close_window = 1263
dlga_hideout_chef_1:close_window = 1264
dlga_brothel_manager:brothel_manager_2 = 1265
dlga_brothel_manager_2:brothel_manager_3 = 1266
dlga_brothel_manager_3:brothel_manager_4 = 1267
dlga_brothel_manager_4:brothel_manager_5 = 1268
dlga_brothel_manager_5:close_window = 1269
dlga_brothel_manager_4:brothel_manager = 1270
dlga_brothel_manager_2:close_window = 1271
dlga_nakepit_manager:close_window = 1272
dlga_nakepit_manager:close_window.1 = 1273
dlga_nakepit_manager:close_window.2 = 1274
dlga_nakepit_manager:close_window.3 = 1275
dlga_nakepit_manager:close_window.4 = 1276
dlga_nakepit_manager:nakepit_manager_2 = 1277
dlga_nakepit_manager_2:nakepit_manager_slave_list_1 = 1278
dlga_nakepit_manager_slave_list_1:nakepit_manager_slave_list_2 = 1279
dlga_nakepit_manager_slave_list_2:nakepit_manager_slave_list_3 = 1280
dlga_nakepit_manager_slave_list_3:nakepit_manager_slave_list_4 = 1281
dlga_nakepit_manager_slave_list_4:nakepit_manager_back_prison = 1282
dlga_nakepit_manager_back_prison:close_window = 1283
dlga_nakepit_manager_slave_list_4:nakepit_manager = 1284
dlga_nakepit_manager_slave_list_2:nakepit_manager = 1285
dlga_nakepit_manager_2:nakepit_manager_bet = 1286
dlga_nakepit_manager_2:nakepit_manager_bet.1 = 1287
dlga_nakepit_manager_2:nakepit_manager_bet.2 = 1288
dlga_nakepit_manager_2:nakepit_manager_bet.3 = 1289
dlga_nakepit_manager_bet:nakepit_manager_bet_choose = 1290
dlga_nakepit_manager_bet_choose:nakepit_manager_bet_okay = 1291
dlga_nakepit_manager_bet_choose:nakepit_manager_bet_okay.1 = 1292
dlga_nakepit_manager_bet_choose:nakepit_manager_bet_okay.2 = 1293
dlga_nakepit_manager_bet_choose:nakepit_manager_bet_okay.3 = 1294
dlga_nakepit_manager_bet_choose:close_window = 1295
dlga_nakepit_manager_bet_okay:close_window = 1296
dlga_nakepit_manager_2:close_window = 1297
dlga_bath_manager:bath_manager_milk = 1298
dlga_bath_manager_milk:close_window = 1299
dlga_bath_manager:close_window = 1300
dlga_lord_ntr_1:lord_ntr_2 = 1301
dlga_lord_ntr_2:lord_ntr_3 = 1302
dlga_lord_ntr_3:lord_ntr_4 = 1303
dlga_lord_ntr_4:lord_ntr_5 = 1304
dlga_lord_ntr_5:lord_ntr_6 = 1305
dlga_lord_ntr_6:lord_ntr_7 = 1306
dlga_lord_ntr_7:lord_ntr_8 = 1307
dlga_lord_ntr_8:lord_ntr_9 = 1308
dlga_lord_ntr_9:lord_ntr_10 = 1309
dlga_lord_ntr_10:lord_ntr_11 = 1310
dlga_lord_ntr_11:lord_ntr_12 = 1311
dlga_lord_ntr_12:lord_ntr_13 = 1312
dlga_lord_ntr_13:lord_ntr_14 = 1313
dlga_lord_ntr_14:lord_ntr_15 = 1314
dlga_lord_ntr_15:lord_ntr_16 = 1315
dlga_lord_ntr_16:lord_ntr_17 = 1316
dlga_lord_ntr_17:lord_ntr_18 = 1317
dlga_lord_ntr_18:lord_ntr_19 = 1318
dlga_lord_ntr_19:lord_ntr_20 = 1319
dlga_lord_ntr_20:lord_ntr_21 = 1320
dlga_lord_ntr_21:lord_ntr_22 = 1321
dlga_lord_ntr_22:lord_ntr_23 = 1322
dlga_lord_ntr_23:close_window = 1323
dlga_lord_ntr_23:lord_ntr_24 = 1324
dlga_lord_ntr_24:lord_ntr_25 = 1325
dlga_lord_ntr_25:lord_ntr_26 = 1326
dlga_lord_ntr_26:lord_ntr_27 = 1327
dlga_lord_ntr_27:lord_ntr_28 = 1328
dlga_lord_ntr_28:lord_ntr_29 = 1329
dlga_lord_ntr_29:lord_ntr_30 = 1330
dlga_lord_ntr_30:lord_ntr_31 = 1331
dlga_lord_ntr_31:close_window = 1332
dlga_female_ply_raped_1:female_ply_raped_2 = 1333
dlga_female_ply_raped_2:female_ply_raped_3 = 1334
dlga_female_ply_raped_3:close_window = 1335
dlga_traveler_talk:mq_62_1 = 1336
dlga_mq_62_1:mq_62_2 = 1337
dlga_mq_62_2:close_window = 1338
dlga_traveler_talk:mq_52_1 = 1339
dlga_mq_52_1:mq_52_a1 = 1340
dlga_mq_52_a1:mq_52_a2 = 1341
dlga_mq_52_a2:mq_52_a3 = 1342
dlga_mq_52_a3:mq_52_a4 = 1343
dlga_mq_52_a4:close_window = 1344
dlga_mq_52_a4:mq_52_a5 = 1345
dlga_mq_52_a5:mq_52_a6 = 1346
dlga_mq_52_a6:mq_52_a7 = 1347
dlga_mq_52_a7:close_window = 1348
dlga_mq_52_1:mq_52_22 = 1349
dlga_mq_52_22:close_window = 1350
dlga_mq_52_1:mq_52_33 = 1351
dlga_mq_52_33:close_window = 1352
dlga_traveler_talk:mq_29_1 = 1353
dlga_mq_29_1:mq_29_2 = 1354
dlga_mq_29_2:close_window = 1355
dlga_mq_29_1:close_window = 1356
dlga_traveler_talk:traveler_talk_book_1 = 1357
dlga_traveler_talk:close_window = 1358
dlga_traveler_talk_book_1:traveler_talk_book_2 = 1359
dlga_traveler_talk_book_2:close_window = 1360
dlga_traveler_talk_book_2:traveler_talk_book_3 = 1361
dlga_traveler_talk_book_3:traveler_talk_book_4 = 1362
dlga_traveler_talk_book_3:close_window = 1363
dlga_traveler_talk_book_4:close_window = 1364
dlga_temp_commander_talk:player_ambush_to_neutral_1 = 1365
dlga_temp_commander_talk:close_window = 1366
dlga_player_ambush_to_neutral_1:player_ambush_to_neutral_2 = 1367
dlga_player_ambush_to_neutral_2:close_window = 1368
dlga_player_ambush_to_neutral_2:attack_innocent_money = 1369
dlga_player_ambush_to_neutral_2:close_window.1 = 1370
dlga_caravan_master_talk:player_ambush_to_neutral_1 = 1371
dlga_caravan_master_talk:caravan_master_trade = 1372
dlga_caravan_master_trade:close_window = 1373
dlga_caravan_master_talk:close_window = 1374
dlga_attack_innocent_1:close_window = 1375
dlga_attack_innocent_1:attack_innocent_money = 1376
dlga_attack_innocent_money:close_window = 1377
dlga_attack_innocent_1:close_window.1 = 1378
dlga_lord_captured_1:freed_hero_answer_1 = 1379
dlga_freed_hero_answer_1:close_window = 1380
dlga_lord_captured_1:lord_captured_ransom = 1381
dlga_lord_captured_ransom:close_window = 1382
dlga_lord_captured_1:lord_captured_release = 1383
dlga_lord_captured_release:close_window = 1384
dlga_mq_46_1:mq_46_2 = 1385
dlga_mq_46_2:mq_46_3 = 1386
dlga_mq_46_3:mq_46_4 = 1387
dlga_mq_46_3:mq_46_4.1 = 1388
dlga_mq_46_4:close_window = 1389
dlga_lady_rescue_1:lady_resc_escort = 1390
dlga_lady_resc_escort:close_window = 1391
dlga_lady_rescue_1:lady_resc_slave_1 = 1392
dlga_lady_resc_slave_1:lady_resc_slave_2 = 1393
dlga_lady_resc_slave_2:lady_resc_slave_3 = 1394
dlga_lady_resc_slave_3:close_window = 1395
dlga_lady_rescue_1:lady_resc_marry_1 = 1396
dlga_lady_rescue_1:lady_resc_marry_1.1 = 1397
dlga_lady_resc_marry_1:lady_resc_marry_2 = 1398
dlga_lady_resc_marry_2:lady_resc_marry_3 = 1399
dlga_lady_resc_marry_3:lady_resc_marry_4 = 1400
dlga_lady_resc_marry_3:lady_resc_marry_4.1 = 1401
dlga_lady_resc_marry_4:close_window = 1402
dlga_bard_talk:bard_quest_1 = 1403
dlga_bard_quest_1:bard_quest_2 = 1404
dlga_bard_quest_2:bard_quest_3 = 1405
dlga_bard_quest_3:bard_quest_4 = 1406
dlga_bard_quest_4:bard_quest_5 = 1407
dlga_bard_quest_5:close_window = 1408
dlga_bard_quest_4:close_window = 1409
dlga_bard_talk:bard_quest_cancel = 1410
dlga_bard_quest_cancel:close_window = 1411
dlga_bard_talk:bard_quest_succ = 1412
dlga_bard_quest_succ:close_window = 1413
dlga_bard_talk:close_window = 1414
dlga_tavernkeeper_quest:tavernkeeper_alcohol = 1415
dlga_tavernkeeper_alcohol:close_window = 1416
dlga_tavernkeeper_alcohol:close_window.1 = 1417
dlga_tavernkeeper_quest:tavernkeeper_headhunt = 1418
dlga_tavernkeeper_headhunt:close_window = 1419
dlga_tavernkeeper_headhunt:close_window.1 = 1420
dlga_fugitive_1:fugitive_2 = 1421
dlga_fugitive_1:close_window = 1422
dlga_fugitive_2:fugitive_3 = 1423
dlga_fugitive_3:fugitive_4 = 1424
dlga_fugitive_4:fugitive_5 = 1425
dlga_fugitive_5:fugitive_fight_start = 1426
dlga_fugitive_5:fugitive_fight_start.1 = 1427
dlga_fugitive_5:fugitive_fight_start.2 = 1428
dlga_fugitive_fight_start:close_window = 1429
dlga_tavernkeeper_quest:tavernkeeper_talk = 1430
dlga_lord_quest:lord_deliver_mess = 1431
dlga_lord_deliver_mess:lord_deliver_mess_accept = 1432
dlga_lord_deliver_mess_accept:lord_talk = 1433
dlga_lord_deliver_mess:lord_deliver_mess_reject = 1434
dlga_lord_deliver_mess_reject:lord_talk = 1435
dlga_lord_quest:lord_loveletter = 1436
dlga_lord_loveletter:lord_loveletter_accept = 1437
dlga_lord_loveletter_accept:lord_talk = 1438
dlga_lord_loveletter:lord_loveletter_reject = 1439
dlga_lord_loveletter_reject:lord_talk = 1440
dlga_lord_quest:lord_enemydefeat = 1441
dlga_lord_enemydefeat:lord_enemydefeat_accept = 1442
dlga_lord_enemydefeat_accept:lord_talk = 1443
dlga_lord_enemydefeat:lord_enemydefeat_reject = 1444
dlga_lord_enemydefeat_reject:lord_talk = 1445
dlga_lord_quest:lord_warmonger_1 = 1446
dlga_lord_warmonger_1:lord_warmonger_2 = 1447
dlga_lord_warmonger_1:lord_warmonger_reject = 1448
dlga_lord_warmonger_2:lord_warmonger_3 = 1449
dlga_lord_warmonger_3:lord_warmonger_accept = 1450
dlga_lord_warmonger_accept:lord_talk = 1451
dlga_lord_warmonger_3:lord_warmonger_reject = 1452
dlga_lord_warmonger_reject:lord_talk = 1453
dlga_lord_quest:lord_talk = 1454
dlga_lady_req_jailbreak_1:lady_req_jailbreak_2 = 1455
dlga_lady_req_jailbreak_2:lady_mission_accepted = 1456
dlga_lady_req_jailbreak_2:lady_mission_rejected = 1457
dlga_lady_mission_accepted:close_window = 1458
dlga_lady_mission_rejected:close_window = 1459
dlga_lady_quest:lady_messenger = 1460
dlga_lady_messenger:lady_messenger_accept = 1461
dlga_lady_messenger_accept:lady_talk = 1462
dlga_lady_messenger:lady_messenger_reject = 1463
dlga_lady_messenger_reject:lady_talk = 1464
dlga_lady_quest:lady_duel_req = 1465
dlga_lady_duel_req:lady_duel_req_accept = 1466
dlga_lady_duel_req_accept:lady_talk = 1467
dlga_lady_duel_req:lady_duel_req_reject = 1468
dlga_lady_duel_req_reject:lady_talk = 1469
dlga_lady_quest:lady_beat = 1470
dlga_lady_beat:lady_beat_accept = 1471
dlga_lady_beat_accept:lady_talk = 1472
dlga_lady_beat:lady_beat_reject = 1473
dlga_lady_beat_reject:lady_talk = 1474
dlga_lady_quest:lady_misdoubt = 1475
dlga_lady_misdoubt:lady_misdoubt_accept = 1476
dlga_lady_misdoubt_accept:lady_talk = 1477
dlga_lady_misdoubt:lady_misdoubt_reject = 1478
dlga_lady_misdoubt_reject:lady_talk = 1479
dlga_lord_misdoubt:lord_misdoubt_lie = 1480
dlga_lord_misdoubt_lie:close_window = 1481
dlga_lord_misdoubt:lord_misdoubt_truth = 1482
dlga_lord_misdoubt_truth:close_window = 1483
dlga_lady_quest:lady_talk = 1484
dlga_ransom_talk:broker_mq_5_1 = 1485
dlga_broker_mq_5_1:close_window = 1486
dlga_ransom_talk:broker_mq_6_1 = 1487
dlga_broker_mq_6_1:broker_mq_6_2 = 1488
dlga_broker_mq_6_2:close_window = 1489
dlga_ransom_talk:ransom_broker_sell_prisoners_2 = 1490
dlga_ransom_broker_sell_prisoners_2:close_window = 1491
dlga_ransom_talk:close_window = 1492
dlga_lord_prisoner_talk:lord_prisoner_compbj_pre = 1493
dlga_lord_prisoner_compbj_pre:lord_prisoner_compbj_0 = 1494
dlga_lord_prisoner_compbj_0:lord_prisoner_compbj_1 = 1495
dlga_lord_prisoner_compbj_0:close_window = 1496
dlga_lord_prisoner_compbj_1:lord_prisoner_compbj_2 = 1497
dlga_lord_prisoner_compbj_2:lord_prisoner_compbj_3 = 1498
dlga_lord_prisoner_compbj_3:lord_prisoner_compbj_4 = 1499
dlga_lord_prisoner_compbj_4:lord_prisoner_compbj_5 = 1500
dlga_lord_prisoner_compbj_5:lord_prisoner_compbj_6 = 1501
dlga_lord_prisoner_compbj_6:close_window = 1502
dlga_lord_prisoner_talk:lord_prisoner_blowjob = 1503
dlga_lord_prisoner_blowjob:member_blowjob_fe_plyr_2 = 1504
dlga_lord_prisoner_talk:lord_prisoner_ransom = 1505
dlga_lord_prisoner_ransom:close_window = 1506
dlga_lord_prisoner_talk:lord_prisoner_release = 1507
dlga_lord_prisoner_release:close_window = 1508
dlga_lord_prisoner_talk:lord_prisoner_whip = 1509
dlga_lord_prisoner_whip:close_window = 1510
dlga_lord_prisoner_talk:close_window = 1511
dlga_tavern_mer_rec_1:tavern_mer_rec_2 = 1512
dlga_tavern_mer_rec_2:close_window = 1513
dlga_tavern_mer_rec_1:close_window = 1514
dlga_prison_guard_talk:prison_guard_visit_prison = 1515
dlga_prison_guard_talk:close_window = 1516
dlga_prison_guard_visit_prison:prison_guard_visit_prison_2 = 1517
dlga_prison_guard_visit_prison_2:close_window = 1518
dlga_prison_guard_visit_prison_2:prison_guard_visit_prison_3 = 1519
dlga_prison_guard_visit_prison_3:prison_guard_visit_prison_4 = 1520
dlga_prison_guard_visit_prison_4:close_window = 1521
dlga_prison_guard_visit_prison_4:prison_guard_visit_prison_5 = 1522
dlga_prison_guard_visit_prison_5:close_window = 1523
dlga_prison_guard_visit_prison_4:prison_guard_visit_break = 1524
dlga_prison_guard_visit_break:close_window = 1525
dlga_orphan_talk:orphan_study = 1526
dlga_orphan_study:orphan_study_list = 1527
dlga_orphan_study_list:close_window = 1528
dlga_orphan_study_list:orphan_study_react = 1529
dlga_orphan_study_list:orphan_study_react.1 = 1530
dlga_orphan_study_list:orphan_study_react.2 = 1531
dlga_orphan_study_list:orphan_study_react.3 = 1532
dlga_orphan_study_list:orphan_study_react.4 = 1533
dlga_orphan_study_list:orphan_study_react.5 = 1534
dlga_orphan_study_list:orphan_study_react.6 = 1535
dlga_orphan_study_list:orphan_study_react.7 = 1536
dlga_orphan_study_list:orphan_study_react.8 = 1537
dlga_orphan_study_list:orphan_study_react.9 = 1538
dlga_orphan_study_list:orphan_study_react.10 = 1539
dlga_orphan_study_list:orphan_study_react.11 = 1540
dlga_orphan_study_react:orphan_talk = 1541
dlga_orphan_talk:orphan_char_requested = 1542
dlga_orphan_char_requested:orphan_view_char = 1543
dlga_orphan_view_char:orphan_talk = 1544
dlga_orphan_talk:member_comp_relation_talk = 1545
dlga_orphan_talk:orphan_bigboobherb = 1546
dlga_orphan_bigboobherb:orphan_talk = 1547
dlga_orphan_talk:orphan_smallboobherb = 1548
dlga_orphan_smallboobherb:orphan_talk = 1549
dlga_orphan_talk:orphan_join = 1550
dlga_orphan_join:close_window = 1551
dlga_orphan_talk:orphan_disable = 1552
dlga_orphan_disable:orphan_confirm = 1553
dlga_orphan_confirm:close_window = 1554
dlga_orphan_confirm:close_window.1 = 1555
dlga_orphan_talk:close_window = 1556
dlga_pope_talk:pope_holywar = 1557
dlga_pope_holywar:pope_holywar_list = 1558
dlga_pope_holywar:close_window = 1559
dlga_pope_holywar_list:pope_holywar_repond = 1560
dlga_pope_holywar_list:close_window = 1561
dlga_pope_holywar_repond:close_window = 1562
dlga_pope_holywar_repond:close_window.1 = 1563
dlga_pope_holywar_repond:close_window.2 = 1564
dlga_pope_holywar_repond:close_window.3 = 1565
dlga_pope_talk:close_window = 1566
dlga_sister_talk:mq_16_2 = 1567
dlga_mq_16_2:mq_16_3 = 1568
dlga_mq_16_3:mq_16_4 = 1569
dlga_mq_16_4:close_window = 1570
dlga_sister_talk:mq_44_2 = 1571
dlga_mq_44_2:mq_44_3 = 1572
dlga_mq_44_3:mq_44_4 = 1573
dlga_mq_44_4:mq_44_5 = 1574
dlga_mq_44_5:mq_44_6 = 1575
dlga_mq_44_6:mq_44_7 = 1576
dlga_mq_44_7:mq_44_8 = 1577
dlga_mq_44_8:mq_44_9 = 1578
dlga_mq_44_9:mq_44_10 = 1579
dlga_mq_44_10:mq_44_11 = 1580
dlga_mq_44_11:mq_44_12 = 1581
dlga_mq_44_12:mq_44_13 = 1582
dlga_mq_44_13:mq_44_14 = 1583
dlga_mq_44_14:mq_44_15 = 1584
dlga_mq_44_15:mq_44_16 = 1585
dlga_mq_44_16:mq_44_17 = 1586
dlga_mq_44_17:mq_44_18 = 1587
dlga_mq_44_18:mq_44_19 = 1588
dlga_mq_44_19:mq_44_20 = 1589
dlga_mq_44_20:mq_44_21 = 1590
dlga_mq_44_21:mq_44_22 = 1591
dlga_mq_44_22:mq_44_23 = 1592
dlga_mq_44_23:mq_44_24 = 1593
dlga_mq_44_24:close_window = 1594
dlga_sister_talk:mq_50_2 = 1595
dlga_mq_50_2:mq_50_3 = 1596
dlga_mq_50_3:mq_50_4 = 1597
dlga_mq_50_4:mq_50_5 = 1598
dlga_mq_50_5:mq_50_6 = 1599
dlga_mq_50_6:mq_50_7 = 1600
dlga_mq_50_7:mq_50_8 = 1601
dlga_mq_50_8:mq_50_9 = 1602
dlga_mq_50_9:close_window = 1603
dlga_sister_talk:sis_ord_talk = 1604
dlga_sis_ord_talk:close_window = 1605
dlga_mainq_48_1:close_window = 1606
dlga_mainq_20_1:mainq_20_2 = 1607
dlga_mainq_20_2:mainq_20_3 = 1608
dlga_mainq_20_3:mainq_20_4 = 1609
dlga_mainq_20_4:close_window = 1610
dlga_temp_talker:mainq_32_1 = 1611
dlga_mainq_32_1:mainq_32_2 = 1612
dlga_mainq_32_2:mainq_32_3 = 1613
dlga_mainq_32_3:mainq_32_4 = 1614
dlga_mainq_32_4:mainq_32_5 = 1615
dlga_mainq_32_5:close_window = 1616
dlga_temp_talker:mainq_86_1 = 1617
dlga_mainq_86_1:mainq_86_2 = 1618
dlga_mainq_86_2:mainq_86_3 = 1619
dlga_mainq_86_3:mainq_86_4 = 1620
dlga_mainq_86_4:mainq_86_5 = 1621
dlga_mainq_86_5:mainq_86_6 = 1622
dlga_mainq_86_6:mainq_86_7 = 1623
dlga_mainq_86_7:mainq_86_8 = 1624
dlga_mainq_86_8:close_window = 1625
dlga_temp_talker:mainq_83_1 = 1626
dlga_mainq_83_1:mainq_83_2 = 1627
dlga_mainq_83_2:mainq_83_3 = 1628
dlga_mainq_83_3:mainq_83_4 = 1629
dlga_mainq_83_4:mainq_83_5 = 1630
dlga_mainq_83_5:mainq_83_6 = 1631
dlga_mainq_83_6:mainq_83_7 = 1632
dlga_mainq_83_7:mainq_83_8 = 1633
dlga_mainq_83_8:mainq_83_9 = 1634
dlga_mainq_83_9:mainq_83_10 = 1635
dlga_mainq_83_10:mainq_83_11 = 1636
dlga_mainq_83_11:close_window = 1637
dlga_temp_talker:mainq_72_1 = 1638
dlga_mainq_72_1:mainq_72_2 = 1639
dlga_mainq_72_2:mainq_72_3 = 1640
dlga_mainq_72_3:mainq_72_4 = 1641
dlga_mainq_72_4:mainq_72_5 = 1642
dlga_mainq_72_5:close_window = 1643
dlga_temp_talker:mainq_70_1 = 1644
dlga_mainq_70_1:mainq_70_2 = 1645
dlga_mainq_70_2:mainq_70_3 = 1646
dlga_mainq_70_3:mainq_70_4 = 1647
dlga_mainq_70_4:mainq_70_5 = 1648
dlga_mainq_70_5:close_window = 1649
dlga_temp_talker:mainq_69_1 = 1650
dlga_mainq_69_1:mainq_69_2 = 1651
dlga_mainq_69_2:mainq_69_3 = 1652
dlga_mainq_69_3:mainq_69_4 = 1653
dlga_mainq_69_4:mainq_69_5 = 1654
dlga_mainq_69_5:mainq_69_6 = 1655
dlga_mainq_69_6:close_window = 1656
dlga_temp_talker:mainq_60_1 = 1657
dlga_mainq_60_1:mainq_60_2a = 1658
dlga_mainq_60_2a:close_window = 1659
dlga_mainq_60_1:mainq_60_2b = 1660
dlga_mainq_60_2b:mainq_60_3b = 1661
dlga_mainq_60_3b:mainq_60_4a = 1662
dlga_mainq_60_4a:close_window = 1663
dlga_mainq_60_3b:mainq_60_4b = 1664
dlga_mainq_60_4b:close_window = 1665
dlga_temp_talker:mainq_59_1 = 1666
dlga_mainq_59_1:mainq_59_2 = 1667
dlga_mainq_59_2:mainq_59_3 = 1668
dlga_mainq_59_3:mainq_59_4 = 1669
dlga_mainq_59_4:close_window = 1670
dlga_temp_talker:mainq_54_1 = 1671
dlga_mainq_54_1:mainq_54_2 = 1672
dlga_mainq_54_2:mainq_54_3 = 1673
dlga_mainq_54_3:mainq_54_4 = 1674
dlga_mainq_54_4:mainq_54_5 = 1675
dlga_mainq_54_5:mainq_54_6 = 1676
dlga_mainq_54_6:close_window = 1677
dlga_mq_95_1:mq_95_2 = 1678
dlga_mq_95_2:mq_95_3 = 1679
dlga_mq_95_3:mq_95_4 = 1680
dlga_mq_95_4:close_window = 1681
dlga_mq_94_1:mq_94_2 = 1682
dlga_mq_94_2:close_window = 1683
dlga_member_chat:close_window.1 = 1684
dlga_member_chat:regular_member_talk = 1685
dlga_regular_member_talk:view_regular_char_requested = 1686
dlga_view_regular_char_requested:do_regular_member_view_char = 1687
dlga_do_regular_member_view_char:regular_member_talk = 1688
dlga_regular_member_talk:close_window = 1689
dlga_prisoner_chat:prisoner_chat_2 = 1690
dlga_prisoner_chat_2:close_window = 1691
dlga_prisoner_chat_2:close_window.1 = 1692
|
from pathlib import Path
from typing import List, Dict, Any, Optional
import yaml
from remi import App
import os
DAY_FORMAT = "%Y-%m-%d"
HOUR_FORMAT = "%H:%M:%S"
DATE_FORMAT = f"{DAY_FORMAT} {HOUR_FORMAT}"
# the minimum number of seconds between two events for them to be
# considered as two different events
EVENTS_SEQUENCE_SEPARATION = 5
class Config:
    APP_INSTANCE: Optional[App] = None
APP_USERNAME: Optional[str] = os.environ.get("APP_USERNAME")
APP_PASSWORD: Optional[str] = os.environ.get("APP_PASSWORD")
APP_PORT: int = 4000
DATA_DIR = Path("data")
MODELS_DIR = Path("models")
STATIC_DATA_DIR = Path("app/static")
SNAPSHOTS_DIR = DATA_DIR / Path("snapshots")
CONFIG_PATH = Path("data/settings.yaml")
CAMERA_SNAPSHOT_PREVIEW_SIZE = (1440, 1080)
THUMBNAIL_SIZE = (224, 224)
MINI_THUMBNAIL_SIZE = (128, 128)
CAMERA_DEFAULT_IMAGE = STATIC_DATA_DIR / "images/placeholder.jpg"
FONT_PATH = STATIC_DATA_DIR / "fonts/InputSans-Regular.ttf"
LOGGER_HISTORY_SIZE = 5
@staticmethod
def list_models() -> List[str]:
return list(p.name for p in Config.MODELS_DIR.glob("*"))
@staticmethod
def dump_config(config: Dict[str, Any]):
"""Dump application config (camera parameters etc) to settings.yml """
with Config.CONFIG_PATH.open("w") as file:
yaml.dump(config, file)
@staticmethod
def load_config() -> Optional[Dict[str, Any]]:
if not Config.CONFIG_PATH.exists():
return None
with Config.CONFIG_PATH.open("r") as file:
config = yaml.load(file, Loader=yaml.Loader)
return config
|
import re
import os
import logging
from concurrent.futures import ProcessPoolExecutor
from libs.libmd5 import md5_for_text
from libs.librequests.api import (
download_text,
download_binary,
)
logger = logging.getLogger(__name__)
image_url_ptn = re.compile(r'src="(https://mmbiz.qpic.cn/mmbiz_.*?)"')
def get_img_urls(page_url):
content = download_text(page_url)
image_src_urls = image_url_ptn.findall(content)
return image_src_urls
def download_images(page_url, output_dir, log_func=print, thread_num=5):
image_src_urls = get_img_urls(page_url)
image_files = []
total = len(image_src_urls)
    log_func('Started downloading article images: %s images, concurrency: %s, article URL: %s' % (
        total, thread_num, page_url))
process_pool_executor = ProcessPoolExecutor(max_workers=thread_num)
futures = []
for idx, img_url in enumerate(image_src_urls):
url_md5 = md5_for_text(img_url)
filename = 'img-%03d-url-%s.jpg' % (idx+1, url_md5)
future = process_pool_executor.submit(
download_and_save_image, img_url, output_dir, filename)
futures.append([idx, future])
for idx, f in futures:
try:
fullpath = f.result(timeout=15)
except Exception:
logger.warning('failed to download image. idx: %s, article url: %s' % (
idx, page_url))
else:
image_files.append(fullpath)
# logger.info('(%s/%s) downloaded %s' % (idx, total, url))
return image_files
def download_and_save_image(url, output_dir, filename):
content = download_binary(url)
if not os.path.exists(output_dir): # pragma: no cover
os.makedirs(output_dir)
fullpath = os.path.join(output_dir, filename)
with open(fullpath, 'wb') as fw:
fw.write(content)
return fullpath
|
import json
import numpy as np
import os
import sys
import torch
from datasets import mnist, celeba
from neural_process import NeuralProcessImg
from time import strftime
from training import NeuralProcessTrainer
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Get config file from command line arguments
# if len(sys.argv) != 2:
# raise(RuntimeError("Wrong arguments, use python main_experiment.py <path_to_config>"))
config_path = 'config.json'
# Create a folder to store experiment results
timestamp = strftime("%Y-%m-%d_%H-%M")
directory = "results_{}".format(timestamp)
if not os.path.exists(directory):
os.makedirs(directory)
# Open config file
with open(config_path) as config_file:
config = json.load(config_file)
# Save config file in experiment directory
with open(directory + '/config.json', 'w') as config_file:
json.dump(config, config_file)
img_size = config["img_size"]
batch_size = config["batch_size"]
r_dim = config["r_dim"]
h_dim = config["h_dim"]
z_dim = config["z_dim"]
num_context_range = config["num_context_range"]
num_extra_target_range = config["num_extra_target_range"]
epochs = config["epochs"]
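# For reference, config.json must provide at least the keys read above plus the "dataset" and
# "lr" keys used below; the values here are purely illustrative, not taken from the project:
#   {"img_size": [1, 28, 28], "batch_size": 64, "r_dim": 128, "h_dim": 128, "z_dim": 64,
#    "num_context_range": [1, 200], "num_extra_target_range": [0, 200],
#    "epochs": 30, "dataset": "mnist", "lr": 0.001}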
if config["dataset"] == "mnist":
data_loader, _ = mnist(batch_size=batch_size, size=img_size[1])
elif config["dataset"] == "celeba":
data_loader = celeba(batch_size=batch_size, size=img_size[1])
classify = True
np_img = NeuralProcessImg(img_size, r_dim, z_dim, h_dim,classify=classify).to(device)
optimizer = torch.optim.Adam(np_img.parameters(), lr=config["lr"])
np_trainer = NeuralProcessTrainer(device, np_img, optimizer,
num_context_range, num_extra_target_range,
batch_size, print_freq=100, classify=classify)
for epoch in range(epochs):
print("Epoch {}".format(epoch + 1))
np_trainer.train(data_loader, 1)
# Save losses at every epoch
with open(directory + '/losses.json', 'w') as f:
json.dump(np_trainer.epoch_loss_history, f)
# Save model at every epoch
torch.save(np_trainer.neural_process.state_dict(), directory + '/model.pt')
|
# Generated by Django 2.2.5 on 2020-05-19 21:46
import atlas.models.change
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("atlas", "0029_change_user")]
operations = [
migrations.AddField(
model_name="change",
name="previous",
field=django.contrib.postgres.fields.jsonb.JSONField(
encoder=atlas.models.change.CustomJSONEncoder, null=True
),
),
migrations.AlterField(
model_name="change",
name="changes",
field=django.contrib.postgres.fields.jsonb.JSONField(
encoder=atlas.models.change.CustomJSONEncoder
),
),
migrations.AlterField(
model_name="change",
name="object_type",
field=models.CharField(
choices=[
("user", "user"),
("office", "office"),
("department", "department"),
],
max_length=32,
),
),
]
|
import numpy as np
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
import torchvision.transforms as transforms
from dataloader.gaussian_blur import GaussianBlur
from torchvision import datasets
from .dataset import ClrDataset
np.random.seed(0)
class DataSetWrapper(object):
def __init__(self,
batch_size,
num_workers,
valid_size,
input_shape,
s,
csv_file,
csv_test_file,
img_root_dir,
img_root_dir_test,
img_path_col,
text_col,
text_from_files,
text_root_dir):
self.batch_size = batch_size
self.num_workers = num_workers
self.valid_size = valid_size
self.s = s
self.input_shape = eval(input_shape)
self.csv_file = csv_file
self.csv_test_file = csv_test_file
self.img_root_dir = img_root_dir
self.img_root_dir_test = img_root_dir_test
self.img_path_col = img_path_col
self.text_col = text_col
self.text_from_files = text_from_files
self.text_root_dir = text_root_dir
def get_train_data_loaders(self):
data_augment = self._get_simclr_pipeline_transform()
train_dataset = ClrDataset(csv_file=self.csv_file,
img_root_dir=self.img_root_dir,
img_root_dir_test=self.img_root_dir_test,
input_shape = self.input_shape,
img_path_col = self.img_path_col,
text_col = self.text_col,
text_from_files = self.text_from_files,
text_root_dir = self.text_root_dir,
mode = 'train',
transform=SimCLRTrainDataTransform(data_augment)
)
print("num_train len : ", len(train_dataset))
# print("train_dataset_data1")
# print(train_dataset[0]['phrase'])
# print("train_dataset_data2")
# for ais, als in train_dataset:
# print(ais)
train_loader, valid_loader = self.get_train_validation_data_loaders(train_dataset)
return train_loader, valid_loader
def get_test_data_loaders(self):
data_augment = self._get_simclr_pipeline_transform()
test_dataset = ClrDataset(csv_file=self.csv_test_file,
img_root_dir=self.img_root_dir,
img_root_dir_test=self.img_root_dir_test,
input_shape = self.input_shape,
img_path_col = self.img_path_col,
text_col = self.text_col,
text_from_files = self.text_from_files,
text_root_dir = self.text_root_dir,
mode='test',
transform=SimCLRTestDataTransform(data_augment)
)
print("num_test len : ", len(test_dataset))
print(test_dataset)
# print("train_dataset_data1")
# print(train_dataset[0]['phrase'])
# print("train_dataset_data2")
# test_loader = DataLoader(test_dataset, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=False)
print("load test_loader....")
return test_dataset
def _get_simclr_pipeline_transform(self):
# get a set of data augmentation transformations as described in the SimCLR paper.
color_jitter = transforms.ColorJitter(0.8 * self.s, 0.8 * self.s, 0.8 * self.s, 0.2 * self.s)
data_transforms = transforms.Compose([
transforms.Scale((self.input_shape[0], self.input_shape[1])),
transforms.RandomResizedCrop(size=self.input_shape[0], scale=(0.8, 1.0)),
transforms.RandomHorizontalFlip(),
# transforms.RandomApply([color_jitter], p=0.8),
transforms.RandomGrayscale(p=0.2),
# GaussianBlur(kernel_size=int(0.1 * self.input_shape[0])),
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
return data_transforms
def get_train_validation_data_loaders(self, train_dataset):
# obtain training indices that will be used for validation
num_train = len(train_dataset)
indices = list(range(num_train))
np.random.shuffle(indices)
split = int(np.floor(self.valid_size * num_train))
train_idx, valid_idx = indices[split:], indices[:split]
# define samplers for obtaining training and validation batches
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)
# train_loader = DataLoader(train_dataset, batch_size=self.batch_size, num_workers=self.num_workers, drop_last=True, shuffle=False)
# valid_loader = DataLoader(train_dataset, batch_size=self.batch_size, num_workers=self.num_workers, drop_last=True)
train_loader = DataLoader(train_dataset, batch_size=self.batch_size, sampler=train_sampler,
num_workers=self.num_workers, drop_last=True, shuffle=False)
valid_loader = DataLoader(train_dataset, batch_size=self.batch_size, sampler=valid_sampler,
num_workers=self.num_workers, drop_last=True)
print("load train_loader....")
# print(train_idx)
# print(valid_idx)
# print(len(train_loader))
return train_loader, valid_loader
class SimCLRTrainDataTransform(object):
def __init__(self, transform_image):
self.transform_image = transform_image
def __call__(self, sample):
xi = self.transform_image(sample['image'])
xl = sample['phrase']
return xi, xl
class SimCLRTestDataTransform(object):
def __init__(self, transform_image):
self.transform_image = transform_image
def __call__(self, sample):
xi = self.transform_image(sample['image'])
id = sample['img_id']
return xi, id
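# A hedged usage sketch of the wrapper above (all paths and column names are placeholders,
# not part of the original project):
#   wrapper = DataSetWrapper(batch_size=32, num_workers=4, valid_size=0.1,
#                            input_shape="(224, 224, 3)", s=1.0,
#                            csv_file="train.csv", csv_test_file="test.csv",
#                            img_root_dir="images/train", img_root_dir_test="images/test",
#                            img_path_col="image_path", text_col="phrase",
#                            text_from_files=False, text_root_dir="")
#   train_loader, valid_loader = wrapper.get_train_data_loaders()
#   test_dataset = wrapper.get_test_data_loaders()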
|
#!/usr/bin/env python3
import requests
import yaml
import json
configFile = "config.yml"
# save api requests to files
def save(name, obj):
with open('users/'+name+'.yaml', 'w') as f:
yaml.dump(obj, f)
def printJson(json_object):
print(json.dumps(json_object, indent=1))
def getUsers(cnf):
cnf["max_pages"] = int(cnf["max_pages"])
for x in range(1, cnf["max_pages"]):
r = requests.get("https://api.github.com/search/users?q=followers:" + str(cnf["min_followers"]) + "&sort=followers&per_page="+ str(cnf["per_page"]))
data = r.json()
for user in data["items"]:
save(user["login"], user)
def readConfig():
with open(configFile, 'r') as f:
data = yaml.load(f, Loader=yaml.FullLoader)
getUsers(data["users"])
readConfig()
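# Hedged sketch of the expected config.yml layout (key names inferred from the code above,
# values illustrative only):
#   users:
#     min_followers: 1000
#     per_page: 100
#     max_pages: 10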
|
from os import listdir
from PIL import Image
import os
dataset = "./cleaned/"
train_path = dataset + "train_frames/"
val_path = dataset + "val_frames/"
test_path = dataset + "test_frames/"
sets = [train_path, val_path, test_path]
for dataset in sets:
emotions = os.listdir(dataset)
for emotion in emotions:
        frames = os.listdir(dataset+emotion)
i=0
for frame in frames:
if frame.endswith('.bmp'):
try:
                    img = Image.open(dataset+emotion + "/" + frame) # open the image file
img.verify() # verify that it is, in fact an image
except (IOError, SyntaxError) as e:
print('Bad file:', frame, e) # print out the names of corrupt files
i+=1
print("Frame: " + frame + " is the number " + i +" corrupted frame")
|
import os
import sys
import re
import glob
import urllib.request, urllib.error, urllib.parse
import json
from MediaManager.image.library.image import ImageInfo
class AlbumArtLibrary(object):
def __init__(self, image_cache_path, online=True):
self.__image_cache_path = image_cache_path
self.__not_found = {}
self.__online = online
def cover_image(self, album_info):
if album_info in self.__not_found:
return None
file_name = 'cover.%s' % album_info.name
glob_pattern = os.path.join(self.__image_cache_path, file_name + '*')
glob_pattern = glob_pattern.replace('[', '?').replace(']', '?')
files = glob.glob(glob_pattern)
if files:
return ImageInfo(files[0])
print('# No match for: %s' % os.path.join(self.__image_cache_path, file_name + '*'))
if not self.__online:
self.__not_found[album_info] = None
return None
query = '"%s" %s album cover' % (album_info.artist, album_info.title)
query = query.replace('_', ' ')
query = query.replace('-', ' ')
urls = self.__google_image_search(query)
if not urls:
self.__not_found[album_info] = None
return None
for url in urls:
image_url = url['url']
file_ext = url['ext']
print('# Fetching: %s' % image_url)
try:
response = urllib.request.urlopen(image_url)
break
except (urllib.error.HTTPError, urllib.error.URLError) as e:
print('! %s' % e, file=sys.stderr)
else:
self.__not_found[album_info] = None
return None
file_path = os.path.join(self.__image_cache_path, '%s.%s' % (file_name, file_ext))
data = response.read()
if not data:
self.__not_found[album_info] = None
return None
open(file_path, 'wb').write(data)
print('# Wrote: %s' % file_path)
return ImageInfo(file_path)
def __google_image_search(self, query):
fields = {'tbm': 'isch',
'tbs': 'isz:m',
'q': query,
}
url = ('https://www.google.com/search?' + urllib.parse.urlencode(fields))
print('# Google Image Search: %s' % query)
headers = {
'Referer': '',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
}
        request = urllib.request.Request(url, None, headers)
try:
response = urllib.request.urlopen(request)
except urllib.error.URLError as e:
print('! %s' % e, file=sys.stderr)
return None
content = response.read().decode()
image_json = re.compile('<div class="rg_meta notranslate">(?P<image>.*?)</div>')
images = []
for m in image_json.finditer(content):
try:
images.append(json.loads(m.group('image')))
except ValueError:
pass
return [{'url': i['ou'], 'ext': i['ity']} for i in images
if i['ity'] in ('jpg', 'jpeg', 'png', 'gif') and
i['oh'] >= 200 and
i['ow'] >= 200]
|
import torch
import torch.nn as nn
from mimic.networks.FeatureCompressor import LinearFeatureCompressor
from mimic.networks.char_encoding import DataGeneratorText as DataGeneratorText_CharEnc
from mimic.networks.char_encoding import FeatureExtractorText as FeatureExtractorText_CharEnc
from mimic.networks.word_encoding import DataGeneratorText as DataGeneratorText_WordEnc
from mimic.networks.word_encoding.mmvae_text_enc import FeatureExtractorText as FeatureExtractorText_WordEnc
class EncoderText(nn.Module):
def __init__(self, flags, style_dim):
super(EncoderText, self).__init__()
self.args = flags
if flags.text_encoding == 'char':
self.feature_extractor = FeatureExtractorText_CharEnc(flags)
elif flags.text_encoding == 'word':
self.feature_extractor = FeatureExtractorText_WordEnc(flags)
self.feature_compressor = LinearFeatureCompressor(5 * flags.DIM_text,
style_dim,
flags.class_dim)
def forward(self, x_text):
# d_model must be divisible by nhead
# text_in = nn.functional.one_hot(x_text.to(torch.int64), num_classes=self.args.vocab_size)
# encoder_layer = nn.TransformerEncoderLayer(d_model=x_text.shape[-1], nhead=8)
# transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=8)
# h_text = transformer_encoder(text_in)
# todo is this better?
h_text = self.feature_extractor(x_text)
if self.feature_compressor.style_mu and self.feature_compressor.style_logvar:
mu_style, logvar_style, mu_content, logvar_content = self.feature_compressor(h_text)
return mu_content, logvar_content, mu_style, logvar_style
else:
mu_content, logvar_content = self.feature_compressor(h_text)
return mu_content, logvar_content
class DecoderText(nn.Module):
def __init__(self, flags, style_dim):
super(DecoderText, self).__init__()
self.flags = flags
self.feature_generator = nn.Linear(style_dim + flags.class_dim,
5 * flags.DIM_text, bias=True)
if flags.text_encoding == 'char':
self.text_generator = DataGeneratorText_CharEnc(flags)
elif flags.text_encoding == 'word':
self.text_generator = DataGeneratorText_WordEnc(flags)
# self.text_generator = Dec(flags)
def forward(self, z_style, z_content):
if self.flags.factorized_representation:
z = torch.cat((z_style, z_content), dim=1).squeeze(-1)
# z.shape = [100, 64]
else:
z = z_content
text_feat_hat = self.feature_generator(z)
text_feat_hat = text_feat_hat.unsqueeze(-1)
# predict in batches to spare GPU memory
if text_feat_hat.shape[0] > self.flags.batch_size:
dl = torch.utils.data.DataLoader(text_feat_hat, batch_size=self.flags.batch_size)
text_hat = torch.Tensor().to(self.flags.device)
for batch in dl:
text_hat = torch.cat(tensors=(text_hat, self.text_generator(batch)))
else:
text_hat = self.text_generator(text_feat_hat)
text_hat = text_hat.transpose(-2, -1)
return [text_hat]
|
import sys
import json
import redis
if __name__ == '__main__':
with open(sys.argv[1]) as f:
config = json.load(f)
host = config['redis_address']
port = config['redis_port']
    redis_client = redis.Redis(host=host, port=port)
# Load the config file
routine_config_file_path = config['routine_config_file']
with open(routine_config_file_path) as f:
routine_config = json.load(f)
message = json.dumps(routine_config)
channel = config['source']
redis_client.publish(channel=channel, message=message)
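    # Hedged sketch of a matching consumer for the message published above (the blocking loop
    # and JSON decoding are assumptions, not part of this script):
    #   sub = redis_client.pubsub()
    #   sub.subscribe(channel)
    #   for item in sub.listen():
    #       if item["type"] == "message":
    #           routine = json.loads(item["data"])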
|
from typing import List
class Solution:
def mostCommonWord(self, paragraph: str, banned: List[str]) -> str:
strs = []
tmp = ''
for s in paragraph:
if s in '!? \';.,':
if tmp:
strs.append(tmp)
tmp = ''
else:
tmp += s.lower()
if tmp:
strs.append(tmp)
cnt = {}
max_num = 0
res = ''
banned = set(banned)
for string in strs:
if string not in banned:
if string not in cnt:
cnt[string] = 1
else:
cnt[string] += 1
if cnt[string] > max_num:
max_num = cnt[string]
res = string
return res
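if __name__ == "__main__":
    # Hedged usage example (not part of the original submission): "hit" is banned and
    # "ball" appears twice, so "ball" is the expected output.
    print(Solution().mostCommonWord(
        "Bob hit a ball, the hit BALL flew far after it was hit.", ["hit"]))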
|
#coding: utf-8
SAMPLE_RATE = 44100
WAVETABLE_SAMPLE_SIZE = 512
KEYNOTE_FREQ = [27.5, 29.13523509, 30.86770633, 32.70319566, 34.64782887, 36.70809599, 38.89087297, 41.20344461, 43.65352893, 46.24930284, 48.9994295, 51.9130872, 55, 58.27047019, 61.73541266, 65.40639133, 69.29565774, 73.41619198, 77.78174593, 82.40688923, 87.30705786, 92.49860568, 97.998859, 103.8261744, 110, 116.5409404, 123.4708253, 130.8127827, 138.5913155, 146.832384, 155.5634919, 164.8137785, 174.6141157, 184.9972114, 195.997718, 207.6523488, 220, 233.0818808, 246.9416506, 261.6255653, 277.182631, 293.6647679, 311.1269837, 329.6275569, 349.2282314, 369.9944227, 391.995436, 415.3046976, 440, 466.1637615, 493.8833013, 523.2511306, 554.365262, 587.3295358, 622.2539674, 659.2551138, 698.4564629, 739.9888454, 783.990872, 830.6093952, 880, 932.327523, 987.7666025, 1046.502261, 1108.730524, 1174.659072, 1244.507935, 1318.510228, 1396.912926, 1479.977691, 1567.981744, 1661.21879, 1760, 1864.655046, 1975.533205, 2093.004522, 2217.461048, 2349.318143, 2489.01587, 2637.020455, 2793.825851, 2959.955382, 3135.963488, 3322.437581, 3520, 3729.310092, 3951.06641, 4186.009045]
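# The loop below converts each key frequency into what appears to be a 16.16 fixed-point phase
# increment for a 512-sample wavetable played back at 44100 Hz. As a hedged sanity check, for
# A4 = 440 Hz: (512 << 16) * 440 / 44100 = 33554432 * 440 / 44100 ~= 334783.4, rounding to 334783.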
keynote_delta_int32 = []
for frequency in KEYNOTE_FREQ:
val = WAVETABLE_SAMPLE_SIZE << 16
val *= frequency
val /= SAMPLE_RATE
val = int(round(val))
keynote_delta_int32.append(val)
f = open('keydelta.c', 'w')
f.write("#include \"keydelta.h\"\n\nkeydelta_t key_delta[" + str(len(keynote_delta_int32)) + "] = {" + ", ".join([str(x) for x in keynote_delta_int32]) + "};\n")
f.close()
|
import rospy
from std_msgs.msg import String
def callback(msg):
    pass
if __name__=="__main__":
rospy.init_node("start")
rospy.logerr("script start")
    rospy.Subscriber("lll", String, callback)
rospy.spin()
|
import os
import json
from typing import Dict
from athena.helpers.exception import EnvironmentException
ATTACH = '__attachments__'
MESSAGE = 'message'
def structure(clz=None):
"""
The structure decorator provides an interface to create structured data by defining inner data
classes within a Python object. It respects Python typing and attribute syntax: object type
annotations are applied when deserializing JSON, and exceptions are thrown in cases where
underlying data is not represented and no default value has been provided. In cases where a
default value is provided, the data within the structured source will override the underlying
default if present.
"""
attachments = []
# we look at annotations too, since attributes will not exist for required attachments
for att_name in set(getattr(clz, '__annotations__', ())).union(set(dir(clz)) - set(dir(object))):
# we don't want to consider class attributes
if not att_name.startswith('__'):
attachments.append(att_name)
setattr(clz, ATTACH, attachments)
return clz
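# A minimal usage sketch for the decorator above (class and attribute names are hypothetical):
#
#   @structure
#   class Database:
#       host: str          # required: no default, so a missing value raises EnvironmentException
#       port: int = 5432   # optional: a retrieved value, if any, overrides this default
#
#   object_to_attributes(config_holder, os.environ.get) would then fill `host`/`port` on the
#   matching inner class of `config_holder`, casting each value according to its annotation.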
def cast(obj, value):
if getattr(obj, '__annotations__', None):
clazz = obj.__annotations__.get(value)
if getattr(obj, value, False) is not None: # don't try to cast a typed None
if clazz == bool:
return lambda val: str(val) in ["True", "true", "TRUE", "t", "T", "1"]
return clazz
return lambda val: val
def object_to_attributes(obj, retrieve):
for clz_name in (set(dir(obj)) - set(dir(object))):
clz = getattr(obj, clz_name)
if hasattr(clz, ATTACH):
clz = clz() # reset attachments from prior runs
for att_name in getattr(clz, ATTACH):
value = retrieve(att_name, None)
if value is None and not hasattr(clz, att_name):
exception_message = f"Missing required configuration: '{att_name}'"
raise EnvironmentException(exception_message)
setattr(clz, att_name, cast(clz, att_name)(value or getattr(clz, att_name, None)))
setattr(obj, clz_name, clz)
def object_to_dictionary(obj) -> Dict[str, any]:
message = {}
for clz_name in (set(dir(obj)) - set(dir(object))):
clz = getattr(obj, clz_name)
if hasattr(clz, ATTACH):
for att_name in getattr(clz, ATTACH):
attribute = getattr(clz, att_name, None)
if attribute:
message[att_name] = attribute
return message
def read_message(path=''):
if os.path.exists('%smessage.json' % path):
with open('%smessage.json' % path) as input_message:
message = json.loads(input_message.read())
return message
return {}
def write_message(message: Dict[str, any], path=''):
if not os.path.exists(path):
os.makedirs(path)
if len(message) > 0:
with open('%smessage.json' % path, 'w') as file:
file.write(json.dumps(message))
|
from flask import abort
import requests
def readable_format(num):
magnitude = 0
while abs(num) >= 1000:
magnitude += 1
num /= 1000.0
num = num - num % 0.01
return '{}{}'.format('{:f}'.format(num).rstrip('0').rstrip('.'), ['', 'K', 'M', 'B', 'T'][magnitude])
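# Hedged example of the helper above: readable_format(1_234_567) yields roughly '1.23M';
# the `num - num % 0.01` line truncates the scaled value to two decimal places.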
def multi_scrape(string_start, string_end, data):
index_start = 0
index_end = 0
for i in data:
if string_start in i:
index_start = data.index(i)
if string_end in i:
i.find(string_end)
index_end = data.index(i)
output = ''
for index in range(index_start, index_end+1):
output += data[index] + ','
if 'date' in data[index+1]:
output += '|'
return output
def get_earnings(input_data):
output = []
data = multi_scrape('earningsChart', 'currentQuarterEstimateYear', input_data)
data_arr = data.split('[')[1].split(']')[0].split('|')
for date in data_arr:
q_date = date.split(':')[1].split(',')[0].strip('\"')
q_date = q_date[1] + q_date[0] + '_' + q_date[2:]
estimate = float(date.split(':')[7].split(',')[0].strip('}').strip('\"'))
actual = float(date.split(':')[4].split(',')[0].strip('}').strip('\"'))
output.append({q_date: {'estimate': estimate,
'actual': actual
}
})
return output
def get_financials(input_data):
output = []
data = multi_scrape('financialsChart', 'financialCurrency', input_data)
data_arr_y = data.split('quarterly')[0].split('[')[1].split(']')[0].split('|')
data_arr_q = data.split('quarterly')[1].split('[')[1].split(']')[0].split('|')
yearly = []
for date in data_arr_y:
y_date = date.split(':')[1].split(',')[0]
revenue = float(date.split(':')[3].split(',')[0])
earnings = float(date.split(':')[7].split(',')[0])
yearly.append({y_date: {'revenue': readable_format(revenue),
'earnings': readable_format(earnings)
}
})
quarterly = []
for date in data_arr_q:
q_date = date.split(':')[1].split(',')[0].strip('\"')
q_date = q_date[1] + q_date[0] + '_' + q_date[2:]
revenue = float(date.split(':')[3].split(',')[0])
earnings = float(date.split(':')[7].split(',')[0])
quarterly.append({q_date: {'revenue': readable_format(revenue),
'earnings': readable_format(earnings)
}
})
output.append({'yearly': yearly})
output.append({'quarterly': quarterly})
return output
def data_fetch(symbol):
rating = key = target_avg = target_low = target_high = price = currency = None
url = f'https://finance.yahoo.com/quote/{symbol}'
data = requests.get(url).text.split(',')
for i in data:
if "All (0)" in i:
return abort(404)
else:
if 'recommendationMean' in i:
rating = float(i.split(':')[-1])
if 'recommendationKey' in i:
key = i.split(':')[1].strip('""')
if 'targetMeanPrice' in i:
target_avg = float(i.split(':')[-1])
if 'targetLowPrice' in i:
target_low = float(i.split(':')[-1])
if 'targetHighPrice' in i:
target_high = float(i.split(':')[-1])
if 'currentPrice' in i:
price = float(i.split(':')[-1])
if 'financialCurrency' in i:
currency = i.split(':')[-1].strip('""')
earnings = get_earnings(data)
financials = get_financials(data)
return {symbol: {'recRating': rating,
'recKey': key,
'analystTarget': {
'current': price,
'avg': target_avg,
'low': target_low,
'high': target_high
},
'currency': currency,
'earnings': earnings,
'financials': financials
}
}
|
import os
import re
import sphinx
import sys
import urllib
from docutils import nodes
from docutils.parsers.rst import directives
from jinja2 import Template
from pkg_resources import parse_version
from sphinx.environment import NoUri
from sphinxcontrib.needs.utils import status_sorter
from sphinxcontrib.needs.filter_common import FilterBase, procces_filters
sphinx_version = sphinx.__version__
if parse_version(sphinx_version) >= parse_version("1.6"):
from sphinx.util import logging
else:
import logging
logger = logging.getLogger(__name__)
if sys.version_info.major < 3:
urlParse = urllib.quote_plus
else:
urlParse = urllib.parse.quote_plus
class Needflow(nodes.General, nodes.Element):
pass
class NeedflowDirective(FilterBase):
"""
Directive to filter needs and present them inside a list, table or diagram.
.. deprecated:: 0.2.0
Use needlist, needtable or needdiagram instead
"""
option_spec = {'show_legend': directives.flag,
'show_filters': directives.flag,
'show_link_names': directives.flag,
'link_types': directives.unchanged_required}
# Update the options_spec with values defined in the FilterBase class
option_spec.update(FilterBase.base_option_spec)
def run(self):
env = self.state.document.settings.env
if not hasattr(env, 'need_all_needflows'):
env.need_all_needflows = {}
# be sure, global var is available. If not, create it
if not hasattr(env, 'needs_all_needs'):
env.needs_all_needs = {}
id = env.new_serialno('needflow')
targetid = "needflow-{docname}-{id}".format(
docname=env.docname,
id=id)
targetnode = nodes.target('', '', ids=[targetid])
link_types = self.options.get("link_types", [])
if len(link_types) > 0:
link_types = [link_type.strip() for link_type in re.split(";|,", link_types)]
for i in range(len(link_types)):
if len(link_types[i]) == 0 or link_types[i].isspace():
del (link_types[i])
logger.warning('Scruffy link_type definition found in needflow {}. '
'Defined link_type contains spaces only.'.format(id))
# Add the need and all needed information
env.need_all_needflows[targetid] = {
'docname': env.docname,
'lineno': self.lineno,
'target_node': targetnode,
'show_filters': True if self.options.get("show_filters", False) is None else False,
'show_legend': True if self.options.get("show_legend", False) is None else False,
'show_link_names': True if self.options.get("show_link_names", False) is None else False,
'link_types': link_types,
'export_id': self.options.get("export_id", ""),
'env': env
}
env.need_all_needflows[targetid].update(self.collect_filter_attributes())
return [targetnode] + [Needflow('')]
def make_entity_name(name):
"""Creates a valid PlantUML entity name from the given value."""
invalid_chars = "-=!#$%^&*[](){}/~'`<>:;"
for char in invalid_chars:
name = name.replace(char, "_")
return name
def process_needflow(app, doctree, fromdocname):
# Replace all needflow nodes with a list of the collected needs.
# Augment each need with a backlink to the original location.
env = app.builder.env
link_types = env.config.needs_extra_links
allowed_link_types_options = [link.upper() for link in env.config.needs_flow_link_types]
# NEEDFLOW
for node in doctree.traverse(Needflow):
if not app.config.needs_include_needs:
            # Ok, this is really dirty.
            # If we replace a node, docutils checks that it will not lose any attributes.
            # That is exactly the case here, because we use the "ids" attribute of the node.
            # Since losing these attributes is not a problem for us, we simply clear them all
            # before docutils can complain about it.
for att in ('ids', 'names', 'classes', 'dupnames'):
node[att] = []
node.replace_self([])
continue
id = node.attributes["ids"][0]
current_needflow = env.need_all_needflows[id]
all_needs = env.needs_all_needs
option_link_types = [link.upper() for link in current_needflow['link_types']]
for lt in option_link_types:
if lt not in [link['option'].upper() for link in link_types]:
                logger.warning('Unknown link type {link_type} in needflow {flow}. Allowed values: {link_types}'.format(
                    link_type=lt, flow=current_needflow['target_node'],
                    link_types=",".join(link['option'] for link in link_types)
                ))
content = []
try:
if "sphinxcontrib.plantuml" not in app.config.extensions:
raise ImportError
from sphinxcontrib.plantuml import plantuml
except ImportError:
content = nodes.error()
para = nodes.paragraph()
text = nodes.Text("PlantUML is not available!", "PlantUML is not available!")
para += text
content.append(para)
node.replace_self(content)
continue
plantuml_block_text = ".. plantuml::\n" \
"\n" \
" @startuml" \
" @enduml"
puml_node = plantuml(plantuml_block_text, **dict())
puml_node["uml"] = "@startuml\n"
puml_connections = ""
all_needs = list(all_needs.values())
found_needs = procces_filters(all_needs, current_needflow)
processed_need_part_ids = []
for need_info in found_needs:
# Check if need_part was already handled during handling of parent need.
# If this is the case, it is already part of puml-code and we do not need to create a node.
if not (need_info['is_part'] and need_info['id_complete'] in processed_need_part_ids):
                # Check if we need to embed need_parts into the parent need, because they are also part of the search result.
node_part_code = ""
valid_need_parts = [x for x in found_needs if x['is_part'] and x['id_parent'] == need_info['id']]
for need_part in valid_need_parts:
part_link = calculate_link(app, need_part)
diagram_template = Template(env.config.needs_diagram_template)
part_text = diagram_template.render(**need_part)
node_part_code += '{style} "{node_text}" as {id} [[{link}]] {color}\n'.format(
id=make_entity_name(need_part["id_complete"]), node_text=part_text,
link=make_entity_name(part_link), color=need_part["type_color"],
style=need_part["type_style"])
processed_need_part_ids.append(need_part['id_complete'])
link = calculate_link(app, need_info)
diagram_template = Template(env.config.needs_diagram_template)
node_text = diagram_template.render(**need_info)
if need_info['is_part']:
need_id = need_info['id_complete']
else:
need_id = need_info['id']
node_code = '{style} "{node_text}" as {id} [[{link}]] {color} {{\n {need_parts} }}\n'.format(
id=make_entity_name(need_id), node_text=node_text,
link=make_entity_name(link), color=need_info["type_color"],
style=need_info["type_style"], need_parts=node_part_code)
puml_node["uml"] += node_code
for link_type in link_types:
# Skip link-type handling, if it is not part of a specified list of allowed link_types or
# if not part of the overall configuration of needs_flow_link_types
if (current_needflow["link_types"] and link_type['option'].upper() not in option_link_types) or \
(not current_needflow["link_types"] and \
link_type['option'].upper() not in allowed_link_types_options):
continue
for link in need_info[link_type['option']]:
if '.' in link:
# final_link = link.split('.')[0]
final_link = link
if current_needflow["show_link_names"] or env.config.needs_flow_show_links:
desc = link_type['outgoing'] + '\\n'
else:
desc = ''
comment = ': {desc}{part}'.format(desc=desc, part=link.split('.')[1])
if "style_part" in link_type.keys() and link_type['style_part'] is not None and \
len(link_type['style_part']) > 0:
link_style = '[{style}]'.format(style=link_type['style_part'])
else:
link_style = "[dotted]"
else:
final_link = link
if current_needflow["show_link_names"] or env.config.needs_flow_show_links:
comment = ': {desc}'.format(desc=link_type['outgoing'])
else:
comment = ''
if "style" in link_type.keys() and link_type['style'] is not None and \
len(link_type['style']) > 0:
link_style = '[{style}]'.format(style=link_type['style'])
else:
link_style = ""
                        # Do not create any links if the link target is not part of the search result.
if final_link not in [x['id'] for x in found_needs if x['is_need']] and \
final_link not in [x['id_complete'] for x in found_needs if x['is_part']]:
continue
puml_connections += '{id} --{link_style}> {link}{comment}\n'.format(
id=make_entity_name(need_info["id"]),
link=make_entity_name(final_link),
comment=comment,
link_style=link_style
)
puml_node["uml"] += puml_connections
# Create a legend
if current_needflow["show_legend"]:
puml_node["uml"] += "legend\n"
puml_node["uml"] += "|= Color |= Type |\n"
for need in app.config.needs_types:
puml_node["uml"] += "|<back:{color}> {color} </back>| {name} |\n".format(
color=need["color"], name=need["title"])
puml_node["uml"] += "endlegend\n"
puml_node["uml"] += "@enduml"
puml_node["incdir"] = os.path.dirname(current_needflow["docname"])
puml_node["filename"] = os.path.split(current_needflow["docname"])[1] # Needed for plantuml >= 0.9
content.append(puml_node)
if len(content) == 0:
nothing_found = "No needs passed the filters"
para = nodes.paragraph()
nothing_found_node = nodes.Text(nothing_found, nothing_found)
para += nothing_found_node
content.append(para)
if current_needflow["show_filters"]:
para = nodes.paragraph()
filter_text = "Used filter:"
filter_text += " status(%s)" % " OR ".join(current_needflow["status"]) if len(
current_needflow["status"]) > 0 else ""
if len(current_needflow["status"]) > 0 and len(current_needflow["tags"]) > 0:
filter_text += " AND "
filter_text += " tags(%s)" % " OR ".join(current_needflow["tags"]) if len(
current_needflow["tags"]) > 0 else ""
if (len(current_needflow["status"]) > 0 or len(current_needflow["tags"]) > 0) and len(
current_needflow["types"]) > 0:
filter_text += " AND "
filter_text += " types(%s)" % " OR ".join(current_needflow["types"]) if len(
current_needflow["types"]) > 0 else ""
filter_node = nodes.emphasis(filter_text, filter_text)
para += filter_node
content.append(para)
node.replace_self(content)
def calculate_link(app, need_info):
# Link calculation
# All links we can get from docutils functions will be relative.
# But the generated link in the svg will be relative to the svg-file location
# (e.g. server.com/docs/_images/sqwxo499cnq329439dfjne.svg)
# and not to current documentation. Therefore we need to add ../ to get out of the _image folder.
try:
link = "../" + app.builder.get_target_uri(need_info['docname']) \
+ "?highlight={0}".format(urlParse(need_info['title'])) \
+ "#" \
               + need_info['target_node']['refid']
    # The except branch below is hit mostly during latex generation
except NoUri:
link = ""
return link
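# Illustrative example with assumed values: for a need defined on page "spec/index" with
# title "Login" and target refid "req_login", the calculated link would look roughly like
# "../spec/index.html?highlight=Login#req_login", which resolves correctly from the
# _images directory where the rendered SVG ends up.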
|
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='docxify',
version='0.1.3',
description='docxify and dedocxify files for fun and profit',
url='https://github.com/giphahne/docxify',
author='Dan Hahne',
author_email='contact@danhahne.com',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
],
install_requires=['base58', 'python-docx', 'argcomplete'],
packages=['docxify'],
entry_points={
'console_scripts': [
'docxify=docxify:docxify',
'dedocxify=docxify:dedocxify',
],
},
)
|
from django.utils.html import format_html
from logger.models import User
from functools import reduce
import django_tables2 as tables
import math
import datetime
class UserTable(tables.Table):
def render_card_id(self, value):
return format_html('<a href="{}">{}</a>', value, value)
class Meta:
model = User
class DayTable(tables.Table):
user = tables.Column(order_by=('user.last_name', 'user.first_name'))
attendance = tables.Column()
starttimes = tables.Column()
endtimes = tables.Column()
def render_user(self, value):
return format_html('<a href="{}">{}</a>',
"/logger/user/"+str(value.card_id),
value.last_name.capitalize() +" "+ value.first_name.capitalize())
def render_attendance(self, value):
if(isinstance(value, datetime.timedelta)):
return math.floor(value.seconds/60)
else:
return value
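    # The two renderers below flatten a list of datetime.time values into one
    # comma-separated "HH:MM:SS" string; reduce() folds left to right, so partially
    # joined string fragments and raw time objects can be mixed in the same call.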
def render_starttimes(self, value):
return reduce((lambda x,y: (x.strftime("%H:%M:%S") if not isinstance(x, str) else x) + ", " + (y.strftime("%H:%M:%S") if not isinstance(y, str) else y)), value)
def render_endtimes(self, value):
return reduce((lambda x,y: (x.strftime("%H:%M:%S") if not isinstance(x, str) else x) + ", " + (y.strftime("%H:%M:%S") if not isinstance(y, str) else y)), value)
|
import unittest
import EllucianEthosPythonClient
import pytz
import datetime
class test_WorkerThread_prefCounterTimeValueISBefore(unittest.TestCase):
def test_nextTimeSimple(self):
self.assertEqual(
EllucianEthosPythonClient.getNextWorkerTime(
lastRunTime=pytz.timezone('UTC').localize(datetime.datetime(2020, 1, 14, 23, 3, second=5)),
curTime=pytz.timezone('UTC').localize(datetime.datetime(2020, 1, 14, 23, 3, second=10)),
frequency=6
),
pytz.timezone('UTC').localize(datetime.datetime(2020, 1, 14, 23, 3, second=11))
)
def test_nextTimeMissedRun(self):
self.assertEqual(
EllucianEthosPythonClient.getNextWorkerTime(
lastRunTime=pytz.timezone('UTC').localize(datetime.datetime(2020, 1, 14, 23, 3, second=5)),
curTime=pytz.timezone('UTC').localize(datetime.datetime(2020, 1, 14, 23, 3, second=13)),
frequency=6
),
pytz.timezone('UTC').localize(datetime.datetime(2020, 1, 14, 23, 3, second=17))
)
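# Note inferred from the two cases above (not stated here by the library): getNextWorkerTime
# appears to return lastRunTime + frequency when that moment is still ahead of curTime, and
# otherwise keeps adding whole `frequency` intervals until the result lies after curTime,
# so missed runs are skipped instead of being executed back to back.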
|
from django.test import TestCase
from django.contrib.auth.models import User
from .models import Project, Rating, UserProfile
# Create your tests here.
class TestProject(TestCase):
def setUp(self):
self.user=User(email='francis@gmail.com',username='fgithae',first_name='user',last_name='fgithae')
self.project=Project(title='insta',description='insta clone',url='https://insta-mirro.com',image='https://cloudinary.com/insta.jpg',user=self.user)
self.profile=UserProfile(name='francis githae',bio='a frank effect',user=self.user,profile_photo='photo/photo.jpg')
self.rating=Rating(user=self.user,project=self.project,design_rating=5,usability_rating=6,content_rating=5)
def test_instance(self):
self.assertTrue(isinstance(self.user,User))
self.assertTrue(isinstance(self.project,Project))
def test_save_project(self):
self.user.save()
self.project.save_project()
self.assertTrue(len(Project.objects.all())>0)
def test_save_profile(self):
self.user.save()
self.profile.save_profile()
self.assertTrue(len(UserProfile.objects.all())>0)
def test_save_rating(self):
self.user.save()
self.project.save_project()
self.rating.save_rating()
self.assertTrue(len(Rating.objects.all())>0)
def test_get_all_projects(self):
self.user.save()
self.project.save_project()
self.all_projects=Project.get_all_projects()
self.assertEquals(len(self.all_projects),len(Project.objects.all()))
|
#!/usr/bin/env python3
NUMBER_OF_MARKS = 5
if __name__ == '__main__':
mark_list = []
for count in range(NUMBER_OF_MARKS):
while True:
mark = int(input(f'Enter mark #{count + 1}: '))
if 0 <= mark <= 100:
mark_list.append(mark)
break
else:
print('Out of range.')
avg_mark = sum(mark_list) / NUMBER_OF_MARKS
print()
print(f'Average Mark: {avg_mark}')
print(f'Highest Mark: {max(mark_list)}')
print(f'Lowest Mark: {min(mark_list)}')
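# Worked example (illustrative run): entering 70, 80, 90, 60 and 100 gives
# Average Mark: 80.0, Highest Mark: 100, Lowest Mark: 60.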
|
"""
Definition of views.
"""
from django.conf import settings
from django.shortcuts import render
from django.http import HttpRequest, JsonResponse, HttpResponseRedirect, HttpResponse
from django.template import RequestContext
from cgi import parse_qs, escape
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth import authenticate
from django.contrib.auth import views as auth_views
from django.core.urlresolvers import reverse
from app.models import *
from app.forms import *
from django.core import serializers
from django.utils import timezone, translation
from django.utils.translation import ugettext_lazy as _
import json
import datetime
import pytz
import os
import phonenumbers
from .forms import *
from django.forms.utils import ErrorList
from django.contrib.auth.models import User
from azure.storage.blob import BlockBlobService
from azure.storage.blob import PublicAccess
from azure.storage.blob import ContentSettings
import logging
from redcap import Project, RedcapError
#import urllib
from twilio.access_token import AccessToken, IpMessagingGrant
from twilio.rest.ip_messaging import TwilioIpMessagingClient
from pyfcm import FCMNotification
class DivErrorList(ErrorList):
def __unicode__(self): # __unicode__ on Python 2
return self.as_spans()
def as_spans(self):
if not self: return ""
return "".join(['<div><span class="control-label">%s</span></div>' % e for e in self])
def dashboard(request):
"""Renders the dashboard page."""
assert isinstance(request, HttpRequest)
# Check if user is logged in. Otherwise redirect to login page.
if not request.user.is_authenticated():
return HttpResponseRedirect(reverse("login"))
unread_patients = []
unread_patients = Patient.objects.filter(unread_messages__gt=0)
# Maps patient object to a mapping of esas surveys with a symptom over 7
# to the corresponding dashboard alert
unordered_patient_esas_alert = {}
ordered_patient_esas_alert = []
esas_alerts = DashboardAlert.objects.filter(category=DashboardAlert.ESAS)
sorted_esas = []
for alert in esas_alerts:
sorted_esas.append(ESASSurvey.objects.get(pk=alert.item_pk))
# Sort esas by created_date
sorted_esas.sort(key=lambda x: x.created_date, reverse=True)
# Get all the patients, and related alerts and esas
for esas in sorted_esas:
alert = DashboardAlert.objects.get(category=DashboardAlert.ESAS, item_pk=esas.pk)
if alert.patient not in unordered_patient_esas_alert.keys():
unordered_patient_esas_alert[alert.patient] = ()
        # Convert between tuple and list here: tuples are kept as the dict values and
        # switched to a list only while appending, so insertion order is preserved
tup = unordered_patient_esas_alert[alert.patient]
lst = list(tup)
lst.append((esas, alert))
unordered_patient_esas_alert[alert.patient] = tuple(lst)
# Sort patients by full_name
esas_patients = unordered_patient_esas_alert.keys()
esas_patients.sort(key=lambda x: x.full_name)
for patient in esas_patients:
ordered_patient_esas_alert.append((patient, unordered_patient_esas_alert[patient]))
# Maps patient object to a mapping of incomplete medication reports
# to the corresponding dashboard alert
unordered_patient_medication_alert = {}
ordered_patient_medication_alert = []
medication_alerts = DashboardAlert.objects.filter(category=DashboardAlert.MEDICATION)
sorted_medication = []
for alert in medication_alerts:
sorted_medication.append(MedicationReport.objects.get(pk=alert.item_pk))
# Sort medication by created date
sorted_medication.sort(key=lambda x: x.created_date, reverse=True)
# Get all the patients and related alerts and medication
for medication in sorted_medication:
alert = DashboardAlert.objects.get(category=DashboardAlert.MEDICATION, item_pk=medication.pk)
if alert.patient not in unordered_patient_medication_alert.keys():
unordered_patient_medication_alert[alert.patient] = ()
        # Convert between tuple and list here: tuples are kept as the dict values and
        # switched to a list only while appending, so insertion order is preserved
tup = unordered_patient_medication_alert[alert.patient]
lst = list(tup)
lst.append((medication, alert))
unordered_patient_medication_alert[alert.patient] = tuple(lst)
# Sort patients by full_name
medication_patients = unordered_patient_medication_alert.keys()
medication_patients.sort(key=lambda x: x.full_name)
for patient in medication_patients:
ordered_patient_medication_alert.append((patient, unordered_patient_medication_alert[patient]))
following_patients = request.user.doctor.patients.all()
context = {
"title":_("Dashboard"),
"year":datetime.datetime.now().year,
"unread_patients": unread_patients,
"patient_esas_alert": ordered_patient_esas_alert,
"patient_medication_alert": ordered_patient_medication_alert,
"following_patients": following_patients,
}
return render(
request,
"app/dashboard.html",
context
)
def login_redirect(request, **kwargs):
# Checks to see if user is logged in. If so, redirect to dashboard page.
if request.user.is_authenticated():
return HttpResponseRedirect(reverse("dashboard"))
else:
return auth_views.login(request, **kwargs)
def patients(request):
"""Renders the patients page."""
assert isinstance(request, HttpRequest)
# Check if user is logged in. Otherwise redirect to login page.
if not request.user.is_authenticated():
return HttpResponseRedirect(reverse("login"))
print "request.user.username:", request.user.username
patient_results = []
if request.method == "GET":
print "[views.searchPatients] got GET request"
# Get "patient_query" url param
patient_query = request.GET.get("patient_query", "")
print "patient_query:", patient_query
doctor = Doctor.objects.get(user=request.user)
print "doctor:", doctor
if patient_query == "":
# No specific patient query. Show all patients
#patient_results = doctor.patients.all()
patient_results = Patient.objects.all()
else:
# Actual query. Fetch close matches.
#longer_matches = doctor.patients.filter(full_name__search=patient_query)
patient_results = doctor.patients.filter(full_name__icontains=patient_query)
# Trigram matches will exclude results that are "farther" distance away.
#tri_matches = doctor.patients.filter(full_name__lower__trigram_similar=patient_query)
#patient_results = list(set(longer_matches).union(set(tri_matches)))
else:
print "else"
query_patients_form = QueryPatientsForm()
context = {
"title": _("Patients"),
"message": _("List of patients."),
"year": datetime.datetime.now().year,
"patient_results": patient_results,
"query_patients_form": query_patients_form,
}
return render(
request,
"app/patients.html",
context
)
def follow_patient(request):
""" Handle a doctor following a patient """
doctor = Doctor.objects.get(pk=request.POST["doctor_pk"])
patient = Patient.objects.get(pk=request.POST["patient_pk"])
doctor.patients.add(patient)
return HttpResponseRedirect("/patients")
def unfollow_patient(request):
""" Handle a doctor unfollowing a patient """
doctor = Doctor.objects.get(pk=request.POST["doctor_pk"])
patient = Patient.objects.get(pk=request.POST["patient_pk"])
doctor.patients.remove(patient)
return HttpResponseRedirect("/patients")
def patient_profile(request):
"""Renders the patient profile page."""
assert isinstance(request, HttpRequest)
# Check if user is logged in. Otherwise redirect to login page.
if not request.user.is_authenticated():
return HttpResponseRedirect(reverse("login"))
print request.GET
patient_pk = request.GET["pk"]
print "patient_pk:", patient_pk
patient_obj = Patient.objects.get(pk=patient_pk)
notes_form = PatientNotesForm(initial={"notes": patient_obj.doctor_notes})
create_notification_form = CreateNotificationForm()
add_video_form = AddVideoForm()
create_medication_form = CreateMedicationForm()
upload_image_form = UploadImageForm()
edit_patient_form = EditPatientForm()
edit_patient_form.fields["pk"].initial = patient_obj.pk
### Home tab.
editing_patient = False
if "edit" in request.GET:
print "edit:", request.GET["edit"]
editing_patient = True
edit_patient_form.fields["full_name"].initial = patient_obj.full_name
edit_patient_form.fields["hospital_id"].initial = patient_obj.hospital_id
edit_patient_form.fields["esas_alert"].initial = patient_obj.esas_alert
edit_patient_form.fields["age"].initial = patient_obj.age
edit_patient_form.fields["gender"].initial = patient_obj.gender
edit_patient_form.fields["treatment_type"].initial = patient_obj.treatment_type
edit_patient_form.fields["tumor_type"].initial = patient_obj.tumor_type
edit_patient_form.fields["comorbidities"].initial = patient_obj.comorbidities
edit_patient_form.fields["caregiver_name"].initial = patient_obj.caregiver_name
edit_patient_form.fields["caregiver_relationships"].initial = patient_obj.caregiver_relationships
edit_patient_form.fields["city_of_residence"].initial = patient_obj.city_of_residence
edit_patient_form.fields["telephone"].initial = patient_obj.telephone
edit_patient_form.fields["next_appointment"].initial = patient_obj.next_appointment
print "edit_patient_form:", edit_patient_form.fields
### Notifications tab.
notifications = Notification.objects.filter(patient=patient_obj)
#### Videos tab.
videos = Video.objects.filter(patient=patient_obj)
### Messages tab.
channels = []
channels.append(get_channel(request, patient_obj))
token = get_token(request.user.username)
"""
# List the channels that the user is a member of
for c in settings.TWILIO_IPM_SERVICE.channels.list():
if c.unique_name == patient_obj.user.username:
print "selected channel", c.friendly_name, c.sid
channel_json = {
"sid": str(c.sid),
"unique_name": str(c.unique_name),
"friendly_name": str(c.friendly_name),
}
channels.append(channel_json)
break
token = AccessToken(settings.TWILIO_ACCOUNT_SID, settings.TWILIO_API_KEY, settings.TWILIO_API_SECRET, request.user.username)
endpoint = "PalliAssist:" + request.user.username + ":web"
# Create an IP Messaging grant and add to token
ipm_grant = IpMessagingGrant(endpoint_id=endpoint, service_sid=settings.TWILIO_IPM_SERVICE_SID)
token.add_grant(ipm_grant)
"""
### ESAS tab.
esas_objects = ESASSurvey.objects.filter(patient=patient_obj)
esas_millis = ESASSurvey.objects.filter(patient=patient_obj)
for esas in esas_millis:
esas.created_date = convert_datetime_to_millis(esas.created_date)
esas_json = serializers.serialize("json", esas_millis)
### Pain tab.
pain_objects = PainSurvey.objects.filter(patient=patient_obj)
for pain in pain_objects:
for point in pain.front_points.all():
point.rgb = 255.0 / ((((point.intensity + 1.0) / 11.0) * 2.0) + 1.0)
point.save()
pain_images = PainImages.objects.filter(patient=patient_obj)
### Medication tab.
medications = Medication.objects.filter(patient=patient_obj)
for med in medications:
med.posology = "h, ".join(med.posology.split(";")) + "h"
medication_reports = MedicationReport.objects.filter(patient=patient_obj)
context = {
"title": _("Patient Profile"),
"message": _("Patient profile."),
"year": datetime.datetime.now().year,
"patient": patient_obj,
"editing_patient": editing_patient,
"edit_patient_form": edit_patient_form,
"notes_form": notes_form,
"add_video_form": add_video_form,
"create_notification_form": create_notification_form,
"create_medication_form": create_medication_form,
"upload_image_form": upload_image_form,
"notifications": notifications,
"videos": videos,
"medications": medications,
"medication_reports": medication_reports,
"esas_objects": esas_objects,
"esas_json": esas_json,
"pain_objects": pain_objects,
"pain_width": 207,
"pain_height": 400,
"pain_images": pain_images,
"channels": channels,
"token": token, # Twilio token for messaging tab.
}
return render(
request,
"app/patient_profile.html",
context
)
def edit_patient_info(request):
print "edit_patient_info request:", request.POST
edit_patient_form = EditPatientForm(request.POST)
return HttpResponseRedirect("/patient-profile?pk=" + str(edit_patient_form.data["pk"]) + "&edit=true")
def save_patient_info(request):
""" """
print "save_patient_info request:", request.POST
edit_patient_form = EditPatientForm(request.POST)
if edit_patient_form.is_valid():
form_data = edit_patient_form.cleaned_data
patient = Patient.objects.get(pk=form_data["pk"])
if form_data["hospital_id"]:
patient.hospital_id = form_data["hospital_id"]
if form_data["full_name"]:
patient.full_name = form_data["full_name"]
if form_data["age"]:
patient.age = form_data["age"]
if form_data["gender"]:
patient.gender = form_data["gender"]
if form_data["telephone"]:
patient.telephone = form_data["telephone"]
if form_data["esas_alert"]:
patient.esas_alert = form_data["esas_alert"]
if form_data["city_of_residence"]:
patient.city_of_residence = form_data["city_of_residence"]
if form_data["caregiver_name"]:
patient.caregiver_name = form_data["caregiver_name"]
if form_data["caregiver_relationships"]:
patient.caregiver_relationships = form_data["caregiver_relationships"]
if form_data["treatment_type"]:
patient.treatment_type = form_data["treatment_type"]
if form_data["tumor_type"]:
patient.tumor_type = form_data["tumor_type"]
if form_data["comorbidities"]:
patient.comorbidities = form_data["comorbidities"]
if form_data["next_appointment"]:
patient.next_appointment = form_data["next_appointment"]
patient.save()
return HttpResponseRedirect("/patient-profile?pk=" + str(patient.pk))
return HttpResponseRedirect("/patient-profile?pk=" + str(edit_patient_form.data["pk"]) + "&edit=true")
def patient_signup(request):
if request.method == "POST":
patient_signup_form = PatientSignupForm(request.POST, error_class=DivErrorList)
doctor_signup_form = SignupForm(error_class=DivErrorList)
if patient_signup_form.is_valid():
username = patient_signup_form.cleaned_data["username"]
password = patient_signup_form.cleaned_data["password_1"]
#role = signup_form.cleaned_data["doctor_patient_choice"]
user = User.objects.create_user(username=username, password=password)
print patient_signup_form.cleaned_data
# Create User and Patient object.
patients_doctor_username = patient_signup_form.cleaned_data["patients_doctor_username"]
patient = Patient.objects.create(
user=user,
full_name=patient_signup_form.cleaned_data["full_name"],
telephone=patient_signup_form.cleaned_data["telephone"],
age=patient_signup_form.cleaned_data["age"],
city_of_residence=patient_signup_form.cleaned_data["city_of_residence"],
caregiver_name=patient_signup_form.cleaned_data["caregiver_name"],
caregiver_relationships=patient_signup_form.cleaned_data["caregiver_relationships"],
treatment_type=patient_signup_form.cleaned_data["treatment_type"],
tumor_type=patient_signup_form.cleaned_data["tumor_type"],
comorbidities=patient_signup_form.cleaned_data["comorbidities"],
gender=patient_signup_form.cleaned_data["gender"]
)
Doctor.objects.get(user=User.objects.get(username=patients_doctor_username)).patients.add(patient)
return HttpResponseRedirect("/signup-success/")
context = {
"title": _("Sign Up"),
"year": datetime.datetime.now().year,
"active_form": "patient",
"patient_signup_form": patient_signup_form,
"doctor_signup_form": doctor_signup_form,
}
return render(
request,
"app/sign_up.html",
context
)
def doctor_signup(request):
if request.method == "POST":
patient_signup_form = PatientSignupForm(error_class=DivErrorList)
doctor_signup_form = SignupForm(request.POST, error_class=DivErrorList)
if doctor_signup_form.is_valid():
full_name = doctor_signup_form.cleaned_data["full_name"]
username = doctor_signup_form.cleaned_data["username"]
telephone = doctor_signup_form.cleaned_data["telephone"]
password = doctor_signup_form.cleaned_data["password_1"]
#role = signup_form.cleaned_data["doctor_patient_choice"]
user = User.objects.create_user(username=username, password=password)
# Create User and Doctor object.
doctor = Doctor.objects.create(user=user, full_name=full_name, telephone=telephone)
return HttpResponseRedirect("/signup-success/")
context = {
"title": _("Sign Up"),
"year": datetime.datetime.now().year,
"active_form": "doctor",
"patient_signup_form": patient_signup_form,
"doctor_signup_form": doctor_signup_form,
}
return render(
request,
"app/sign_up.html",
context
)
def signup(request):
"""Renders the patients page."""
assert isinstance(request, HttpRequest)
patient_signup_form = PatientSignupForm(error_class=DivErrorList)
doctor_signup_form = SignupForm(error_class=DivErrorList)
context = {
"title": _("Sign Up"),
"year": datetime.datetime.now().year,
"active_form": "doctor",
"patient_signup_form": patient_signup_form,
"doctor_signup_form": doctor_signup_form,
}
return render(
request,
"app/sign_up.html",
context
)
def signup_success(request):
"""Renders the page after a user has successfully signed up."""
assert isinstance(request, HttpRequest)
context = {
"title": _("Sign Up"),
"year": datetime.datetime.now().year,
}
return render(
request,
"app/sign_up_success.html",
context
)
def messages(request):
""" Renders the messages page. """
assert isinstance(request, HttpRequest)
# Check if user is logged in. Otherwise redirect to login page.
if not request.user.is_authenticated():
return HttpResponseRedirect(reverse("login"))
"""
How to delete a channel:
"""
"""
# Delete current demo channel.
demo_channel = settings.TWILIO_IPM_SERVICE.channels.get(sid="CH775a5cc2b8ef42db8362f101e305569a")
response = demo_channel.delete()
print "delete Demo Channel:", response # response = True on success.
# Recreate demo channel.
new_channel = settings.TWILIO_IPM_SERVICE.channels.create(friendly_name="Demo Channel", unique_name="Demo Channel", type="public")
new_channel.members.create(identity=request.user.username)
"""
"""
patient0 = "patient0"
# patient0 channel sid="CHd4c969e1d91946aeb1ebde3fa5cb85a2"
new_channel = settings.TWILIO_IPM_SERVICE.channels.create(unique_name=patient0, friendly_name=patient0, type="private")
new_channel.members.create(identity=request.user.username)
new_channel.members.create(identity=patient0)
"""
"""
# Delete all channels that aren't Demo Channel or patient0 channel
for c in settings.TWILIO_IPM_SERVICE.channels.list():
if c.sid == "CHd4c969e1d91946aeb1ebde3fa5cb85a2":
# Don't delete patient0 channel
pass
elif c.sid == "CHe75c920bb94c449da5fba883aa64db6c":
# Don't delete demo channel
pass
else:
c.delete()
"""
"""
# Update friendly name of channel
patient0_channel = settings.TWILIO_IPM_SERVICE.channels.get(sid="CHd4c969e1d91946aeb1ebde3fa5cb85a2")
patient0_channel.update(friendly_name="Patient 0")
"""
# Always allow the user to chat in the demo channel by adding to it if we haven't already been added.
demo_channel = settings.TWILIO_IPM_SERVICE.channels.get(sid="CHe75c920bb94c449da5fba883aa64db6c")
demo_json = {
"sid": str(demo_channel.sid),
"unique_name": str(demo_channel.unique_name),
"friendly_name": str(demo_channel.friendly_name),
}
#demo_channel.update(unique_name="demochannel")
member = demo_channel.members.create(identity=request.user.username)
"""
channels = []
# List the channels that the user is a member of
for c in settings.TWILIO_IPM_SERVICE.channels.list():
print "looking at", c.friendly_name, c.unique_name, c.sid
for m in c.members.list():
print "identity", m.identity
# Assuming that all twilio identities are based off of usernames
if m.identity == request.user.username:
# str() needed to get rid of u"hello" when escaping the string to javascript.
print "selected channel", c.friendly_name, c.unique_name, c.sid
channel_json = {
"sid": str(c.sid),
"unique_name": str(c.unique_name),
"friendly_name": str(c.friendly_name),
}
channels.append(channel_json)
break
print "== Channel =="
print "\tsid: ", c.sid
print "\tunique_name: ", c.unique_name
print "\tfriendly_name: ", c.friendly_name
print "\tattributes: ", c.attributes
print "\tlinks: ", c.links
print "\tmembers:"
for m in c.members.list():
print "\t\t", m.identity
token = AccessToken(settings.TWILIO_ACCOUNT_SID, settings.TWILIO_API_KEY, settings.TWILIO_API_SECRET, request.user.username)
endpoint = "PalliAssist:" + request.user.username + ":web"
# Create an IP Messaging grant and add to token
ipm_grant = IpMessagingGrant(endpoint_id=endpoint, service_sid=settings.TWILIO_IPM_SERVICE_SID)
token.add_grant(ipm_grant)
"""
channels = []
for p in Patient.objects.all():
channels.append(get_channel(request, p))
token = get_token(request.user.username)
upload_image_form = UploadImageForm()
patients = Patient.objects.all()
context = {
"title": _("Messages"),
"message": _("Send messages."),
"upload_image_form": upload_image_form,
"year":datetime.datetime.now().year,
"patients": patients,
"channels": channels,
"token": token,
}
return render(
request,
"app/messages.html",
context
)
def get_channel(request, patient):
"""
Get list of twilio channels that the patient is a part of. If the patient
doesn't have a channel yet, create one for it. Always create the
request.user (should be the doctor) as a member in case he hasn't been
added as a member yet.
"""
channel_json = {}
# List the channels that the user is a member of
for c in settings.TWILIO_IPM_SERVICE.channels.list():
if c.unique_name == patient.user.username:
print "selected channel", c.friendly_name, c.sid
if request != None:
c.members.create(identity=request.user.username)
channel_json = {
"sid": str(c.sid),
"unique_name": str(c.unique_name),
"friendly_name": str(c.friendly_name),
}
break
if channel_json == {}:
# We didn't find a channel. Create one
new_channel = settings.TWILIO_IPM_SERVICE.channels.create(unique_name=patient.user.username, friendly_name=patient.full_name, type="private")
if request != None:
new_channel.members.create(identity=request.user.username)
new_channel.members.create(identity=patient.user.username)
new_channel.members.create(identity=patient.full_name)
channel_json = {
"sid": str(new_channel.sid),
"unique_name": str(new_channel.unique_name),
"friendly_name": str(new_channel.friendly_name),
}
return channel_json
def get_token(username):
token = AccessToken(settings.TWILIO_ACCOUNT_SID, settings.TWILIO_API_KEY, settings.TWILIO_API_SECRET, username)
endpoint = "PalliAssist:" + username + ":web"
# Create an IP Messaging grant and add to token
ipm_grant = IpMessagingGrant(endpoint_id=endpoint, service_sid=settings.TWILIO_IPM_SERVICE_SID)
token.add_grant(ipm_grant)
return token
def token(request):
"""
Gets an access token for Twilio IP messaging. Called by messages.js.
"""
assert isinstance(request, HttpRequest)
# create a randomly generated username for the client
identity = request.GET["identity"]
# <unique app>:<user>:<device>
endpoint = "PalliAssist:" + identity + ":mobile"
# Create access token with credentials
token = AccessToken(settings.TWILIO_ACCOUNT_SID, settings.TWILIO_API_KEY, settings.TWILIO_API_SECRET, identity)
# Create an IP Messaging grant and add to token
ipm_grant = IpMessagingGrant(endpoint_id=endpoint, service_sid=settings.TWILIO_IPM_SERVICE_SID)
token.add_grant(ipm_grant)
# COMMENTED CAUSE FLASK THING - Return token info as JSON
#return jsonify(identity=identity, token=token.to_jwt())
return JsonResponse({"identity": identity, "token": token.to_jwt()})
#return JsonResponse({"identity": identity, "token": token})
def save_notes(request):
"""
Saves notes about patients. POST request from
PatientNotesForm on the patient profile page.
jQuery runs when save button is clicked.
"""
assert isinstance(request, HttpRequest)
print request.POST
doctor_notes = request.POST["notes"]
print "doctor_notes:", doctor_notes
patient_pk = request.POST["pk"]
patient = Patient.objects.get(pk=patient_pk)
patient.doctor_notes = doctor_notes
patient.save()
return JsonResponse({})
@csrf_exempt
def delete_notification(request):
"""
    Deletes a Notification model based on user input.
"""
print request.POST
# Notification's PK
Notification.objects.get(pk=int(request.POST["pk"])).delete()
return JsonResponse({})
def create_notification(request):
"""
    Creates a Notification model based on user input.
"""
# Patient's PK
patient_obj = Patient.objects.get(pk=request.POST["pk"])
Notification.objects.create(
created_date=timezone.now(),
category=request.POST["category"],
text=request.POST["text"],
patient=patient_obj
)
return JsonResponse({})
def add_video(request):
"""
Creates a Video model based on url.
"""
patient_obj = Patient.objects.get(pk=request.POST["pk"])
# Delete all current videos associated with this patient.
# So there's only one video per patient.
Video.objects.filter(patient=patient_obj).delete()
video = Video.objects.create(
patient=patient_obj,
url=request.POST["url"]
)
data_message = {
"event": "NOTIFICATION",
"action": "CREATE",
"category": "VIDEO",
"data": {
"videos": serializers.serialize("json", [video])
}
}
#sendFCM(data_message, "test")
sendFCM(data_message, patient_obj.user.username)
print request
print video
return JsonResponse({})
@csrf_exempt
def delete_video(request):
"""
    Deletes a Video model by primary key.
"""
Video.objects.get(pk=int(request.POST["pk"])).delete()
patient_obj = Patient.objects.get(pk=request.POST["pk"])
data_message = {
"event": "NOTIFICATION",
"action": "DELETE",
"category": "VIDEO",
"pk": request.POST["pk"]
}
#sendFCM(data_message, "test")
sendFCM(data_message, patient_obj.user.username)
return JsonResponse({})
@csrf_exempt
def delete_medication(request):
"""
Deletes a medication object for a patient.
"""
print "delete_medication"
print request.POST
patient_obj = Patient.objects.get(pk=int(request.POST["patient_pk"]))
    # Medication's PK
Medication.objects.get(pk=int(request.POST["medication_pk"])).delete()
data_message = {
"event": "NOTIFICATION",
"action": "DELETE",
"category": "MEDICATION",
"pk": request.POST["medication_pk"]
}
#sendFCM(data_message, "test")
sendFCM(data_message, patient_obj.user.username)
return HttpResponseRedirect("/patient-profile?pk=" + str(request.POST["patient_pk"] + "#medications"))
def create_medication(request):
"""
Creates a Medication model based on user input.
"""
print
print "create_medication"
patient_obj = Patient.objects.get(pk=request.POST["pk"])
medication = Medication.objects.create(
created_date=timezone.now(),
patient=patient_obj,
name=request.POST["name"],
form=request.POST["form"],
dose=request.POST["dose"],
posology=request.POST["posology"],
rescue=request.POST["rescue"]
)
print Medication.objects.get(pk=medication.pk)
print serializers.serialize("json", Medication.objects.filter(pk=medication.pk))
data_message = {
"event": "NOTIFICATION",
"action": "CREATE",
"category": "MEDICATION",
"data": {
"medications": serializers.serialize("json", Medication.objects.filter(pk=medication.pk))
}
}
#sendFCM(data_message, "test")
sendFCM(data_message, patient_obj.user.username)
print CreateMedicationForm(request.POST)
print request
print
return JsonResponse({})
def upload_image(request):
success = False
message = ""
blob_name = ""
container_name = ""
patient_obj = User.objects.get(username=request.POST["username"]).patient
print patient_obj
if len(request.FILES.items()) > 0:
for name, temp_image in request.FILES.items():
image_obj = Image.objects.create(created_date=timezone.now(), patient=patient_obj, image=temp_image)
container_name = patient_obj.user.username
blob_name = patient_obj.user.username + "_" + str(convert_datetime_to_millis(datetime.datetime.now()))
settings.BLOCK_BLOB_SERVICE.create_container(container_name, public_access=PublicAccess.Container)
settings.BLOCK_BLOB_SERVICE.create_blob_from_path(container_name, blob_name, image_obj.image.path, content_settings=ContentSettings(content_type="image/png"))
success = True
else:
message = _("Error uploading image. Please try again.")
return JsonResponse({
"success": success,
"message": message,
"blob_name": blob_name,
"container_name": container_name
})
def create_channel(request):
"""
    Creates a private Twilio IP Messaging channel with the given friendly name
    and adds the requesting user as a member.
"""
assert isinstance(request, HttpRequest)
print request.POST["channel_name"]
channel_name = request.POST["channel_name"]
new_channel = settings.TWILIO_IPM_SERVICE.channels.create(friendly_name=channel_name, type="private")
new_channel.members.create(identity=request.user.username)
print new_channel
print new_channel.type
print new_channel.friendly_name
print new_channel.unique_name
return JsonResponse({})
def check_esas_alert(patient, esas):
"""
    Checks whether a dashboard alert should be created for this ESAS survey,
    i.e. whether any symptom intensity has reached the patient's custom ESAS
    alert threshold.
"""
limit = patient.esas_alert
if esas.pain >= limit:
return True
elif esas.fatigue >= limit:
return True
elif esas.nausea >= limit:
return True
elif esas.depression >= limit:
return True
elif esas.anxiety >= limit:
return True
elif esas.drowsiness >= limit:
return True
elif esas.appetite >= limit:
return True
elif esas.well_being >= limit:
return True
elif esas.lack_of_air >= limit:
return True
elif esas.insomnia >= limit:
return True
return False
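# A more compact equivalent is sketched below (same behaviour assumed; kept as a comment so
# the module itself is unchanged):
#     symptoms = (esas.pain, esas.fatigue, esas.nausea, esas.depression, esas.anxiety,
#                 esas.drowsiness, esas.appetite, esas.well_being, esas.lack_of_air,
#                 esas.insomnia)
#     return any(value >= limit for value in symptoms)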
def handle_completed_esas(dt, patient_obj, data):
"""
    Handler for receiving a POST request from mobile, indicating
    that a patient has completed an ESAS survey.
"""
print "handle_completed_esas"
print data
print type(data)
print data["pain"]
print int(data["pain"])
esas = ESASSurvey.objects.create(created_date=dt, patient=patient_obj)
esas.pain = int(data["pain"])
print "pain", esas.pain
esas.fatigue = int(data["fatigue"])
esas.nausea = int(data["nausea"])
esas.depression = int(data["depression"])
esas.anxiety = int(data["anxiety"])
esas.drowsiness = int(data["drowsiness"])
esas.appetite = int(data["appetite"])
esas.well_being = int(data["well_being"])
esas.lack_of_air = int(data["lack_of_air"])
esas.insomnia = int(data["insomnia"])
esas.fever = data["fever"]
esas.constipated = data["constipated"]
if data["constipated"] == "yes":
esas.constipated_days = int(data["constipated_days"])
esas.constipated_bothered = int(data["constipated_bothered"])
esas.vomiting = data["vomiting"]
if data["vomiting"] == "yes":
esas.vomiting_count = int(data["vomiting_count"])
esas.confused = data["confused"]
esas.save()
print esas
if check_esas_alert(patient_obj, esas):
DashboardAlert.objects.create(category=DashboardAlert.ESAS, patient=patient_obj, item_pk=esas.pk)
def handle_completed_medication(dt, patient_obj, data):
"""
    Handler for receiving a POST request from mobile, indicating
    that a patient has completed a medication report.
"""
print data
report = MedicationReport.objects.create(created_date=dt, patient=patient_obj)
alert = False
for entry in data:
print "entry", entry
medication = Medication.objects.get(pk=entry["pk"])
report_entry = MedicationReportEntry.objects.create(medication=medication)
entry["statuses"] = json.loads(entry["statuses"])
for status in entry["statuses"]:
print status
print type(status)
if status["completed"] == "yes":
med_status = MedicationStatus.objects.create(time=status["time"], completed=True)
else:
# Make dashboard alert
alert = True
med_status = MedicationStatus.objects.create(time=status["time"], completed=False)
report_entry.statuses.add(med_status)
report.entries.add(report_entry)
if alert:
DashboardAlert.objects.create(category=DashboardAlert.MEDICATION, patient=patient_obj, item_pk=report.pk)
print medication
print medication.patient
def handle_completed_pain(dt, patient_obj, data):
"""
    Handler for receiving a POST request from mobile, indicating
    that a patient has completed a pain survey.
"""
print "handle_completed_pain"
pain_image = PainImages.objects.create(
patient=patient_obj,
created_date=dt,
container_name = data["container_name"],
front_blob_name=data["front_blob_name"],
back_blob_name=data["back_blob_name"],
left_blob_name=data["left_blob_name"],
right_blob_name=data["right_blob_name"],
)
print pain_image
def handle_mobile_login(data, topic):
print "handle_mobile_login"
print "username", data["username"]
print "password", data["password"]
user = authenticate(username=data["username"], password=data["password"])
print user
if user != None:
print serializers.serialize("json", [user.patient])
if user is not None:
patient_obj = user.patient
# TODO. make this the same format as handle_patient_registration
# event: LOGIN
# action: SUCCESS/ERROR
# category: etc.
data_message = {
"event": "LOGIN",
"action": "SUCCESS",
"category": "AUTHORIZATION",
"data": {
"success": "yes",
"patient": serializers.serialize("json", [patient_obj]),
"videos": serializers.serialize("json", Video.objects.filter(patient=patient_obj)),
"medications": serializers.serialize("json", Medication.objects.filter(patient=patient_obj)),
"channel_sid": get_channel(None, patient_obj)["sid"]
}
}
else:
data_message = {
"event": "LOGIN",
"action": "ERROR",
"category": "AUTHORIZATION",
"data": {
"success": "no",
}
}
sendFCM(data_message, topic)
def check_access_key(data):
# Check if valid access key. Reading from access_keys.txt from project root.
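    # The file is expected to hold one access key per line (see read().splitlines() below).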
access_keys_file = open(os.path.join(settings.PROJECT_ROOT, "access_keys.txt"))
access_keys_list = access_keys_file.read().splitlines()
if data["access_key"] not in access_keys_list:
return False
return True
def handle_patient_registration(data, topic):
""" Register a patient from the mobile side """
print "handle_patient_registration"
try:
patient_user = User.objects.get(username=data["patient_username"])
# TODO: send fcm of error: patient username already exists
print "send fcm of error: patient username already exists"
data_message = {
"event": "REGISTRATION",
"action": "ERROR",
"category": "PATIENT",
"data": {
"error": "Username already taken. Please try a different one."
}
}
sendFCM(data_message, topic)
except User.DoesNotExist:
# patient username does not exist. this is to be expected
try:
doctor_user = User.objects.get(username=data["doctor_username"])
doctor = Doctor.objects.get(user=doctor_user)
# Checking for valid access key
valid_access_key = check_access_key(data)
if not valid_access_key:
# TODO: send fcm of error; invalid access key
print "send fcm of error: invalid access key"
data_message = {
"event": "REGISTRATION",
"action": "ERROR",
"category": "PATIENT",
"data": {
"error": "Invalid access key."
}
}
                sendFCM(data_message, topic)
                return
# Creating patient User object.
patient_user = User.objects.create_user(username=data["patient_username"], password=data["password"])
# Create Patient object
patient = Patient.objects.create(
user=patient_user,
full_name=data["full_name"],
telephone=data["telephone"],
age=data["age"],
city_of_residence=data["city_of_residence"],
caregiver_name=data["caregiver_name"],
treatment_type=data["treatment_type"],
gender=data["gender"],
tumor_type=data["tumor_type"],
comorbidities=data["comorbidities"],
caregiver_relationships=data["caregiver_relationships"],
)
# Add patient to that doctor's list of patients
doctor.patients.add(patient)
print "new patient:", patient
print "new patient_user:", patient_user
print "referenced doctor:", doctor
data_message = {
"event": "REGISTRATION",
"action": "SUCCESS",
"category": "PATIENT",
"data": { }
}
sendFCM(data_message, topic)
"""
"patient": serializers.serialize("json", [patient]),
"videos": serializers.serialize("json", Video.objects.filter(patient=patient)),
"medications": serializers.serialize("json", Medication.objects.filter(patient=patient)),
"""
except User.DoesNotExist:
# Doctor user does not exist
# TODO: send fcm of error: doctor username does not exist
print "send fcm of error: doctor username does not exist"
data_message = {
"event": "REGISTRATION",
"action": "ERROR",
"category": "PATIENT",
"data": {
"error": "Doctor username not found."
}
}
sendFCM(data_message, topic)
@csrf_exempt
def mobile(request):
"""
Handle FCM requests from mobile.
Format described in Meeting Minutes Feb 2, 2017
"""
assert isinstance(request, HttpRequest)
print request.POST
event = request.POST["event"]
if event == "TESTING":
return JsonResponse({"hello": "world"})
if event == "COMPLETED":
patient_username = request.POST["patient"] # TODO hardcoded to patient0 right now
patient_obj = User.objects.get(username=patient_username).patient
print "patient_obj", patient_obj
timestamp = request.POST["timestamp"] # milliseconds
dt_unaware = datetime.datetime.fromtimestamp(int(timestamp)/1000.0)
dt_aware = timezone.make_aware(dt_unaware, timezone.get_current_timezone())
print "dt_aware", dt_aware
if request.POST["category"] == "MEDICATION":
handle_completed_medication(dt_aware, patient_obj, json.loads(request.POST["data"]))
return JsonResponse({})
elif request.POST["category"] == "PAIN":
handle_completed_pain(dt_aware, patient_obj, json.loads(request.POST["data"]))
return JsonResponse({})
elif request.POST["category"] == "ESAS":
handle_completed_esas(dt_aware, patient_obj, json.loads(request.POST["data"]))
return JsonResponse({})
elif event == "LOGIN":
if request.POST["category"] == "AUTHORIZATION":
handle_mobile_login(json.loads(request.POST["data"]), request.POST["topic"])
return JsonResponse({})
elif event == "REGISTRATION":
if request.POST["category"] == "PATIENT":
print "post: REGISTRATION, PATIENT"
handle_patient_registration(json.loads(request.POST["data"]), request.POST["topic"])
return JsonResponse({})
# TODO. return an error.
return render(request, "app/blank.html")
def sendFCM(data_message, topic):
print
print "sending fcm:"
print data_message
print
result = settings.FCM_SERVICE.notify_topic_subscribers(topic_name=topic, data_message=data_message)
@csrf_exempt
def set_language(request):
"""
Set language of website.
"""
print "set_language"
language = request.POST["language"]
print "language:", language
translation.activate(language)
print "get_language:", translation.get_language()
request.session[translation.LANGUAGE_SESSION_KEY] = language
return HttpResponseRedirect(request.POST["next"])
@csrf_exempt
def sync_redcap(request):
"""
Syncs all django models with the REDCap records.
django primary_key in model == REDCap record_id in record.
"""
URL = "https://hcbredcap.com.br/api/"
#USER_TOKEN = "F2C5AEE8A2594B0A9E442EE91C56CC7A"
MEDICATION_TOKEN = "2D58F93CB3B9C8C2FD00E64BD12080A3"
ESAS_TOKEN = "7048C161E7BE6A8B86F5100D5FDA7C20"
PAIN_TOKEN = "B91EDA097393C04D5F2C1526E1F7CD37"
PATIENT_TOKEN = "A1E9884F5A90E5270385D131B66659CE"
DOCTOR_TOKEN = "8400B14C28F7E6C644E0ADB5AE5F4628"
#REDCAP_USER_PROJECT = Project(URL, USER_TOKEN)
REDCAP_MEDICATION_PROJECT = Project(URL, MEDICATION_TOKEN)
REDCAP_ESAS_PROJECT = Project(URL, ESAS_TOKEN)
REDCAP_PAIN_PROJECT = Project(URL, PAIN_TOKEN)
REDCAP_PATIENT_PROJECT = Project(URL, PATIENT_TOKEN)
REDCAP_DOCTOR_PROJECT = Project(URL, DOCTOR_TOKEN)
# Medications
medications = Medication.objects.all()
medication_data = []
for medication in medications:
medication_data.append({
"record_id": medication.pk,
"created_date": formatRedcapDate(medication.created_date),
"name": medication.name,
"form": medication.form,
"dose": medication.dose,
"posology": medication.posology,
"rescue": medication.rescue
})
medication_response = REDCAP_MEDICATION_PROJECT.import_records(medication_data, overwrite="overwrite")
print "medication models:", len(medications)
print "medication_response:", medication_response["count"]
# ESAS
esas_surveys = ESASSurvey.objects.all()
esas_data = []
for esas_survey in esas_surveys:
esas_data.append({
"record_id": esas_survey.pk,
"created_date": formatRedcapDate(esas_survey.created_date),
"patient": esas_survey.patient.pk,
"pain": esas_survey.pain,
"fatigue": esas_survey.fatigue,
"nausea": esas_survey.nausea,
"depression": esas_survey.depression,
"anxiety": esas_survey.anxiety,
"drowsiness": esas_survey.drowsiness,
"appetite": esas_survey.appetite,
"well_being": esas_survey.well_being,
"lack_of_air": esas_survey.lack_of_air,
"insomnia": esas_survey.insomnia,
"fever": esas_survey.fever,
"constipated": esas_survey.constipated,
"constipated_days": esas_survey.constipated_days,
"constipated_bothered": esas_survey.constipated_bothered,
"vomiting": esas_survey.vomiting,
"vomiting_count": esas_survey.vomiting_count,
"confused": esas_survey.confused
})
esas_response = REDCAP_ESAS_PROJECT.import_records(esas_data, overwrite="overwrite")
print "esas models:", len(esas_surveys)
print "esas_response:", esas_response["count"]
# PAIN
pain_images = PainImages.objects.all()
pain_data = []
for pain_image in pain_images:
pain_data.append({
"record_id": pain_image.pk,
"created_date": formatRedcapDate(pain_image.created_date),
"patient": pain_image.patient.pk,
"container_name": pain_image.container_name,
"front_blob_name": pain_image.front_blob_name,
"back_blob_name": pain_image.back_blob_name,
"left_blob_name": pain_image.left_blob_name,
"right_blob_name": pain_image.right_blob_name,
})
pain_response = REDCAP_PAIN_PROJECT.import_records(pain_data, overwrite="overwrite")
print "pain models:", len(pain_images)
print "pain_response:", pain_response["count"]
# Doctor
doctors = Doctor.objects.all()
doctor_data = []
for doctor in doctors:
doctor_data.append({
"record_id": doctor.pk,
"username": doctor.user.username,
"full_name": doctor.full_name,
"telephone": doctor.telephone
})
doctor_response = REDCAP_DOCTOR_PROJECT.import_records(doctor_data, overwrite="overwrite")
print "doctor models:", len(doctors)
print "doctor_response:", doctor_response["count"]
# Patient
patients = Patient.objects.all()
patient_data = []
for patient in patients:
patient_data.append({
"record_id": patient.pk,
"hospital_id": patient.hospital_id,
"username": patient.user.username,
"full_name": patient.full_name,
"telephone": patient.telephone,
"age": patient.age,
"gender": patient.gender,
"city_of_residence": patient.city_of_residence,
"caregiver_name": patient.caregiver_name,
"treatment_type": patient.treatment_type,
"esas_alert": patient.esas_alert,
"tumor_type": patient.tumor_type,
"comorbidities": patient.comorbidities,
"caregiver_relationships": patient.caregiver_relationships,
})
patient_response = REDCAP_PATIENT_PROJECT.import_records(patient_data, overwrite="overwrite")
print "patient models:", len(patients)
print "patient_response:", patient_response["count"]
return JsonResponse({})
def twoDigit(num):
""" Will return a string representation of the num with 2 digits. e.g. 6 => 06 """
if num < 10:
return "0" + str(num)
return str(num)
def formatRedcapDate(dt):
""" Formats datetime into D-M-Y H:M """
month = twoDigit(dt.month)
day = twoDigit(dt.day)
hour = twoDigit(dt.hour)
minute = twoDigit(dt.minute)
return str(dt.year) + "-" + month + "-" + day + " " + hour + ":" + minute
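# Illustrative example: formatRedcapDate(datetime.datetime(2017, 2, 3, 9, 5)) returns
# "2017-02-03 09:05".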
@csrf_exempt
def delete_dashboard_alert(request):
"""
Deletes a dashboard alert object for a patient.
"""
print request.POST
    # DashboardAlert's PK
DashboardAlert.objects.get(pk=int(request.POST["pk"])).delete()
return JsonResponse({})
def convert_datetime_to_millis(dt):
return (dt.replace(tzinfo=None) - datetime.datetime(1970, 1, 1)).total_seconds() * 1000
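# Illustrative example: convert_datetime_to_millis(datetime.datetime(1970, 1, 1, 0, 0, 1))
# returns 1000.0, i.e. milliseconds since the Unix epoch with any tzinfo stripped first.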
def admin_input(request):
return render(
request,
"app/admin_input.html",
{}
)
|
#!/usr/bin/env python
# sneeze-translator/sneeze-translator.py
from Processor import Processor
import os
import json
from dotenv import load_dotenv
import logging
import discord
import tweepy
from tweepy.streaming import StreamListener
from time import strftime
from datetime import datetime
from queue import Queue
from threading import Thread
import urllib3.exceptions
from config import create_api
load_dotenv()
DISCORD_TOKEN = os.environ['DISCORD_TOKEN']
WH_URL = os.environ['WH_URL']
USERS = [
'1321879104317132800',
'1108236843466711043',
'1430575943433691154',
'1276939850885656576',
'1345372835254992896',
'1041912206583984130',
'1325769100803534849',
'1041915108069261313',
]
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
class TweetStreamListener(StreamListener):
def __init__(self, api, q=Queue()):
super().__init__()
#self.api = api
#self.me = api.me()
self.color = 0
num_worker_threads = 4
self.q = q
for i in range(num_worker_threads):
t = Thread(target=self._on_status)
t.daemon = True
t.start()
def _on_status(self):
while True:
data = self.q.get()._json
tweet_id = data["id"]
date_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
logger.info(f"[{date_time}] Processing tweet id {tweet_id}")
if data["user"]["id_str"] in USERS:
if data["user"]["id_str"] == '1321879104317132800':
self.color = 5991156
elif data["user"]["id_str"] == '1108236843466711043':
self.color = 5991156
elif data["user"]["id_str"] == '1276939850885656576':
self.color = 255212216
elif data["user"]["id_str"] == '1345372835254992896':
self.color = 255212216
elif data["user"]["id_str"] == '1041912206583984130':
self.color = 18650111
elif data["user"]["id_str"] == '1325769100803534849':
self.color = 191182200
elif data["user"]["id_str"] == '1041915108069261313':
self.color = 191182200
elif data["user"]["id_str"] == '1430575943433691154':
self.color = 14177041
else:
self.color = 1127128
date_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
logger.info(f"[{date_time}] Match")
p = Processor(status_tweet=data)
p.create_embed(self.color)
p.attach_field()
p.attach_media()
p.attach_translation()
p.send_message(WH_URL)
else:
date_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
logger.info(f"[{date_time}] No match")
self.q.task_done()
def on_status(self, tweet):
self.q.put(tweet)
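    # on_status only enqueues the raw status object; the daemon worker threads started in
    # __init__ drain the queue and do the filtering, embed building, translation and webhook
    # delivery, so slow network calls never block the Twitter stream callback.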
def on_error(self, status):
logger.error(status)
if __name__ == "__main__":
api = create_api()
tweets_listener = TweetStreamListener(api)
stream = tweepy.Stream(api.auth, tweets_listener)
stream.filter(follow=USERS, is_async=True, stall_warnings=True)
|
import copy
import logging
import numpy as np
from pymatgen.io.vasp import Chgcar, VolumetricData, Elfcar
from cams.propressing.data_rotate import rotation_axis_by_angle, rote_index
from cams.propressing.spilt_tri import relative_location, spilt_tri_prism_z
try:
# import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt_installed = True
except ImportError:
print('Warning: Module matplotlib.pyplot is not installed')
plt_installed = False
# whether mayavi installed
try:
from mayavi import mlab
mayavi_installed = True
except ImportError:
mayavi_installed = False
class ElePlot:
def __init__(self, data):
"""
Parameters
----------
data: np.ndarray
"""
self.elf_data = data
self.grid = self.elf_data.shape
self.width = (1, 1, 1)
        self._logger = logging.getLogger("vaspy.log")
@staticmethod
def expand_data(data, grid, widths):
"""
        Expand (tile) the data along each axis by the given widths.
Parameters
----------
data: np.ndarray
elf or chg data.
grid: tuple
numpy.shape of data.
widths: tuple of int, 3D
            number of replications along the x, y and z axes;
            the three-dimensional matrix is tiled along X, Y and Z accordingly.
Examples:
(2,1,3)
"""
# expand grid
widths = np.array(widths)
expanded_grid = np.array(grid) * widths # expanded grid
        # expand elf_data matrix
expanded_data = copy.deepcopy(data)
nx, ny, nz = widths
# x axis
added_data = copy.deepcopy(expanded_data)
for i in range(nx - 1):
expanded_data = np.append(expanded_data, added_data, axis=0)
# y axis
added_data = copy.deepcopy(expanded_data)
for i in range(ny - 1):
expanded_data = np.append(expanded_data, added_data, axis=1)
# z axis
added_data = copy.deepcopy(expanded_data)
for i in range(nz - 1):
expanded_data = np.append(expanded_data, added_data, axis=2)
return expanded_data, expanded_grid
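    # Shape illustration (assumed inputs, not from the original docs): with
    # data.shape == grid == (2, 2, 2) and widths == (2, 1, 3), expand_data
    # returns an array of shape (4, 2, 6) together with expanded_grid
    # array([4, 2, 6]); the block is tiled twice along x and three times along z.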
def get_width_by_pbc_max(self, pbc_offsets):
"""
Parameters
----------
pbc_offsets
            list of arrays, one per point, each giving the three PBC components; values must be -1, 0 or 1.
            np.array([0,0,0]) means the point lies in the initial cell with no periodic offset.
            np.array([1,0,0]) means the point is offset by one period along the x axis,
            i.e. a_i = a_i + 1 * a_(lattice constant).
            Here the offsets are only used to work out how many copies are needed along each axis.
            Examples:
            [np.array([1,0,0]), np.array([1,0,0]), np.array([0,-1,0])] means the data is expanded by (2,2,1)
"""
if isinstance(pbc_offsets, list):
pbc_offsets = np.concatenate(pbc_offsets, axis=0)
width = np.max(np.abs(pbc_offsets), axis=0) + 1
width = width.astype(int)
width = tuple(width.tolist())
self.width = width
return width
@staticmethod
def get_pbc_index(pbc_offset):
"""
        Convert the pbc_offset into a relative (non-negative) pbc_offset.
        Each row is one point and each column is an axis.
        Values must be -1, 0 or 1.
        Examples:
            [0,1,0]      [0+1,1,0]
            [-1,0,0] >>> [0,0,0]
            [0,1,0]      [0+1,1,0]
        Parameters
        ----------
        pbc_offset: np.ndarray
"""
tpbc_d = np.array(pbc_offset)
reformed_tpd = [tpbc_d[:, i] + 1 if np.any(tpbc_d[:, i] < 0)
else tpbc_d[:, i] for i in range(tpbc_d.shape[1])]
reformed_tpd = np.array(reformed_tpd).T
return reformed_tpd
class ChgCar(Chgcar, ElePlot):
def __init__(self, poscar, data, data_aug=None):
Chgcar.__init__(self, poscar, data, data_aug)
self.elf_data = self.data["total"]
ElePlot.__init__(self, data=self.elf_data)
@classmethod
def from_file(cls, filename):
"""Reads a CHGCAR file."""
(poscar, data, data_aug) = VolumetricData.parse_file(filename)
return cls(poscar, data, data_aug=data_aug)
def get_cartesian_data(self, data=None, times=(2, 2, 2), pbc_offsets=None):
"""
        Rotate the data so that the effective cell angles become (90,90,90).
        The shape of the returned data is (na,nb,nc) * times * the PBC expansion widths.
Parameters
----------
data: np.array
shape is (na,nb,nc)
times: tuple
            Because an np.ndarray can only represent a rectangular box, the data must be expanded so
            that it still contains the sheared cell after rotation.
            The default multiplier is (2,2,2).
            For angles above 135 or below 45 degrees, the times need to be larger than 2;
            for example, for (90,90,150) the times need to be (1+abs(1/cos(150/180*pi)),1+abs(1/sqrt(150/180*pi)),1)
############################################################
---------------------- --------------------------------
-oooooooooooooooooooo- --------------------------------
-oooooooooooooooooooo- -oooooooooooooooooooo-----------
-oooooooooooooooooooo- ---oooooooooooooooooooo---------
-oooooooooooooooooooo- >>> -----oooooooooooooooooooo-------
-oooooooooooooooooooo- -------oooooooooooooooooooo-----
-oooooooooooooooooooo- ---------oooooooooooooooooooo---
-oooooooooooooooooooo- -----------oooooooooooooooooooo-
---------------------- --------------------------------
---------------------- --------------------------------
############################################################
pbc_offsets:list of np.ndarray, tuple
            each array gives the three PBC components of one point; values must be -1, 0 or 1.
            np.array([0,0,0]) means the point lies in the initial cell with no periodic offset.
            np.array([1,0,0]) means the point is offset by one period along the x axis,
            i.e. a_i = a_i + 1 * a_(lattice constant).
            Here the offsets are only used to work out how many copies are needed along each axis.
            Examples:
            [np.array([1,0,0]), np.array([1,0,0]), np.array([0,-1,0])] means the data is expanded by (2,2,1)
            Alternatively, a tuple can be passed directly to give the copy times, e.g. (2,2,1).
"""
angles = self.structure.lattice.angles
if data is None:
if isinstance(pbc_offsets, tuple):
elf_data, grid = self.expand_data(self.elf_data, self.grid, pbc_offsets)
self.width = pbc_offsets
self.cartesian_data = rotation_axis_by_angle(elf_data, angles=angles, times=times)
elif pbc_offsets is not None:
widths = self.get_width_by_pbc_max(pbc_offsets)
elf_data, grid = self.expand_data(self.elf_data, self.grid, widths)
self.cartesian_data = rotation_axis_by_angle(elf_data, angles=angles, times=times)
else:
self.cartesian_data = rotation_axis_by_angle(self.elf_data, angles=angles, times=times)
else:
self.cartesian_data = rotation_axis_by_angle(data, angles=angles, times=times)
return self.cartesian_data
def trans(self, point_indexes, pbc_offset):
"""get the relative true index by add pbc_offset from the point_indexes"""
frac_coords = self.structure.frac_coords
point = frac_coords[point_indexes, :]
if pbc_offset is None:
pass
else:
point = point + self.get_pbc_index(pbc_offset)
return point
def get_tri_data_z(self, point_indexes, pbc_offset=None, z_range=None, z_absolute=True):
"""
        Extract the data inside a triangular prism defined by three given points.
        Make sure the three points do not all lie in the same rectangle.
Parameters
----------
point_indexes: list of int
three point.
by given index of pymatgen.Structure.sites.
pbc_offset: np.ndarray
See Also: ElePlot.get_pbc_index.
z_range: tuple
range of z axis.
z_absolute
            True: z_range is given as absolute indices; False: as fractions of the z dimension.
Returns
-------
        box: np.ndarray
            the cuboid block containing the triangular prism (values outside the prism are zeroed)
"""
        assert hasattr(self, 'cartesian_data'), "please call '.get_cartesian_data' first."
        # Get the fractional (relative) coordinates of the points, with 0 as the minimum.
point = self.trans(point_indexes, pbc_offset)
        # self.cartesian_data.shape has already been multiplied by times and the pbc widths
point = np.array(point) / np.array(self.width)
        # Divide by width so the origin matches that of the original cell * times; PBC-expanded points are shifted by one extra period
        # (for each direction, following the conversion above: -1 shifts the other points by one period, 0 leaves them unchanged, 1 shifts this point by one period)
percent = rote_index(point, self.cartesian_data.shape, data_init=False, angles=self.structure.lattice.angles,
return_type="int")
maxs = np.max(percent.astype(int), axis=0)
mins = np.min(percent.astype(int), axis=0)
        # Cut out a cuboid block containing the triangular prism based on the three points, to save computation.
if z_range is None:
data_target = self.cartesian_data[mins[0]:maxs[0], mins[1]:maxs[1], :]
elif z_range == "zero_to_half":
data_target = self.cartesian_data[mins[0]:maxs[0], mins[1]:maxs[1], :int(self.cartesian_data.shape[2] / 2)]
elif z_range == "half_to_all":
data_target = self.cartesian_data[mins[0]:maxs[0], mins[1]:maxs[1], int(self.cartesian_data.shape[2] / 2):]
elif isinstance(z_range, tuple) and z_absolute:
data_target = self.cartesian_data[mins[0]:maxs[0], mins[1]:maxs[1], z_range[0]:z_range[1]]
elif isinstance(z_range, tuple) and not z_absolute:
z_r = (int(z_range[0] * self.cartesian_data.shape[2]), int(z_range[1] * self.cartesian_data.shape[2]))
data_target = self.cartesian_data[mins[0]:maxs[0], mins[1]:maxs[1], z_r[0]:z_r[1]]
else:
raise TypeError("The z_range must be None(all),'zero_to_half','half_to_all' or tuple of with int 2")
        # position within the small rectangular block
relative = relative_location(percent[:, (0, 1)])
site = relative * np.array(data_target.shape[:2])
        # Get a boolean array where True marks the points inside the triangular prism.
data_target_tri = spilt_tri_prism_z(data_target.shape, site, z_range=(0, data_target.shape[2]),
index_percent=False)
data_result = data_target_tri * data_target
return data_result
def get_cubic_data(self, point_indexes, pbc_offset=None):
        assert hasattr(self, 'cartesian_data'), "please call '.get_cartesian_data' first."
point = self.trans(point_indexes, pbc_offset)
percent = rote_index(point, self.cartesian_data.shape, data_init=False, angles=self.structure.lattice.angles,
return_type="int")
maxs = np.max(percent.astype(int), axis=0)
mins = np.min(percent.astype(int), axis=0)
data_result = self.cartesian_data[mins[0]:maxs[0], mins[1]:maxs[1], mins[2]:maxs[2]]
return data_result
def plot_field(self, show_mode="show", data=None, **kwargs):
"""
        Use mayavi.mlab to plot a 3d field.
        Parameters
        ----------
        show_mode: str
            'show' to display the figure, 'save' to write it to a png file.
        data: np.ndarray, optional
            pre-computed Cartesian data; if None it is built from self.elf_data.
        kwargs: {
            'vmin': float, minimum value,
            'vmax': float, maximum value,
            'axis_cut': str, axis of the cut plane ('x', 'y' or 'z'),
            'nct': int, number of contours,
            'opacity': float, opacity of contour,
            'widths': tuple of int,
                number of replications along the x, y, z axes,
            'times': tuple of int, expansion factors passed to get_cartesian_data,
        }
"""
if not mayavi_installed:
            self._logger.warning("Mayavi is not installed on your device.")
return
# set parameters
vmin = kwargs['vmin'] if 'vmin' in kwargs else 0.0
vmax = kwargs['vmax'] if 'vmax' in kwargs else 1.0
axis_cut = kwargs['axis_cut'] if 'axis_cut' in kwargs else 'z'
nct = kwargs['nct'] if 'nct' in kwargs else 3
widths = kwargs['widths'] if 'widths' in kwargs else (1, 1, 1)
times = kwargs['times'] if 'times' in kwargs else (2, 2, 2)
if data is None:
elf_data, grid = self.expand_data(self.elf_data, self.grid, widths)
elf_data = self.get_cartesian_data(data=elf_data, times=times)
else:
elf_data = data
# create pipeline
field = mlab.pipeline.scalar_field(elf_data) # data source
mlab.pipeline.volume(field, vmin=vmin, vmax=vmax) # put data into volumn to visualize
# cut plane
if axis_cut in ['Z', 'z']:
plane_orientation = 'z_axes'
elif axis_cut in ['Y', 'y']:
plane_orientation = 'y_axes'
elif axis_cut in ['X', 'x']:
plane_orientation = 'x_axes'
else:
            raise NotImplementedError("axis_cut must be one of 'x', 'y' or 'z'")
cut = mlab.pipeline.scalar_cut_plane(
field.children[0], plane_orientation=plane_orientation)
        cut.enable_contours = True  # enable contour display
cut.contour.number_of_contours = nct
        # mlab.savefig('field.png', size=(2000, 2000))
if show_mode == 'show':
mlab.show()
elif show_mode == 'save':
mlab.savefig('mlab_contour3d.png')
else:
raise ValueError('Unrecognized show mode parameter : ' +
show_mode)
return None
class ElfCar(Elfcar, ChgCar):
def __init__(self, poscar, data):
Elfcar.__init__(self, poscar, data)
ChgCar.__init__(self, poscar, data)
@classmethod
def from_file(cls, filename):
"""
        Reads an ELFCAR file.
        :param filename: Filename
        :return: ElfCar
"""
(poscar, data) = VolumetricData.parse_file(filename)
return cls(poscar, data)
|
from os import getenv, walk, path
from platform import system
read = open("Game.spec", "rt", encoding="utf-8").readlines()
for index, line in enumerate(read):
if line.startswith("assets = [('"):
newassets = []
for (dirpath, dirnames, filenames) in walk("assets"):
for dirname in dirnames:
asset = (path.join(dirpath, dirname, "*"), path.join(dirpath, dirname))
newassets.append(asset)
read[index] = f"assets = {newassets}\n"
if line.startswith(" ['src"):
read[index] = f" ['{path.join('src', 'Game.py')}'],\n"
if line.startswith(" name="):
version = getenv('VERSION', "0.0.0")
read[index] = f" name='Mine-Mine-Mine-v{version}-{system()}',\n"
with open("Game.spec", "wt", encoding="utf-8") as file:
file.writelines(read)
|
import os
import tomodachi
from typing import Any, Dict
from tomodachi import aws_sns_sqs, aws_sns_sqs_publish
from tomodachi.discovery import AWSSNSRegistration
from tomodachi.protocol import JsonBase
@tomodachi.service
class ExampleAWSSNSSQSService(tomodachi.Service):
name = 'example_aws_sns_sqs_service'
log_level = 'INFO'
uuid = os.environ.get('SERVICE_UUID')
# Build own "discovery" functions, to be run on start and stop
# See tomodachi/discovery/aws_sns_registration.py for example
discovery = [AWSSNSRegistration]
# The message protocol class defines how a message should be processed when sent and received
# See tomodachi/protocol/json_base.py for a basic example using JSON and transferring some metadata
message_protocol = JsonBase
# Some options can be specified to define credentials, used ports, hostnames, access log, etc.
options = {
'aws_sns_sqs': {
'region_name': None, # specify AWS region (example: 'eu-west-1')
'aws_access_key_id': None, # specify AWS access key (example: 'AKIAXNTIENCJIY2STOCI')
'aws_secret_access_key': None # specify AWS secret key (example: 'f7sha92hNotarealsecretkeyn29ShnSYQi3nzgA')
},
'aws_endpoint_urls': {
'sns': None, # For example 'http://localhost:4575' if localstack is used for testing
'sqs': None # For example 'http://localhost:4576' if localstack is used for testing
}
}
@aws_sns_sqs('example-route1')
async def route1a(self, data: Any) -> None:
self.log('Received data (function: route1a) - "{}"'.format(data))
@aws_sns_sqs('example-route1')
async def route1b(self, data: Any) -> None:
self.log('Received data (function: route1b) - "{}"'.format(data))
@aws_sns_sqs('example-route2')
async def route2(self, data: Any) -> None:
self.log('Received data (function: route2) - "{}"'.format(data))
@aws_sns_sqs('example-#')
async def wildcard_route(self, metadata: Dict, data: Any) -> None:
self.log('Received data (function: wildcard_route, topic: {}) - "{}"'.format(metadata.get('topic', ''), data))
async def _started_service(self) -> None:
async def publish(data: Any, topic: str) -> None:
self.log('Publish data "{}"'.format(data))
await aws_sns_sqs_publish(self, data, topic=topic, wait=False)
await publish('友達', 'example-route1')
await publish('other data', 'example-route2')
|
values = list()
values.append(5)
values.append(9)
values.append(4)
values.remove(4)
for c, v in enumerate(values):
    print(f'At position {c} I found the value {v}!')
print('Reached the end!')
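# Expected output (the list holds [5, 9] once 4 has been removed):
#   At position 0 I found the value 5!
#   At position 1 I found the value 9!
#   Reached the end!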
|
import telnetlib
import requests
from loguru import logger
headers = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "zh-CN,zh;q=0.9",
"Cache-Control": 'max-age=0',
"Connection": "keep-alive",
"sec-ch-ua": '"Chromium";v="94", "Google Chrome";v="94", ";Not A Brand";v="99"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": "Windows",
"Sec-Fetch-Dest": "document",
"Sec-Fetch-Mode": "navigate",
"Sec-Fetch-Site": "same-origin",
"Sec-Fetch-User": "?1",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36",
}
proxy_list = ['222.66.202.6:80']
for proxy in proxy_list:
try:
ip_port = proxy.split(":")
telnetlib.Telnet(ip_port[0], ip_port[1], timeout=3)
proxies = {"http": f"http://{proxy}", "https": f"https://{proxy}"}
response = requests.get(url="http://icanhazip.com", proxies=proxies, headers=headers)
logger.info(response)
if response.status_code == 200:
logger.info(response.status_code)
if ip_port[0] == str(response.text.strip()):
logger.info(f"=========valid proxy:{proxy}")
except Exception as e:
logger.info(f"*********invalid proxy:{proxy}")
pass
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-02-25 12:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('scheduler', '0002_authenticationtoken'),
]
operations = [
migrations.AddField(
model_name='scheduledpost',
name='attached_media',
field=models.ImageField(blank=True, null=True, upload_to=b''),
),
]
|
#=============================================================================#
# #
# MODIFIED: 15-Jan-2019 by C. Purcell #
# #
#=============================================================================#
import cv2
#-----------------------------------------------------------------------------#
class MeanPreprocessor:
def __init__(self, rMean, gMean, bMean, rgbOrder=True):
self.rMean = rMean
self.gMean = gMean
self.bMean = bMean
self.rgbOrder = rgbOrder
def preprocess(self, image):
# Split the image into its respective RGB channels
if self.rgbOrder:
(R, G, B) = cv2.split(image.astype("float32"))
else:
(B, G, R) = cv2.split(image.astype("float32"))
# Subtract the means for each channel
R -= self.rMean
G -= self.gMean
B -= self.bMean
# Merge the channels back together and return the image
if self.rgbOrder:
return cv2.merge([R, G, B])
else:
return cv2.merge([B, G, R])
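# Usage sketch (the mean values below are illustrative placeholders, not taken
# from any particular dataset):
#   mp = MeanPreprocessor(rMean=123.68, gMean=116.78, bMean=103.94)
#   balanced = mp.preprocess(image)  # image: HxWx3 NumPy array, RGB order by default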
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Unit tests for WikidataStuff."""
from __future__ import unicode_literals
import json
import mock
import os
import unittest
import pywikibot
from wikidatastuff import wikidata_stuff
from wikidatastuff.reference import Reference # replace with mocks
from wikidatastuff.statement import Statement # replace with mocks
from wikidatastuff.qualifier import Qualifier # replace with mocks
class BaseTest(unittest.TestCase):
"""Base test setting loading offline data and setting up patchers."""
def setUp(self):
"""Setup test."""
self.repo = pywikibot.Site('test', 'wikidata')
self.wd_page = pywikibot.ItemPage(self.repo, None)
data_dir = os.path.join(os.path.split(__file__)[0], 'data')
with open(os.path.join(data_dir, 'Q27399.json')) as f:
self.wd_page._content = json.load(f).get('entities').get('Q27399')
self.wd_page._content['id'] = '-1' # override id used in demo file
self.wd_page.get()
self.wd_stuff = wikidata_stuff.WikidataStuff(self.repo)
# silence output
output_patcher = mock.patch(
'wikidatastuff.wikidata_stuff.pywikibot.output')
warning_patcher = mock.patch(
'wikidatastuff.wikidata_stuff.pywikibot.warning')
self.mock_output = output_patcher.start()
self.mock_warning = warning_patcher.start()
self.addCleanup(output_patcher.stop)
self.addCleanup(warning_patcher.stop)
class TestAddDescription(BaseTest):
"""Test add_description()."""
def setUp(self):
super(TestAddDescription, self).setUp()
description_patcher = mock.patch(
'wikidatastuff.wikidata_stuff.WikidataStuff.'
'add_multiple_descriptions')
self.mock_add_multiple_descriptions = description_patcher.start()
self.addCleanup(description_patcher.stop)
def test_add_description_send_to_add_multiple(self):
"""Test add_multiple is called with right values and defaults."""
lang = 'fi'
text = 'fi_desc'
self.wd_stuff.add_description(lang, text, self.wd_page)
self.mock_add_multiple_descriptions.assert_called_once_with(
{'fi': 'fi_desc'}, self.wd_page, overwrite=False, summary=None)
def test_add_description_send_all_params_to_add_multiple(self):
"""Test add_multiple is called with all parameters."""
lang = 'fi'
text = 'fi_desc'
self.wd_stuff.add_description(
lang, text, self.wd_page, True, 'test')
self.mock_add_multiple_descriptions.assert_called_once_with(
{'fi': 'fi_desc'}, self.wd_page, overwrite=True, summary='test')
class TestAddMultipleDescriptions(BaseTest):
"""Test add_multiple_descriptions()."""
def setUp(self):
super(TestAddMultipleDescriptions, self).setUp()
# override loaded descriptions
self.wd_page.descriptions = {u'en': u'en_desc', u'sv': u'sv_desc'}
description_patcher = mock.patch(
'wikidatastuff.wikidata_stuff.pywikibot.ItemPage.editDescriptions')
self.mock_edit_description = description_patcher.start()
self.addCleanup(description_patcher.stop)
def test_add_multiple_descriptions_empty(self):
"""Test calling without data."""
data = {}
self.wd_stuff.add_multiple_descriptions(data, self.wd_page)
self.mock_edit_description.assert_not_called()
def test_add_multiple_descriptions_no_descriptions(self):
"""Test adding description when no descriptions present."""
self.wd_page.descriptions = {}
data = {'fi': 'fi_desc'}
self.wd_stuff.add_multiple_descriptions(data, self.wd_page)
self.mock_edit_description.assert_called_once_with(
{'fi': 'fi_desc'},
summary=u'Added [fi] description to [[-1]]'
)
def test_add_multiple_descriptions_no_language(self):
"""Test adding description when language not present."""
data = {'fi': 'fi_desc'}
self.wd_stuff.add_multiple_descriptions(data, self.wd_page)
self.mock_edit_description.assert_called_once_with(
{'fi': 'fi_desc'},
summary=u'Added [fi] description to [[-1]]'
)
def test_add_multiple_descriptions_has_language(self):
"""Test adding description when already present."""
data = {'sv': 'sv_new_desc'}
self.wd_stuff.add_multiple_descriptions(data, self.wd_page)
self.mock_edit_description.assert_not_called()
def test_add_multiple_descriptions_overwrite(self):
"""Test overwriting description when already present."""
data = {'sv': 'sv_new_desc'}
self.wd_stuff.add_multiple_descriptions(
data, self.wd_page, overwrite=True)
self.mock_edit_description.assert_called_once_with(
{'sv': 'sv_new_desc'},
summary=u'Added [sv] description to [[-1]]'
)
def test_add_multiple_descriptions_with_summary(self):
"""Test appending to the summary."""
data = {'fi': 'fi_desc'}
self.wd_stuff.add_multiple_descriptions(
data, self.wd_page, summary='TEXT')
self.mock_edit_description.assert_called_once_with(
{'fi': 'fi_desc'},
summary=u'Added [fi] description to [[-1]], TEXT'
)
def test_add_multiple_descriptions_many_add_all(self):
"""Test sending multiple where all are new."""
data = {
'fi': 'fi_desc',
'de': 'de_desc'
}
self.wd_stuff.add_multiple_descriptions(data, self.wd_page)
self.mock_edit_description.assert_called_once_with(
{'fi': 'fi_desc', 'de': 'de_desc'},
summary=u'Added [de, fi] description to [[-1]]'
)
def test_add_multiple_descriptions_many_add_some(self):
"""Test sending multiple where only one is new."""
data = {
'fi': 'fi_desc',
'sv': 'sv_new_desc'
}
self.wd_stuff.add_multiple_descriptions(data, self.wd_page)
self.mock_edit_description.assert_called_once_with(
{'fi': 'fi_desc'},
summary=u'Added [fi] description to [[-1]]'
)
class TestAddLabelOrAlias(BaseTest):
"""Test add_label_or_alias()."""
def setUp(self):
super(TestAddLabelOrAlias, self).setUp()
description_patcher = mock.patch(
'wikidatastuff.wikidata_stuff.WikidataStuff.'
'add_multiple_label_or_alias')
self.mock_add_multiple_label_or_alias = description_patcher.start()
self.addCleanup(description_patcher.stop)
def test_add_description_send_to_add_multiple(self):
"""Test add_multiple is called with right values and defaults."""
lang = 'fi'
text = 'fi_label'
self.wd_stuff.add_label_or_alias(lang, text, self.wd_page)
self.mock_add_multiple_label_or_alias.assert_called_once_with(
{'fi': 'fi_label'}, self.wd_page, case_sensitive=False,
summary=None)
def test_add_description_send_all_params_to_add_multiple(self):
"""Test add_multiple is called with all parameters."""
lang = 'fi'
text = 'fi_label'
self.wd_stuff.add_label_or_alias(
lang, text, self.wd_page, 'test', True)
self.mock_add_multiple_label_or_alias.assert_called_once_with(
{'fi': 'fi_label'}, self.wd_page, case_sensitive=True,
summary='test')
class TestAddMultipleLabelOrAlias(BaseTest):
"""Test add_multiple_label_or_alias()."""
def setUp(self):
super(TestAddMultipleLabelOrAlias, self).setUp()
# override loaded labels and aliases
self.wd_page.labels = {'en': 'en_label', 'sv': 'sv_label'}
self.wd_page.aliases = {'en': ['en_alias_1', ]}
alias_patcher = mock.patch(
'wikidatastuff.wikidata_stuff.pywikibot.ItemPage.editAliases')
label_patcher = mock.patch(
'wikidatastuff.wikidata_stuff.pywikibot.ItemPage.editLabels')
self.mock_edit_alias = alias_patcher.start()
self.mock_edit_label = label_patcher.start()
self.addCleanup(alias_patcher.stop)
self.addCleanup(label_patcher.stop)
def test_add_multiple_label_or_alias_empty(self):
"""Test calling without data."""
data = {}
self.wd_stuff.add_multiple_label_or_alias(data, self.wd_page)
self.mock_edit_label.assert_not_called()
self.mock_edit_alias.assert_not_called()
def test_add_multiple_label_or_alias_no_language(self):
"""Test adding label when language not present."""
self.wd_page.labels = None
self.wd_page.aliases = None
data = {'fi': 'fi_label'}
self.wd_stuff.add_multiple_label_or_alias(data, self.wd_page)
self.mock_edit_label.assert_called_once_with(
{'fi': 'fi_label'},
summary='Added [fi] label to [[-1]]'
)
self.mock_edit_alias.assert_not_called()
def test_add_multiple_label_or_alias_list_of_names(self):
"""Test adding label when language not present."""
self.wd_page.labels = None
self.wd_page.aliases = None
data = {'fi': ['fi_label1', 'fi_label2', 'fi_label3']}
self.wd_stuff.add_multiple_label_or_alias(data, self.wd_page)
self.mock_edit_label.assert_called_once_with(
{'fi': 'fi_label1'},
summary='Added [fi] label to [[-1]]'
)
self.mock_edit_alias.assert_called_once_with(
{'fi': ['fi_label2', 'fi_label3']},
summary='Added [fi] alias to [[-1]]'
)
def test_add_multiple_label_or_alias_has_same_label(self):
data = {'sv': 'sv_label'}
self.wd_stuff.add_multiple_label_or_alias(data, self.wd_page)
self.mock_edit_label.assert_not_called()
self.mock_edit_alias.assert_not_called()
def test_add_multiple_label_or_alias_has_other_label(self):
self.wd_page.aliases = None
data = {'sv': 'sv_label_2'}
self.wd_stuff.add_multiple_label_or_alias(data, self.wd_page)
self.mock_edit_label.assert_not_called()
self.mock_edit_alias.assert_called_once_with(
{'sv': ['sv_label_2', ]},
summary='Added [sv] alias to [[-1]]'
)
def test_add_multiple_label_or_alias_has_same_alias(self):
data = {'en': 'en_alias_1'}
self.wd_stuff.add_multiple_label_or_alias(data, self.wd_page)
self.mock_edit_label.assert_not_called()
self.mock_edit_alias.assert_not_called()
def test_add_multiple_label_or_alias_has_other_alias(self):
data = {'en': 'en_alias_2'}
self.wd_stuff.add_multiple_label_or_alias(data, self.wd_page)
self.mock_edit_label.assert_not_called()
self.mock_edit_alias.assert_called_once_with(
{'en': ['en_alias_1', 'en_alias_2']},
summary='Added [en] alias to [[-1]]'
)
def test_add_multiple_label_or_alias_not_case_sensitive(self):
data = {'sv': 'SV_label'}
self.wd_stuff.add_multiple_label_or_alias(data, self.wd_page)
self.mock_edit_label.assert_not_called()
self.mock_edit_alias.assert_not_called()
def test_add_multiple_label_or_alias_case_sensitive(self):
self.wd_page.aliases = None
data = {'sv': 'SV_label'}
self.wd_stuff.add_multiple_label_or_alias(
data, self.wd_page, case_sensitive=True)
self.mock_edit_label.assert_not_called()
self.mock_edit_alias.assert_called_once_with(
{'sv': ['SV_label', ]},
summary='Added [sv] alias to [[-1]]'
)
def test_add_multiple_label_or_alias_with_summary(self):
self.wd_page.aliases = None
data = {'sv': 'sv_label_2'}
self.wd_stuff.add_multiple_label_or_alias(
data, self.wd_page, summary='TEXT')
self.mock_edit_label.assert_not_called()
self.mock_edit_alias.assert_called_once_with(
{'sv': ['sv_label_2', ]},
summary='Added [sv] alias to [[-1]], TEXT'
)
def test_add_multiple_label_or_alias_many_add_all_labels(self):
"""Test sending multiple where all are new."""
self.wd_page.labels = None
self.wd_page.aliases = None
data = {
'fi': 'fi_label',
'de': 'de_label'
}
self.wd_stuff.add_multiple_label_or_alias(data, self.wd_page)
self.mock_edit_label.assert_called_once_with(
{'fi': 'fi_label', 'de': 'de_label'},
summary=u'Added [de, fi] label to [[-1]]'
)
def test_add_multiple_label_or_alias_many_add_all_aliases(self):
"""Test sending multiple where all are new."""
data = {
'en': 'en_alias_2',
'sv': 'sv_label_2'
}
self.wd_stuff.add_multiple_label_or_alias(data, self.wd_page)
self.mock_edit_alias.assert_called_once_with(
{'en': ['en_alias_1', 'en_alias_2'], 'sv': ['sv_label_2', ]},
summary=u'Added [en, sv] alias to [[-1]]'
)
def test_add_multiple_label_or_alias_many_add_mix(self):
"""Test sending multiple where some are alias and some labels."""
self.wd_page.labels = {'sv': 'sv_label'}
self.wd_page.aliases = None
data = {
'fi': 'fi_label',
'sv': 'sv_label_2'
}
self.wd_stuff.add_multiple_label_or_alias(data, self.wd_page)
self.mock_edit_label.assert_called_once_with(
{'fi': 'fi_label', 'sv': 'sv_label'},
summary=u'Added [fi] label to [[-1]]'
)
self.mock_edit_alias.assert_called_once_with(
{'sv': ['sv_label_2', ]},
summary='Added [sv] alias to [[-1]]'
)
def test_add_multiple_label_or_alias_many_add_some(self):
"""Test sending multiple where only one is new."""
self.wd_page.labels = {'sv': 'sv_label'}
data = {
'fi': 'fi_label',
'sv': 'sv_label'
}
self.wd_stuff.add_multiple_label_or_alias(data, self.wd_page)
self.mock_edit_label.assert_called_once_with(
{'fi': 'fi_label', 'sv': 'sv_label'},
summary=u'Added [fi] label to [[-1]]'
)
class TestHasClaim(BaseTest):
"""Test has_claim()."""
def test_has_claim_prop_not_present(self):
prop = 'P0'
itis = 'A string'
self.assertEqual(
self.wd_stuff.has_claim(prop, itis, self.wd_page),
[])
def test_has_claim_prop_but_not_value(self):
prop = 'P174'
itis = 'An unknown string'
self.assertEqual(
self.wd_stuff.has_claim(prop, itis, self.wd_page),
[])
def test_has_claim_simple_match(self):
prop = 'P174'
itis = 'A string'
expected = 'Q27399$3f62d521-4efe-e8de-8f2d-0d8a10e024cf'
hits = self.wd_stuff.has_claim(prop, itis, self.wd_page)
self.assertEqual(len(hits), 1)
self.assertEqual(
hits[0].toJSON()['id'],
expected)
def test_has_claim_match_independent_of_reference(self):
prop = 'P174'
itis = 'A string with a reference'
expected = 'Q27399$ef9f73ce-4cd5-13e5-a0bf-4ad835d8f9c3'
hits = self.wd_stuff.has_claim(prop, itis, self.wd_page)
self.assertEqual(len(hits), 1)
self.assertEqual(
hits[0].toJSON()['id'],
expected)
def test_has_claim_match_item_type(self):
prop = 'P84'
itis = pywikibot.ItemPage(self.repo, 'Q1341')
expected = 'Q27399$58a0a8bc-46e4-3dc6-16fe-e7c364103c9b'
hits = self.wd_stuff.has_claim(prop, itis, self.wd_page)
self.assertEqual(len(hits), 1)
self.assertEqual(
hits[0].toJSON()['id'],
expected)
def test_has_claim_match_wbtime_type(self):
prop = 'P74'
itis = pywikibot.WbTime(year=2016, month=11, day=22, site=self.repo)
function = 'wikidatastuff.wikidata_stuff.WikidataStuff.compare_wbtime_claim'
with mock.patch(function, autospec=True) as mock_compare_WbTime:
self.wd_stuff.has_claim(prop, itis, self.wd_page)
mock_compare_WbTime.assert_called_once_with(
self.wd_stuff, itis, itis)
def test_has_claim_match_independent_of_qualifier(self):
prop = 'P174'
itis = 'A string entry with a qualifier'
expected = 'Q27399$50b7cccb-4e9d-6f5d-d9c9-6b85d771c2d4'
hits = self.wd_stuff.has_claim(prop, itis, self.wd_page)
self.assertEqual(len(hits), 1)
self.assertEqual(
hits[0].toJSON()['id'],
expected)
def test_has_claim_match_multiple(self):
prop = 'P664'
itis = 'Duplicate_string'
expected_1 = 'Q27399$221e4451-46d7-8c4a-53cb-47a4e0d09660'
expected_2 = 'Q27399$a9b83de1-49d7-d033-939d-f430a232ffd0'
hits = self.wd_stuff.has_claim(prop, itis, self.wd_page)
self.assertEqual(len(hits), 2)
self.assertEqual(
hits[0].toJSON()['id'],
expected_1)
self.assertEqual(
hits[1].toJSON()['id'],
expected_2)
class TestHasQualifier(BaseTest):
"""Test has_qualifier()."""
def setUp(self):
super(TestHasQualifier, self).setUp()
self.claim_no_qual = self.wd_page.claims['P174'][2]
# one qualifier: P174:A qualifier
self.claim_one_qual = self.wd_page.claims['P174'][0]
# two qualifiers: P174:A qualifier, P664:Another qualifier
self.claim_two_quals_diff_p = self.wd_page.claims['P174'][4]
# two qualifiers: P174:A qualifier, P174:Another qualifier
self.claim_two_quals_same_p = self.wd_page.claims['P174'][5]
        # load qualifiers to test with
self.qual_1 = Qualifier('P174', 'A qualifier')
self.qual_2 = Qualifier('P664', 'Another qualifier')
self.qual_3 = Qualifier('P174', 'Another qualifier')
self.unmatched_val = Qualifier('P174', 'Unmatched')
self.unmatched_p = Qualifier('P0', 'A qualifier')
def test_has_qualifier_no_qualifier(self):
self.assertFalse(
self.wd_stuff.has_qualifier(
self.qual_1, self.claim_no_qual))
def test_has_qualifier_different_qualifier(self):
self.assertFalse(
self.wd_stuff.has_qualifier(
self.qual_2, self.claim_one_qual))
def test_has_qualifier_different_qualifier_prop(self):
self.assertFalse(
self.wd_stuff.has_qualifier(
self.unmatched_p, self.claim_one_qual))
def test_has_qualifier_different_qualifier_value(self):
self.assertFalse(
self.wd_stuff.has_qualifier(
self.unmatched_val, self.claim_one_qual))
def test_has_qualifier_same_qualifier(self):
self.assertTrue(
self.wd_stuff.has_qualifier(
self.qual_1, self.claim_one_qual))
def test_has_qualifier_multiple_qualifiers_different_prop(self):
claim = self.claim_two_quals_diff_p
expect_qual_1 = self.qual_1
expect_qual_2 = self.qual_2
unexpected_qual = self.unmatched_val
self.assertTrue(self.wd_stuff.has_qualifier(expect_qual_1, claim))
self.assertTrue(self.wd_stuff.has_qualifier(expect_qual_2, claim))
self.assertFalse(self.wd_stuff.has_qualifier(unexpected_qual, claim))
def test_has_qualifier_multiple_qualifiers_same_prop(self):
claim = self.claim_two_quals_same_p
expect_qual_1 = self.qual_1
expect_qual_2 = self.qual_3
unexpected_qual = self.unmatched_val
self.assertTrue(self.wd_stuff.has_qualifier(expect_qual_1, claim))
self.assertTrue(self.wd_stuff.has_qualifier(expect_qual_2, claim))
self.assertFalse(self.wd_stuff.has_qualifier(unexpected_qual, claim))
class TestHasAllQualifiers(BaseTest):
"""Test has_all_qualifiers()."""
def setUp(self):
super(TestHasAllQualifiers, self).setUp()
self.quals = []
# load claims
self.claim_no_qual = self.wd_page.claims['P174'][2]
# two qualifiers: P174:A qualifier, P664:Another qualifier
self.claim = self.wd_page.claims['P174'][4]
# load qualifiers
self.qual_1 = Qualifier('P174', 'A qualifier')
self.qual_2 = Qualifier('P664', 'Another qualifier')
self.unmatched = Qualifier('P174', 'Unmatched')
def test_has_all_qualifiers_none(self):
with self.assertRaises(TypeError):
self.wd_stuff.has_all_qualifiers(None, self.claim)
def test_has_all_qualifiers_empty(self):
expected = (True, True)
self.assertEqual(
self.wd_stuff.has_all_qualifiers(self.quals, self.claim_no_qual),
expected)
def test_has_all_qualifiers_has_all(self):
self.quals.append(self.qual_1)
self.quals.append(self.qual_2)
expected = (True, True)
self.assertEqual(
self.wd_stuff.has_all_qualifiers(self.quals, self.claim),
expected)
def test_has_all_qualifiers_has_all_but_one(self):
self.quals.append(self.qual_1)
self.quals.append(self.qual_2)
self.quals.append(self.unmatched)
expected = (False, False)
self.assertEqual(
self.wd_stuff.has_all_qualifiers(self.quals, self.claim),
expected)
def test_has_all_qualifiers_has_all_plus_one(self):
self.quals.append(self.qual_1)
expected = (False, True)
self.assertEqual(
self.wd_stuff.has_all_qualifiers(self.quals, self.claim),
expected)
class TestAddReference(BaseTest):
"""Test add_reference()."""
def setUp(self):
super(TestAddReference, self).setUp()
self.claim_no_ref = self.wd_page.claims['P174'][2]
# one ref with two claims: P174:ref_1, P664:ref_2
self.claim_one_ref = self.wd_page.claims['P174'][1]
# two refs each with one claim: P174:ref_1, P174:ref_2
self.claim_two_refs = self.wd_page.claims['P174'][3]
# load three claims to use when making references
self.ref_1 = pywikibot.Claim(self.repo, 'P174')
self.ref_1.setTarget('ref_1')
self.ref_2 = pywikibot.Claim(self.repo, 'P664')
self.ref_2.setTarget('ref_2')
self.unmatched_ref = pywikibot.Claim(self.repo, 'P174')
self.unmatched_ref.setTarget('Unmatched_ref')
sources_patcher = mock.patch(
'wikidatastuff.wikidata_stuff.pywikibot.Claim.addSources')
self.mock_add_sources = sources_patcher.start()
self.addCleanup(sources_patcher.stop)
def test_add_reference_empty_ref(self):
self.assertFalse(
self.wd_stuff.add_reference(item=None, claim=None, ref=None))
self.mock_add_sources.assert_not_called()
def test_add_reference_test_no_prior(self):
reference = Reference(source_test=self.ref_1)
self.assertTrue(
self.wd_stuff.add_reference(
item=self.wd_page,
claim=self.claim_no_ref,
ref=reference))
self.mock_add_sources.assert_called_once_with(
[self.ref_1], summary=None)
def test_add_reference_notest_no_prior(self):
reference = Reference(source_notest=self.ref_1)
self.assertTrue(
self.wd_stuff.add_reference(
item=self.wd_page,
claim=self.claim_no_ref,
ref=reference))
self.mock_add_sources.assert_called_once_with(
[self.ref_1], summary=None)
def test_add_reference_has_ref_and_one_more(self):
reference = Reference(source_test=self.ref_1)
self.assertFalse(
self.wd_stuff.add_reference(
item=self.wd_page,
claim=self.claim_one_ref,
ref=reference))
self.mock_add_sources.assert_not_called()
def test_add_reference_has_both(self):
reference = Reference(
source_test=self.ref_1, source_notest=self.ref_2)
self.assertFalse(
self.wd_stuff.add_reference(
item=self.wd_page,
claim=self.claim_one_ref,
ref=reference))
self.mock_add_sources.assert_not_called()
def test_add_reference_has_test_only(self):
reference = Reference(
source_test=self.ref_1, source_notest=self.unmatched_ref)
self.assertFalse(
self.wd_stuff.add_reference(
item=self.wd_page,
claim=self.claim_one_ref,
ref=reference))
self.mock_add_sources.assert_not_called()
def test_add_reference_has_notest_only(self):
reference = Reference(
source_test=self.unmatched_ref, source_notest=self.ref_2)
self.assertTrue(
self.wd_stuff.add_reference(
item=self.wd_page,
claim=self.claim_one_ref,
ref=reference))
self.mock_add_sources.assert_called_once_with(
[self.unmatched_ref, self.ref_2], summary=None)
def test_add_reference_with_summary(self):
reference = Reference(source_test=self.ref_1)
self.assertTrue(
self.wd_stuff.add_reference(
item=self.wd_page,
claim=self.claim_no_ref,
ref=reference,
summary='test_me'))
self.mock_add_sources.assert_called_once_with(
[self.ref_1], summary='test_me')
def test_add_reference_detect_when_multple_sources(self):
reference = Reference(source_test=self.ref_1)
self.assertFalse(
self.wd_stuff.add_reference(
item=self.wd_page,
claim=self.claim_two_refs,
ref=reference))
self.mock_add_sources.assert_not_called()
def test_add_reference_add_when_multple_sources(self):
reference = Reference(source_test=self.ref_2)
self.assertTrue(
self.wd_stuff.add_reference(
item=self.wd_page,
claim=self.claim_two_refs,
ref=reference))
self.mock_add_sources.assert_called_once_with(
[self.ref_2], summary=None)
class TestAddQualifier(BaseTest):
"""Test add_qualifier()."""
def setUp(self):
super(TestAddQualifier, self).setUp()
self.claim = self.wd_page.claims['P174'][2]
self.qual = Qualifier('P174', 'A qualifier')
qualifier_patcher = mock.patch(
'wikidatastuff.wikidata_stuff.pywikibot.Claim.addQualifier')
make_claim_patcher = mock.patch(
'wikidatastuff.wikidata_stuff.WikidataStuff.make_simple_claim')
has_qualifier_patcher = mock.patch(
'wikidatastuff.wikidata_stuff.WikidataStuff.has_qualifier')
self.mock_add_qualifier = qualifier_patcher.start()
self.mock_make_simple_claim = make_claim_patcher.start()
self.mock_has_qualifier = has_qualifier_patcher.start()
self.addCleanup(qualifier_patcher.stop)
self.addCleanup(make_claim_patcher.stop)
self.addCleanup(has_qualifier_patcher.stop)
def test_add_qualifier_empty_qual(self):
with self.assertRaises(pywikibot.Error) as e:
self.wd_stuff.add_qualifier(item=None, claim=None, qual=None)
self.assertEqual(
str(e.exception),
'Cannot call add_qualifier() without a qualifier.')
self.mock_has_qualifier.assert_not_called()
self.mock_make_simple_claim.assert_not_called()
self.mock_add_qualifier.assert_not_called()
def test_add_qualifier_has(self):
self.mock_has_qualifier.return_value = True
self.assertFalse(
self.wd_stuff.add_qualifier(
item=self.wd_page,
claim=self.claim,
qual=self.qual))
self.mock_has_qualifier.assert_called_once_with(
self.qual, self.claim)
self.mock_make_simple_claim.assert_not_called()
self.mock_add_qualifier.assert_not_called()
def test_add_qualifier_has_not(self):
self.mock_has_qualifier.return_value = False
self.mock_make_simple_claim.return_value = 'test'
self.assertTrue(
self.wd_stuff.add_qualifier(
item=self.wd_page,
claim=self.claim,
qual=self.qual))
self.mock_has_qualifier.assert_called_once_with(
self.qual, self.claim)
self.mock_make_simple_claim.assert_called_once_with(
self.qual.prop, self.qual.itis)
self.mock_add_qualifier.assert_called_once_with(
'test', summary=None)
def test_add_qualifier_with_summary(self):
self.mock_has_qualifier.return_value = False
self.mock_make_simple_claim.return_value = 'test'
self.assertTrue(
self.wd_stuff.add_qualifier(
item=self.wd_page,
claim=self.claim,
qual=self.qual,
summary='test_me'))
self.mock_has_qualifier.assert_called_once_with(
self.qual, self.claim)
self.mock_make_simple_claim.assert_called_once_with(
self.qual.prop, self.qual.itis)
self.mock_add_qualifier.assert_called_once_with(
'test', summary='test_me')
class TestAddNewClaim(BaseTest):
"""Test add_new_claim()."""
def setUp(self):
super(TestAddNewClaim, self).setUp()
# mock all writing calls
add_qualifier_patcher = mock.patch(
'wikidatastuff.wikidata_stuff.WikidataStuff.add_qualifier')
add_reference_patcher = mock.patch(
'wikidatastuff.wikidata_stuff.WikidataStuff.add_reference')
add_claim_patcher = mock.patch(
'wikidatastuff.wikidata_stuff.pywikibot.ItemPage.addClaim')
self.mock_add_qualifier = add_qualifier_patcher.start()
self.mock_add_reference = add_reference_patcher.start()
self.mock_add_claim = add_claim_patcher.start()
self.addCleanup(add_qualifier_patcher.stop)
self.addCleanup(add_reference_patcher.stop)
self.addCleanup(add_claim_patcher.stop)
# defaults
self.ref = None
self.prop = 'P509' # an unused property of type string
self.value = 'A statement'
self.qual_1 = Qualifier('P174', 'A qualifier')
self.qual_2 = Qualifier('P664', 'Another qualifier')
self.mock_ref_1 = mock.create_autospec(Reference)
self.mock_ref_2 = mock.create_autospec(Reference)
def test_add_new_claim_new_property(self):
statement = Statement(self.value)
self.wd_stuff.add_new_claim(self.prop, statement, self.wd_page, self.ref)
self.mock_add_claim.assert_called_once()
self.mock_add_qualifier.assert_not_called()
self.mock_add_reference.assert_called_once()
def test_add_new_claim_old_property_new_value(self):
self.prop = 'P174'
statement = Statement(self.value)
self.wd_stuff.add_new_claim(self.prop, statement, self.wd_page, self.ref)
self.mock_add_claim.assert_called_once()
self.mock_add_qualifier.assert_not_called()
self.mock_add_reference.assert_called_once()
def test_add_new_claim_old_property_old_value(self):
self.prop = 'P174'
self.value = 'A string'
statement = Statement(self.value)
expected_claim = 'Q27399$3f62d521-4efe-e8de-8f2d-0d8a10e024cf'
self.wd_stuff.add_new_claim(self.prop, statement, self.wd_page, self.ref)
self.mock_add_claim.assert_not_called()
self.mock_add_qualifier.assert_not_called()
self.mock_add_reference.assert_called_once()
# ensure the right claim was sourced
self.assertEqual(
self.mock_add_reference.call_args[0][1].toJSON()['id'],
expected_claim)
def test_add_new_claim_new_property_with_quals(self):
statement = Statement(self.value)
statement.add_qualifier(self.qual_1).add_qualifier(self.qual_2)
self.wd_stuff.add_new_claim(self.prop, statement, self.wd_page, self.ref)
self.mock_add_claim.assert_called_once()
self.assertEqual(self.mock_add_qualifier.call_count, 2)
self.mock_add_reference.assert_called_once()
def test_add_new_claim_old_property_new_value_with_quals(self):
self.prop = 'P174'
statement = Statement(self.value)
statement.add_qualifier(self.qual_1).add_qualifier(self.qual_2)
self.wd_stuff.add_new_claim(self.prop, statement, self.wd_page, self.ref)
self.mock_add_claim.assert_called_once()
self.assertEqual(self.mock_add_qualifier.call_count, 2)
self.mock_add_reference.assert_called_once()
def test_add_new_claim_old_property_old_value_without_quals(self):
self.prop = 'P174'
self.value = 'A string'
statement = Statement(self.value)
statement.add_qualifier(self.qual_1).add_qualifier(self.qual_2)
expected_claim = 'Q27399$3f62d521-4efe-e8de-8f2d-0d8a10e024cf'
self.wd_stuff.add_new_claim(self.prop, statement, self.wd_page, self.ref)
self.mock_add_claim.assert_not_called()
self.assertEqual(self.mock_add_qualifier.call_count, 2)
self.mock_add_reference.assert_called_once()
self.assertEqual(
self.mock_add_reference.call_args[0][1].toJSON()['id'],
expected_claim)
def test_add_new_claim_old_property_old_value_with_different_quals(self):
self.prop = 'P174'
self.value = 'A string entry with a qualifier'
statement = Statement(self.value)
statement.add_qualifier(self.qual_1).add_qualifier(self.qual_2)
self.wd_stuff.add_new_claim(self.prop, statement, self.wd_page, self.ref)
self.mock_add_claim.assert_called_once()
self.assertEqual(self.mock_add_qualifier.call_count, 2)
self.mock_add_reference.assert_called_once()
def test_add_new_claim_old_property_old_value_with_same_quals(self):
self.prop = 'P174'
self.value = 'A string entry with many qualifiers'
statement = Statement(self.value)
statement.add_qualifier(self.qual_1).add_qualifier(self.qual_2)
expected_claim = 'Q27399$b48a2630-4fbb-932d-4f01-eefcf1e73f59'
self.wd_stuff.add_new_claim(self.prop, statement, self.wd_page, self.ref)
self.mock_add_claim.assert_not_called()
self.assertEqual(self.mock_add_qualifier.call_count, 2)
self.mock_add_reference.assert_called_once()
self.assertEqual(
self.mock_add_reference.call_args[0][1].toJSON()['id'],
expected_claim)
def test_add_new_claim_edit_correct_qualified_claim(self):
self.prop = 'P664'
self.value = 'Duplicate_string'
statement = Statement(self.value)
statement.add_qualifier(
Qualifier('P174', 'qualifier'))
expected_claim = 'Q27399$a9b83de1-49d7-d033-939d-f430a232ffd0'
self.wd_stuff.add_new_claim(self.prop, statement, self.wd_page, self.ref)
self.mock_add_claim.assert_not_called()
        self.mock_add_qualifier.assert_called_once()
self.mock_add_reference.assert_called_once()
self.assertEqual(
self.mock_add_reference.call_args[0][1].toJSON()['id'],
expected_claim)
def test_add_new_claim_edit_correct_qualified_claim_with_ref(self):
self.prop = 'P664'
self.value = 'Duplicate_string_with_ref'
statement = Statement(self.value)
statement.add_qualifier(
Qualifier('P174', 'qualifier'))
expected_claim = 'Q27399$e63f47a3-45ea-e2fc-1363-8f6062205084'
self.wd_stuff.add_new_claim(self.prop, statement, self.wd_page, self.ref)
self.mock_add_claim.assert_not_called()
        self.mock_add_qualifier.assert_called_once()
self.mock_add_reference.assert_called_once()
self.assertEqual(
self.mock_add_reference.call_args[0][1].toJSON()['id'],
expected_claim)
def test_add_new_claim_call_special_has_claim(self):
value = 'somevalue'
statement = Statement(value, special=True)
function = 'wikidatastuff.wikidata_stuff.WikidataStuff.has_special_claim'
with mock.patch(function, autospec=True) as mock_has_special_claim:
self.wd_stuff.add_new_claim(
self.prop, statement, self.wd_page, self.ref)
mock_has_special_claim.assert_called_once_with(
self.wd_stuff, self.prop, value, self.wd_page)
def test_add_new_claim_embedded_ref_used(self):
statement = Statement(self.value)
statement.add_reference(self.mock_ref_2)
self.wd_stuff.add_new_claim(
self.prop, statement, self.wd_page, None)
self.mock_add_reference.assert_called_once()
self.assertEqual(
self.mock_add_reference.call_args[0][2],
self.mock_ref_2)
def test_add_new_claim_provided_ref_overrides_embedded_ref(self):
statement = Statement(self.value)
statement.add_reference(self.mock_ref_2)
self.wd_stuff.add_new_claim(
self.prop, statement, self.wd_page, self.mock_ref_1)
self.mock_add_reference.assert_called_once()
self.assertEqual(
self.mock_add_reference.call_args[0][2],
self.mock_ref_1)
def test_add_new_claim_raise_error_on_bad_ref(self):
statement = Statement(self.value)
with self.assertRaises(pywikibot.Error) as e:
self.wd_stuff.add_new_claim(
self.prop, statement, self.wd_page, 'Not a ref')
self.assertEqual(str(e.exception),
'The provided reference was not a '
'Reference object. Crashing')
def test_add_new_claim_warning_on_duplicate_matching_claim(self):
self.prop = 'P84'
self.value = pywikibot.ItemPage(self.repo, 'Q505')
statement = Statement(self.value)
self.wd_stuff.add_new_claim(
self.prop, statement, self.wd_page, self.ref)
self.mock_warning.assert_called_once_with(
'Problem adding P84 claim to [[wikidata:test:-1]]: '
'Multiple identical claims')
self.mock_add_claim.assert_not_called()
self.mock_add_qualifier.assert_not_called()
self.mock_add_reference.assert_not_called()
class TestMatchClaim(BaseTest):
"""Test match_claim()."""
def setUp(self):
super(TestMatchClaim, self).setUp()
# defaults
self.claims = []
self.qualifiers = []
self.force = False
# Load claims to descriptive variables
# note that only qualifiers + references matter, main value is ignored
# the default qualifier in these is P174:"A qualifier"
self.one_qual_no_ref = self.wd_page.claims['P174'][0]
self.no_qual_two_ref = self.wd_page.claims['P174'][1]
self.no_qual_no_ref = self.wd_page.claims['P174'][2]
# Second qualifier P174:"Another qualifier"
self.two_qual_no_ref = self.wd_page.claims['P174'][5]
        # load two qualifiers
self.matched_qualifier = Qualifier('P174', 'A qualifier')
self.unmatched_qualifier = Qualifier('P174', 'Unmatched')
def test_match_claim_empty_claim(self):
# 0. no claims: None selected
self.assertIsNone(
self.wd_stuff.match_claim(
self.claims, self.qualifiers, self.force))
def test_match_claim_multiple_exact_raises_error(self):
# 1. many exact matches: raise error (means duplicates on Wikidata)
self.claims.append(self.no_qual_two_ref)
self.claims.append(self.no_qual_no_ref)
with self.assertRaises(pywikibot.Error) as e:
self.wd_stuff.match_claim(
self.claims, self.qualifiers, self.force)
self.assertEqual(str(e.exception), 'Multiple identical claims')
def test_match_claim_empty_qualifier_exact(self):
# 2. if no qualifier select the unqualified
self.claims.append(self.one_qual_no_ref)
self.claims.append(self.no_qual_no_ref)
expected_claim = self.no_qual_no_ref
self.assertEqual(
self.wd_stuff.match_claim(
self.claims, self.qualifiers, self.force),
expected_claim)
def test_match_claim_one_qualifier_exact(self):
# 2. if qualifier select the qualified
self.claims.append(self.one_qual_no_ref)
self.claims.append(self.no_qual_no_ref)
self.claims.append(self.two_qual_no_ref)
self.qualifiers.append(self.matched_qualifier)
expected_claim = self.one_qual_no_ref
self.assertEqual(
self.wd_stuff.match_claim(
self.claims, self.qualifiers, self.force),
expected_claim)
def test_match_claim_multiple_close_raises_error(self):
        # 3. the unqualified statement is equally close to multiple qualified claims
self.claims.append(self.one_qual_no_ref)
self.claims.append(self.two_qual_no_ref)
with self.assertRaises(pywikibot.Error) as e:
self.wd_stuff.match_claim(
self.claims, self.qualifiers, self.force)
self.assertEqual(str(e.exception), 'Multiple semi-identical claims')
def test_match_claim_one_qualifier_close(self):
# 4. if qualified select the closest match
# (contains at least that qualifier)
self.claims.append(self.no_qual_no_ref)
self.claims.append(self.two_qual_no_ref)
self.qualifiers.append(self.matched_qualifier)
expected_claim = self.two_qual_no_ref
self.assertEqual(
self.wd_stuff.match_claim(
self.claims, self.qualifiers, self.force),
expected_claim)
def test_match_claim_many_non_close(self):
# 5. qualifier does not match any of the claims
self.claims.append(self.one_qual_no_ref)
self.claims.append(self.two_qual_no_ref)
self.qualifiers.append(self.unmatched_qualifier)
self.assertIsNone(
self.wd_stuff.match_claim(
self.claims, self.qualifiers, self.force))
def test_match_claim_one_claim(self):
# 6.1 If only one claim and
# if claim unsourced and unqualified: select this
self.claims.append(self.no_qual_no_ref)
expected_claim = self.no_qual_no_ref
self.qualifiers.append(self.unmatched_qualifier)
self.assertEqual(
self.wd_stuff.match_claim(
self.claims, self.qualifiers, self.force),
expected_claim)
def test_match_claim_one_sourced_claim_forced(self):
# 6.2 If only one claim and
# if sourced and unqualified and force: select this
self.claims.append(self.no_qual_two_ref)
self.qualifiers.append(self.unmatched_qualifier)
self.force = True
expected_claim = self.no_qual_two_ref
self.assertEqual(
self.wd_stuff.match_claim(
self.claims, self.qualifiers, self.force),
expected_claim)
def test_match_claim_one_sourced_claim_none_selected(self):
# 6.3 If only one claim and it is sourced None is selected
self.claims.append(self.no_qual_two_ref)
self.qualifiers.append(self.unmatched_qualifier)
self.assertIsNone(
self.wd_stuff.match_claim(
self.claims, self.qualifiers, self.force))
def test_match_claim_one_qualified_claim_none_selected(self):
# 6.3 If only one claim and it is qualified None is selected
self.claims.append(self.one_qual_no_ref)
self.qualifiers.append(self.unmatched_qualifier)
self.assertIsNone(
self.wd_stuff.match_claim(
self.claims, self.qualifiers, self.force))
def test_match_claim_one_qualified_claim_forced_none_selected(self):
# 6.3 If only one claim and it is qualified None is selected
# even if forced
self.claims.append(self.one_qual_no_ref)
self.qualifiers.append(self.unmatched_qualifier)
self.force = True
self.assertIsNone(
self.wd_stuff.match_claim(
self.claims, self.qualifiers, self.force))
|
# coding=utf-8
# import importlib
# import torch
# from collections import OrderedDict
# from copy import deepcopy
# from os import path as osp
# from tqdm import tqdm
#
# from basicsr.models.archs import define_network
# from basicsr.models.base_model import BaseModel
# from basicsr.utils import get_root_logger, imwrite, tensor2img
#
# loss_module = importlib.import_module('basicsr.models.losses')
# metric_module = importlib.import_module('basicsr.metrics')
#
#
# class SRModel(BaseModel):
# """Base SR model for single image super-resolution."""
#
# def __init__(self, opt):
# super(SRModel, self).__init__(opt)
#
# # define network
# self.net_g = define_network(deepcopy(opt['network_g']))
# self.net_g = self.model_to_device(self.net_g)
# self.print_network(self.net_g)
#
# # load pretrained models
# load_path = self.opt['path'].get('pretrain_network_g', None)
# if load_path is not None:
# self.load_network(self.net_g, load_path,
# self.opt['path'].get('strict_load_g', True))
#
# if self.is_train:
# self.init_training_settings()
#
# def init_training_settings(self):
# self.net_g.train()
# train_opt = self.opt['train']
#
# # define losses
# if train_opt.get('pixel_opt'):
# pixel_type = train_opt['pixel_opt'].pop('type')
# cri_pix_cls = getattr(loss_module, pixel_type)
# self.cri_pix = cri_pix_cls(**train_opt['pixel_opt']).to(
# self.device)
# else:
# self.cri_pix = None
#
# if train_opt.get('perceptual_opt'):
# percep_type = train_opt['perceptual_opt'].pop('type')
# cri_perceptual_cls = getattr(loss_module, percep_type)
# self.cri_perceptual = cri_perceptual_cls(
# **train_opt['perceptual_opt']).to(self.device)
# else:
# self.cri_perceptual = None
#
# if self.cri_pix is None and self.cri_perceptual is None:
# raise ValueError('Both pixel and perceptual losses are None.')
#
# # set up optimizers and schedulers
# self.setup_optimizers()
# self.setup_schedulers()
#
# def setup_optimizers(self):
# train_opt = self.opt['train']
# optim_params = []
# for k, v in self.net_g.named_parameters():
# if v.requires_grad:
# optim_params.append(v)
# else:
# logger = get_root_logger()
# logger.warning(f'Params {k} will not be optimized.')
#
# optim_type = train_opt['optim_g'].pop('type')
# if optim_type == 'Adam':
# self.optimizer_g = torch.optim.Adam(optim_params,
# **train_opt['optim_g'])
# else:
# raise NotImplementedError(
#                 f'optimizer {optim_type} is not supported yet.')
# self.optimizers.append(self.optimizer_g)
#
# def feed_data(self, data):
# self.lq = data['lq'].to(self.device)
# if 'gt' in data:
# self.gt = data['gt'].to(self.device)
#
# def optimize_parameters(self, current_iter):
# self.optimizer_g.zero_grad()
# self.output = self.net_g(self.lq)
#
# l_total = 0
# loss_dict = OrderedDict()
# # pixel loss
# if self.cri_pix:
# l_pix = self.cri_pix(self.output, self.gt)
# l_total += l_pix
# loss_dict['l_pix'] = l_pix
# # perceptual loss
# if self.cri_perceptual:
# l_percep, l_style = self.cri_perceptual(self.output, self.gt)
# if l_percep is not None:
# l_total += l_percep
# loss_dict['l_percep'] = l_percep
# if l_style is not None:
# l_total += l_style
# loss_dict['l_style'] = l_style
#
# l_total.backward()
# self.optimizer_g.step()
#
# self.log_dict = self.reduce_loss_dict(loss_dict)
#
# def test(self):
# self.net_g.eval()
# x = self.lq.squeeze(0).permute(1, 2, 0)
# sr = []
#         with torch.no_grad():  # add test-time model ensembling here
# for rot in range(0, 4):
# for flip in [False, True]:
# _x = x.flip([1]) if flip else x
# _x = _x.rot90(rot)
# out = self.net_g(_x.permute(2, 0, 1).unsqueeze(0)).squeeze(0).permute(1, 2, 0)
# out = out.rot90(4 - rot)
# out = out.flip([1]) if flip else out
# sr.append(out)
# self.output = torch.stack(sr).mean(0).permute(2, 0, 1)
# self.net_g.train()
#
# def dist_validation(self, dataloader, current_iter, tb_logger, save_img):
# logger = get_root_logger()
# logger.info('Only support single GPU validation.')
# self.nondist_validation(dataloader, current_iter, tb_logger, save_img)
#
# def nondist_validation(self, dataloader, current_iter, tb_logger,
# save_img):
# dataset_name = dataloader.dataset.opt['name']
# with_metrics = self.opt['val'].get('metrics') is not None
# if with_metrics:
# self.metric_results = {
# metric: 0
# for metric in self.opt['val']['metrics'].keys()
# }
# pbar = tqdm(total=len(dataloader), unit='image')
#
# for idx, val_data in enumerate(dataloader):
# img_name = osp.splitext(osp.basename(val_data['lq_path'][0]))[0]
# self.feed_data(val_data)
# self.test()
#
# visuals = self.get_current_visuals()
# sr_img = tensor2img([visuals['result']])
# if 'gt' in visuals:
# gt_img = tensor2img([visuals['gt']])
# del self.gt
#
# # tentative for out of GPU memory
# del self.lq
# del self.output
# torch.cuda.empty_cache()
#
# if save_img:
# if self.opt['is_train']:
# save_img_path = osp.join(self.opt['path']['visualization'],
# img_name,
# f'{img_name}_{current_iter}.png')
# else:
# if self.opt['val']['suffix']:
# save_img_path = osp.join(
# self.opt['path']['visualization'], dataset_name,
# f'{img_name}_{self.opt["val"]["suffix"]}.png')
# else:
# save_img_path = osp.join(
# self.opt['path']['visualization'], dataset_name,
# f'{img_name}_{self.opt["name"]}.png')
# imwrite(sr_img, save_img_path)
#
# if with_metrics:
# # calculate metrics
# opt_metric = deepcopy(self.opt['val']['metrics'])
# for name, opt_ in opt_metric.items():
# metric_type = opt_.pop('type')
# self.metric_results[name] += getattr(
# metric_module, metric_type)(sr_img, gt_img, **opt_)
# pbar.update(1)
# pbar.set_description(f'Test {img_name}')
# pbar.close()
#
# if with_metrics:
# for metric in self.metric_results.keys():
# self.metric_results[metric] /= (idx + 1)
#
# self._log_validation_metric_values(current_iter, dataset_name,
# tb_logger)
#
# def _log_validation_metric_values(self, current_iter, dataset_name,
# tb_logger):
# log_str = f'Validation {dataset_name}\n'
# for metric, value in self.metric_results.items():
# log_str += f'\t # {metric}: {value:.4f}\n'
# logger = get_root_logger()
# logger.info(log_str)
# if tb_logger:
# for metric, value in self.metric_results.items():
# tb_logger.add_scalar(f'metrics/{metric}', value, current_iter)
#
# def get_current_visuals(self):
# out_dict = OrderedDict()
# out_dict['lq'] = self.lq.detach().cpu()
# out_dict['result'] = self.output.detach().cpu()
# if hasattr(self, 'gt'):
# out_dict['gt'] = self.gt.detach().cpu()
# return out_dict
#
# def save(self, epoch, current_iter):
# self.save_network(self.net_g, 'net_g', current_iter)
# self.save_training_state(epoch, current_iter)
import importlib
import torch
from collections import OrderedDict
from copy import deepcopy
from os import path as osp
from tqdm import tqdm
import torch.nn as nn
import torch.nn.functional as F
from basicsr.models.archs import define_network
from basicsr.models.base_model import BaseModel
from basicsr.utils import get_root_logger, imwrite, tensor2img
import cv2
loss_module = importlib.import_module('basicsr.models.losses')
metric_module = importlib.import_module('basicsr.metrics')
class CharbonnierLoss(nn.Module):
"""Charbonnier Loss (L1)"""
def __init__(self, eps=1e-3):
super(CharbonnierLoss, self).__init__()
self.eps = eps
def forward(self, x, y):
diff = x - y
# loss = torch.sum(torch.sqrt(diff * diff + self.eps))
loss = torch.mean(torch.sqrt((diff * diff) + (self.eps * self.eps)))
return loss
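# The Charbonnier loss above is a smooth L1 variant: sqrt(diff^2 + eps^2) stays
# differentiable at zero and behaves like |diff| once the error is much larger than eps.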
class GaussianConv(nn.Module):
def __init__(self, kernel_size=5, channels=3, sigma=2.0):
super(GaussianConv, self).__init__()
kernel = self.gauss_kernel(kernel_size, sigma)
kernel = torch.FloatTensor(kernel).unsqueeze(0).unsqueeze(0) # (H, W) -> (1, 1, H, W)
kernel = kernel.expand((int(channels), 1, kernel_size, kernel_size))
self.weight = nn.Parameter(kernel, requires_grad=False).cuda()
self.channels = channels
def forward(self, x):
return F.conv2d(x, self.weight, padding=2, groups=self.channels)
def gauss_kernel(self, size=5, sigma=2.0):
grid = cv2.getGaussianKernel(size, sigma)
kernel = grid * grid.T
return kernel
class LaplacianPyramid(nn.Module):
def __init__(self, max_level=5):
super(LaplacianPyramid, self).__init__()
self.gaussian_conv = GaussianConv()
self.max_level = max_level
def forward(self, X):
t_pyr = []
current = X
for level in range(self.max_level - 1):
t_guass = self.gaussian_conv(current)
t_diff = current - t_guass
t_pyr.append(t_diff)
current = F.avg_pool2d(t_guass, 2)
t_pyr.append(current)
return t_pyr
class LaplacianLoss(nn.Module):
def __init__(self, loss_weight=1.0, reduction='sum', max_level=5):
super(LaplacianLoss, self).__init__()
self.criterion = nn.L1Loss(reduction=reduction)
self.lap = LaplacianPyramid(max_level=max_level)
self.loss_weight = loss_weight
def forward(self, x, y):
x_lap, y_lap = self.lap(x), self.lap(y)
diff_levels = [self.criterion(a, b) for a, b in zip(x_lap, y_lap)]
return self.loss_weight * sum(2 ** (j - 1) * diff_levels[j] for j in range(len(diff_levels)))
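# LaplacianLoss compares the two inputs level by level on a Laplacian pyramid with an
# L1 criterion; the 2 ** (j - 1) factor weights coarser (later) pyramid levels more heavily.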
class EdgeLoss(nn.Module):
def __init__(self):
super(EdgeLoss, self).__init__()
k = torch.Tensor([[.05, .25, .4, .25, .05]])
self.kernel = torch.matmul(k.t(), k).unsqueeze(0).repeat(3, 1, 1, 1)
if torch.cuda.is_available():
self.kernel = self.kernel.cuda()
self.loss = CharbonnierLoss()
def conv_gauss(self, img):
n_channels, _, kw, kh = self.kernel.shape
img = F.pad(img, (kw // 2, kh // 2, kw // 2, kh // 2), mode='replicate')
return F.conv2d(img, self.kernel, groups=n_channels)
def laplacian_kernel(self, current):
filtered = self.conv_gauss(current) # filter
down = filtered[:, :, ::2, ::2] # downsample
new_filter = torch.zeros_like(filtered)
new_filter[:, :, ::2, ::2] = down * 4 # upsample
filtered = self.conv_gauss(new_filter) # filter
diff = current - filtered
return diff
def forward(self, x, y):
loss = self.loss(self.laplacian_kernel(x), self.laplacian_kernel(y))
return loss
class SRModel(BaseModel):
"""Base SR model for single image super-resolution."""
def __init__(self, opt):
super(SRModel, self).__init__(opt)
# define network
# self.net_g = define_network(deepcopy(opt['network_g']))
# self.net_g = self.model_to_device(self.net_g)
# self.print_network(self.net_g)
for i in range(6):
setattr(self, 'net_g_' + str(i), self.model_to_device(define_network(deepcopy(opt['network_g']))))
self.load_network(self.net_g_0,
'/home/rpf/tgp/BasicSR/experiments/WITH_MPR_PRE_256_l2_C/models/net_g_5000.pth',
self.opt['path'].get('strict_load_g', True))
self.load_network(self.net_g_1,
'/home/rpf/tgp/BasicSR/experiments/WITH_MPR_PRE_256_l2_C/models/net_g_10000.pth',
self.opt['path'].get('strict_load_g', True))
self.load_network(self.net_g_2,
'/home/rpf/tgp/BasicSR/experiments/WITH_MPR_PRE_256_l2_C/models/net_g_15000.pth',
self.opt['path'].get('strict_load_g', True))
self.load_network(self.net_g_3,
'/home/rpf/tgp/BasicSR/experiments/WITH_MPR_PRE_256_l2_C/models/net_g_20000.pth',
self.opt['path'].get('strict_load_g', True))
# self.load_network(self.net_g_6, load_path, self.opt['path'].get('strict_load_g', True))
# self.load_network(self.net_g_7, load_path, self.opt['path'].get('strict_load_g', True))
self.criterion_edge = EdgeLoss()
self.laploss = LaplacianLoss()
# load pretrained models
# load_path = self.opt['path'].get('pretrain_network_g', None)
# if load_path is not None:
# self.load_network(self.net_g, load_path,
# self.opt['path'].get('strict_load_g', True))
if self.is_train:
self.init_training_settings()
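    # NOTE: six copies of network_g are constructed above but only net_g_0..net_g_3 are
    # loaded with checkpoints and used by final_test(); net_g_4 and net_g_5 stay
    # uninitialized. The training-path methods below (init_training_settings,
    # setup_optimizers, optimize_parameters, test, save) still reference a single
    # self.net_g, which this ensemble/test-time variant does not define, so they appear
    # to be carried over from the original SRModel and are not runnable as-is.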
def init_training_settings(self):
self.net_g.train()
train_opt = self.opt['train']
# define losses
if train_opt.get('pixel_opt'):
pixel_type = train_opt['pixel_opt'].pop('type')
cri_pix_cls = getattr(loss_module, pixel_type)
self.cri_pix = cri_pix_cls(**train_opt['pixel_opt']).to(
self.device)
else:
self.cri_pix = None
if train_opt.get('perceptual_opt'):
percep_type = train_opt['perceptual_opt'].pop('type')
cri_perceptual_cls = getattr(loss_module, percep_type)
self.cri_perceptual = cri_perceptual_cls(
**train_opt['perceptual_opt']).to(self.device)
else:
self.cri_perceptual = None
if self.cri_pix is None and self.cri_perceptual is None:
raise ValueError('Both pixel and perceptual losses are None.')
# set up optimizers and schedulers
self.setup_optimizers()
self.setup_schedulers()
def setup_optimizers(self):
train_opt = self.opt['train']
optim_params = []
for k, v in self.net_g.named_parameters():
if v.requires_grad:
optim_params.append(v)
else:
logger = get_root_logger()
logger.warning(f'Params {k} will not be optimized.')
optim_type = train_opt['optim_g'].pop('type')
if optim_type == 'Adam':
self.optimizer_g = torch.optim.Adam(optim_params,
**train_opt['optim_g'])
else:
raise NotImplementedError(
                f'optimizer {optim_type} is not supported yet.')
self.optimizers.append(self.optimizer_g)
def feed_data(self, data):
self.lq = data['lq'].to(self.device)
self.bic = data['bic'].to(self.device)
if 'gt' in data:
self.gt = data['gt'].to(self.device)
def getdwt(self, x):
x01 = x[:, :, 0::2, :] / 2
x02 = x[:, :, 1::2, :] / 2
x1 = x01[:, :, :, 0::2]
x2 = x02[:, :, :, 0::2]
x3 = x01[:, :, :, 1::2]
x4 = x02[:, :, :, 1::2]
x_HL = -x1 - x2 + x3 + x4
x_LH = -x1 + x2 - x3 + x4
x_HH = x1 - x2 - x3 + x4
return torch.cat((x_HL, x_LH, x_HH), 1)
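    # getdwt above is a single-level Haar-style decomposition via 2x2 polyphase sampling;
    # it returns only the high-frequency subbands (HL, LH, HH) concatenated along the
    # channel axis, which optimize_parameters uses for a high-frequency consistency loss.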
def optimize_parameters(self, current_iter):
self.optimizer_g.zero_grad()
        self.lr_output, self.output = self.net_g(self.lq)  # the network returns the bicubic-LR reconstruction and the SR output; losses are added on both below
l_total = 0
loss_dict = OrderedDict()
# pixel loss
if self.cri_pix:
pix_loss = self.cri_pix(self.output, self.gt) + self.cri_pix(self.lr_output, self.bic)
hf_loss = self.cri_pix(self.getdwt(self.lr_output), self.getdwt(
self.bic)) # + self.cri_pix(self.getdwt(self.output), self.getdwt(self.gt))
# edge_loss = self.criterion_edge(self.output, self.gt) # + self.criterion_edge(self.lr_output, self.bic)
lap_loss = self.laploss(self.output, self.gt)
l_pix = pix_loss + 10 * hf_loss + lap_loss * 1e-5
# print('pix_loss,hf_loss,lap_loss', pix_loss.item(), hf_loss.item(), lap_loss.item())
l_total += l_pix
loss_dict['l_pix'] = l_pix
# perceptual loss
if self.cri_perceptual:
l_percep, l_style = self.cri_perceptual(self.output, self.gt)
if l_percep is not None:
l_total += l_percep
loss_dict['l_percep'] = l_percep
if l_style is not None:
l_total += l_style
loss_dict['l_style'] = l_style
l_total.backward()
self.optimizer_g.step()
self.log_dict = self.reduce_loss_dict(loss_dict)
def test(self):
self.net_g.eval()
with torch.no_grad():
self.lr_output, self.output = self.net_g(self.lq)
self.net_g.train()
    def back_projection(self, iter, sr, lr):  # shapes e.g. torch.Size([720, 1280, 3]) and torch.Size([180, 320, 3])
        sr = sr.permute(2, 0, 1).unsqueeze(0)  # super-resolved result
        lr = lr.permute(2, 0, 1).unsqueeze(0)  # blurry low-resolution input
for i in range(iter):
# bic
lr_bic = F.interpolate(sr, size=None, scale_factor=(0.25, 0.25), mode='bicubic', align_corners=False)
sr = sr + self.net_g((lr - lr_bic).clone())[1]
# sr = sr + F.interpolate(lr - lr_bic, size=None, scale_factor=(4, 4), mode='bicubic', align_corners=False)
return sr.squeeze(0).permute(1, 2, 0)
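    # back_projection above refines an SR estimate by repeatedly downscaling it with
    # bicubic interpolation, running the LR residual through the network, and adding the
    # result back. Note it calls self.net_g, which this ensemble variant never sets, so
    # it is only usable when a single net_g attribute is provided.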
    def final_test(self):  # besides the self-ensemble, optionally adds back-projection (BP)
for i in range(4):
getattr(self, 'net_g_' + str(i)).eval()
x = self.lq.squeeze(0).permute(1, 2, 0)
sr = []
        with torch.no_grad():  # test-time model ensemble over the loaded checkpoints
            # Self-ensemble (4 rotations x horizontal flip) applied to each of the four
            # loaded checkpoints; all outputs are averaged below.
            for model in (self.net_g_0, self.net_g_1, self.net_g_2, self.net_g_3):
                for rot in range(0, 4):
                    for flip in [False, True]:
                        _x = x.flip([1]) if flip else x
                        _x = _x.rot90(rot)
                        out_sr = model(_x.permute(2, 0, 1).unsqueeze(0).clone())
                        out_sr = out_sr.squeeze(0).permute(1, 2, 0)
                        out_sr = out_sr.rot90(4 - rot)
                        out_sr = out_sr.flip([1]) if flip else out_sr  # 720, 1280, 3
                        sr.append(out_sr)
#for rot in range(0, 4):
# for flip in [False, True]:
# _x = x.flip([1]) if flip else x
# _x = _x.rot90(rot)
# out_lr, out_sr = self.net_g_4(_x.permute(2, 0, 1).unsqueeze(0).clone())
# out_sr = out_sr.squeeze(0).permute(1, 2, 0)
# out_sr = out_sr.rot90(4 - rot)
# out_sr = out_sr.flip([1]) if flip else out_sr # 720, 1280, 3
# sr.append(out_sr)
#for rot in range(0, 4):
# for flip in [False, True]:
# _x = x.flip([1]) if flip else x
# _x = _x.rot90(rot)
# out_lr, out_sr = self.net_g_5(_x.permute(2, 0, 1).unsqueeze(0).clone())
# out_sr = out_sr.squeeze(0).permute(1, 2, 0)
# out_sr = out_sr.rot90(4 - rot)
# out_sr = out_sr.flip([1]) if flip else out_sr # 720, 1280, 3
# sr.append(out_sr)
# for rot in range(0, 4):
# for flip in [False, True]:
# _x = x.flip([1]) if flip else x
# _x = _x.rot90(rot)
# out_lr, out_sr = self.net_g_6(_x.permute(2, 0, 1).unsqueeze(0).clone())
# out_sr = out_sr.squeeze(0).permute(1, 2, 0)
# out_sr = out_sr.rot90(4 - rot)
# out_sr = out_sr.flip([1]) if flip else out_sr # 720, 1280, 3
# sr.append(out_sr)
#
# for rot in range(0, 4):
# for flip in [False, True]:
# _x = x.flip([1]) if flip else x
# _x = _x.rot90(rot)
# out_lr, out_sr = self.net_g_7(_x.permute(2, 0, 1).unsqueeze(0).clone())
# out_sr = out_sr.squeeze(0).permute(1, 2, 0)
# out_sr = out_sr.rot90(4 - rot)
# out_sr = out_sr.flip([1]) if flip else out_sr # 720, 1280, 3
# sr.append(out_sr)
self.output = torch.stack(sr).mean(0).permute(2, 0, 1).clamp(0, 1)
for i in range(4):
getattr(self, 'net_g_' + str(i)).train()
def dist_validation(self, dataloader, current_iter, tb_logger, save_img):
logger = get_root_logger()
logger.info('Only support single GPU validation.')
self.nondist_validation(dataloader, current_iter, tb_logger, save_img)
def nondist_validation(self, dataloader, current_iter, tb_logger,
save_img):
dataset_name = dataloader.dataset.opt['name']
with_metrics = self.opt['val'].get('metrics') is not None
if with_metrics:
self.metric_results = {
metric: 0
for metric in self.opt['val']['metrics'].keys()
}
pbar = tqdm(total=len(dataloader), unit='image')
for idx, val_data in enumerate(dataloader):
img_name = osp.splitext(osp.basename(val_data['lq_path'][0]))[0]
self.feed_data(val_data)
self.final_test()
visuals = self.get_current_visuals()
sr_img = tensor2img([visuals['result']])
if 'gt' in visuals:
gt_img = tensor2img([visuals['gt']])
del self.gt
# tentative for out of GPU memory
del self.lq
del self.output
torch.cuda.empty_cache()
if save_img:
if self.opt['is_train']:
save_img_path = osp.join(self.opt['path']['visualization'],
img_name,
f'{img_name}_{current_iter}.png')
else:
if self.opt['val']['suffix']:
save_img_path = osp.join(
self.opt['path']['visualization'], dataset_name,
f'{img_name}_{self.opt["val"]["suffix"]}.png')
else:
save_img_path = osp.join(
self.opt['path']['visualization'], dataset_name,
f'{img_name}_{self.opt["name"]}.png')
imwrite(sr_img, save_img_path)
if with_metrics:
# calculate metrics
opt_metric = deepcopy(self.opt['val']['metrics'])
for name, opt_ in opt_metric.items():
metric_type = opt_.pop('type')
self.metric_results[name] += getattr(
metric_module, metric_type)(sr_img, gt_img, **opt_)
pbar.update(1)
pbar.set_description(f'Test {img_name}')
pbar.close()
if with_metrics:
for metric in self.metric_results.keys():
self.metric_results[metric] /= (idx + 1)
self._log_validation_metric_values(current_iter, dataset_name,
tb_logger)
def _log_validation_metric_values(self, current_iter, dataset_name,
tb_logger):
log_str = f'Validation {dataset_name}\n'
for metric, value in self.metric_results.items():
log_str += f'\t # {metric}: {value:.4f}\n'
logger = get_root_logger()
logger.info(log_str)
if tb_logger:
for metric, value in self.metric_results.items():
tb_logger.add_scalar(f'metrics/{metric}', value, current_iter)
def get_current_visuals(self):
out_dict = OrderedDict()
out_dict['lq'] = self.lq.detach().cpu()
out_dict['result'] = self.output.detach().cpu()
if hasattr(self, 'gt'):
out_dict['gt'] = self.gt.detach().cpu()
return out_dict
def save(self, epoch, current_iter):
self.save_network(self.net_g, 'net_g', current_iter)
self.save_training_state(epoch, current_iter)
|
from flask import request, current_app
from flask.views import MethodView
from app.helpers import (
response_data,
args_check,
login_required,
JSONObject
)
from app.v0.users.validators import UserValidator, ChangePasswordValidator, RequestPasswordChangeValidator
from app.errors import E000, E007, E008, E009, E015, E016, E017, E018, E019
from app.models import User
from app.v0.users import users
@users.route("/v0/change_password_request", methods=["POST"])
@args_check(RequestPasswordChangeValidator())
def request_change_password():
    """Handles the POST request that starts a password change"""
json_data = JSONObject(request.json)
with current_app.app_context():
email = (
json_data.email.strip()
)
user = (
User.query.filter_by(email=email)
.first()
)
if user:
access_token = user.generate_token(minutes=30)
res = (
response_data(
data=None
)
)
return res
else:
            # E018 = Email does not exist
res = response_data(
data=None,
error_code=E018
)
return res
# E017 = Incorrect Password
res = response_data(
data=None,
error_code=E017
)
return res
@users.route("/v0/change_password/<string:token>", methods=["POST"])
@args_check(ChangePasswordValidator())
def change_password(token):
    """Handles the POST request that changes a password using a reset token"""
json_data = JSONObject(request.json)
with current_app.app_context():
valid, user_id = User.decode_token(token.strip())
if valid:
user = User.query.get(user_id)
if user:
if user.password_is_valid(json_data.old_password):
new_password = json_data.new_password
access_token = user.generate_token()
user.set_password(new_password)
user.save()
res = (
response_data(
{ "public_id" : user.public_id }
)
)
return res
else:
# E017 = Incorrect Password
res = response_data(
data=None,
error_code=E017
)
return res
# E019 = Expired token
res = response_data(
data=None,
error_code=E019
)
return res
# E000 = Server Error
res = response_data(
data=None,
error_code=E000
)
return res
class UserView(MethodView):
def get(self, public_id):
"""Handles GET request"""
with current_app.app_context():
user = (
User.query.filter_by(public_id=public_id)
.first()
)
if user:
res = (
response_data(
user.to_dict(flag=1)
)
)
return res
# E009 = User Does Not Exist (404)
res = response_data(
data=None,
error_code=E009
)
return res
@args_check(UserValidator())
@login_required()
def put(self, current_user):
"""Handles PUT request"""
current_user_id = current_user["id"]
json_data = JSONObject(request.json)
with current_app.app_context():
user = User.query.get(current_user_id)
if user:
name = json_data.name.lower().strip()
description = json_data.description.strip()
email = json_data.email.lower().strip()
images = request.json.get("images", None)
if email != user.email:
_user = User.query.filter_by(email=email).first()
if _user:
# E016 = Email Already Exists
res = response_data(
data=None,
error_code=E016
)
return res
# Update Email
user.email = email
if name:
user.name = name
if description:
user.description = description
user.save()
res = response_data(
data=user.to_dict()
)
return res
# E009 = User Does Not Exist (404)
res = response_data(
data=None,
error_code=E009
)
return res
@login_required()
def delete(self, current_user):
"""Handles DELETE request"""
current_user_id = current_user["id"]
with current_app.app_context():
user = User.query.get(current_user_id)
if user:
user.deleted = True
user.save()
res = response_data(
data=None
)
return res
# E009 = User Does Not Exist (404)
res = response_data(
data=None,
error_code=E009
)
return res
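# Note: UserView is a MethodView and is presumably registered on the `users` blueprint
# elsewhere, e.g. users.add_url_rule("/v0/users/<string:public_id>",
#                                    view_func=UserView.as_view("user_view"))
# -- the exact rule and endpoint name above are assumptions, not part of this file.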
|
import argparse
import csv
import glob
import json
import logging
import os
def load_stripped_dataset(data_dir):
users = {}
for filepath in glob.glob(os.path.join(data_dir, '*.json')):
with open(filepath, 'r') as f:
d = json.loads(f.read())
for raw_uid in d:
uid = int(raw_uid)
assert uid not in users
users[uid] = d[raw_uid]
return users
def load_bot_scores(filepath):
scores = {}
with open(filepath, 'r') as f:
r = csv.reader(f, delimiter=',')
next(r)
for row in r:
uid = int(row[0])
score = float(row[3])
assert uid not in scores
scores[uid] = score
return scores
def remove_bot_scores(users_filepath, bot_scores_filepath, dest_filepath):
    users = load_stripped_dataset(users_filepath)
    bot_scores = load_bot_scores(bot_scores_filepath)
new_users = {}
for uid in users:
if uid in bot_scores and bot_scores[uid] < 0.8:
new_users[uid] = users[uid]
logging.debug('Users: {}'.format(len(users)))
logging.debug('New users: {}'.format(len(new_users)))
with open(dest_filepath, 'w') as f:
f.write(json.dumps(new_users))
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=(''),
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('dataset_dir', type=str, help='')
parser.add_argument('bot_scores_file', type=str, help='')
parser.add_argument('dest_file', type=str, help='')
args = parser.parse_args()
    if not os.path.exists(os.path.dirname(args.dest_file)):
        os.makedirs(os.path.dirname(args.dest_file))
remove_bot_scores(
args.dataset_dir,
args.bot_scores_file,
args.dest_file
)
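# Example invocation (script name and paths below are illustrative only):
#   python remove_bots.py data/stripped_users/ bot_scores.csv data/users_no_bots.json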
|
banner="http://www.pythonchallenge.com/pc/def/banner.p"
dir=".\\Data\\005"
import helper
helper.ensureDir(dir)
import urllib.request
(filename,headers)=urllib.request.urlretrieve(banner,dir+'\\banner.p')
import pickle
data=pickle.Unpickler(open(filename,'rb')).load()
# print(data)
for line in data:
for tupleitem in line:
print(tupleitem[0]*tupleitem[1],end='')
print('')
# ================================
# without pickle
fp=open(filename,'r')
lines=fp.readlines()
fp.close()
import re
reln=re.compile('aa')
renum=re.compile('^I([0-9]*)')
rechsharp=re.compile("S'#'|g6")
rechspace=re.compile("S' '|g2")
for line in lines:
if reln.search(line) != None:
print('\n',end='')
continue
if rechsharp.search(line) != None:
ch='#'
continue
if rechspace.search(line) != None:
ch=' '
continue
if renum.search(line) != None:
num=renum.search(line).group(1)
print(ch*int(num),end='')
continue
# ================================
|
""" link.py:
Helper functions for generating links.
"""
import settings
def spreadsheet(file_id):
return 'https://docs.google.com/spreadsheets/d/' + file_id
def unsubscribe(user_email):
return 'http://www.' + settings.URL + '/unsubscribe?email=' + user_email
|
from ..backend import Kernel
from ..HKernel import HKernel
from ..SCacheton import SCacheton
from ..Tensor import Tensor
from .Initializer import Initializer
class InitConst(Initializer):
def __init__(self, value=0):
"""
arguments
value(0)
"""
super().__init__()
self._value = value
def initialize_tensor(self, tensor : Tensor):
key = (InitConst, self._value, tensor.dtype)
kernel = SCacheton.get_var(key)
if kernel is None:
kernel = Kernel(kernel_text=f"""
{HKernel.define_tensor('O', (tensor.shape.size,), tensor.dtype )}
__kernel void impl(__global O_PTR_TYPE* O_PTR_NAME)
{{
O_GLOBAL_STORE(get_global_id(0), (O_TYPE){self._value} );
}}
""")
SCacheton.set_var(key, kernel)
tensor.get_device().run_kernel( kernel, tensor.get_buffer(),
global_shape=(tensor.shape.size,) )
    def __str__(self): return f'InitConst value={self._value}'
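# Minimal usage sketch (hypothetical; assumes a Tensor `t` has already been allocated
# on a device elsewhere):
#   InitConst(value=1.0).initialize_tensor(t)  # fills t with 1.0 via the generated kernel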
|
from django import forms
from django.conf import settings
from reviews import models as review_models
from . import models
class CelebrityForm(forms.ModelForm):
class Meta:
model = models.Celebrity
fields = '__all__'
widgets = {
'duties': forms.CheckboxSelectMultiple,
'source_image': forms.Textarea,
}
# def clean_added_by(self):
# if not self.cleaned_data['added_by']:
# return settings.AUTH_USER_MODEL
# return self.cleaned_data['added_by']
# def save(self, *args, **kwargs):
# self.added_by = self.request['user']
# form = super(CelebrityForm, self).save(*args, **kwargs)
# form.save()
class CommentForm(forms.ModelForm):
class Meta:
model = review_models.CelebComment
fields = ('text', 'celeb', 'user')
widgets = {
'text': forms.Textarea(attrs={'rows':5}),
'celeb': forms.HiddenInput,
'user': forms.HiddenInput,
}
|
'''
Have the function SimpleSymbols(str) take the str parameter being passed and determine if it is an acceptable sequence by either returning the string true or false.
The str parameter will be composed of + and = symbols with several letters between them (ie. ++d+===+c++==a)
and for the string to be true each letter must be surrounded by a + symbol. So the string to the left would be false.
The string will not be empty and will have at least one letter.
'''
def SimpleSymbols(str):
    alph = "abcdefghijklmnopqrstuvwxyz"
    s = str.lower()
    # A letter at either end of the string cannot be surrounded by '+' symbols.
    if (s[0] in alph) or (s[-1] in alph):
        return False
    for i, char in enumerate(s):
        if char in alph:
            # Every letter must have a '+' immediately on both sides.
            if not (s[i - 1] == '+' and s[i + 1] == '+'):
                return False
    return True
test = '"+d+=3=+s+"'
test2 = '+=+f++d'
print(SimpleSymbols(test2))
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('analytics', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='case',
name='missing_amount_currency',
field=models.CharField(default='0', help_text=b'Divisa', max_length=1, choices=[(b'0', b'dolar US'), (b'1', b'euro')]),
preserve_default=False,
),
]
|
from math import sqrt
import json
import sys
import multiprocessing as mp
from utils import _progBar, _chunkIt, loadFile, loadJSON
class Recommender:
def __init__(self, user_source, object_source, processes=4):
self.user_data = user_source
self.object_data = object_source
self.objects = self.object_data.keys()
self.users = {}
self.processes = processes
counter = 0
for u_data in self.user_data:
            if not isinstance(u_data['deps'], set):
u_data['deps'] = set(u_data['deps'])
# Percent of dependencies for this package versus all packages
u_data['n'] = len(u_data['deps']) / len(self.objects)
# Add ID field
u_data['id'] = counter
self.users[counter] = u_data
counter += 1
def similarity(self, pkg_a, pkg_b):
""" Uses Pearson correlation coefficient """
ntor = 0.0
dtor_a = 0.0
dtor_b = 0.0
deps = pkg_a['deps'] | pkg_b['deps']
for j in pkg_a['deps'] | pkg_b['deps']:
x_aj = 1 if (j in pkg_a['deps']) else 0
x_bj = 1 if (j in pkg_b['deps']) else 0
ntor += (x_aj - pkg_a['n']) * (x_bj - pkg_b['n'])
dtor_a += (x_aj - pkg_a['n']) ** 2
dtor_b += (x_bj - pkg_b['n']) ** 2
# number of packages that are not dependencies
not_deps = len(self.objects) - len(deps)
# update numbers for non dependencies
ntor += not_deps * (pkg_a['n']) * (pkg_b['n'])
dtor_a += not_deps*(pkg_a['n']) ** 2
dtor_b += not_deps*(pkg_b['n']) ** 2
# Handle negative numbers on numerator
factor = 1.0 if dtor_a*dtor_b >= 0 else -1.0
dtor = factor*sqrt(abs(dtor_a * dtor_b)) + 0.0001
return ntor / dtor
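    # The loop above treats each package's dependency list as a binary vector over all
    # known objects; the `not_deps` term adds the contribution of objects that are
    # dependencies of neither package without iterating over them, and the small constant
    # in the denominator guards against division by zero.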
def recommend_job(self, pkg, objects={}, sims_cache={}, ret=None, print_progress=False):
        if ret is None:
            raise ValueError("Must provide a return queue for each job")
if print_progress:
print("Started New Job")
recs = {i : 0 for i in objects}
n_a = pkg['n']
# Loop through objects
counter = 0
for j in objects:
ntor = 0.0
dtor = 0.0001
if print_progress:
_progBar(counter, len(objects))
counter += 1
# Loop through users
for i, u_data in self.users.items():
x_ij = 1 if (j in u_data['deps']) else 0
ntor += sims_cache[i] * (x_ij - u_data['n'])
dtor += sims_cache[i]
recs[j] = ntor / dtor
recs = {i : recs[i] + n_a for i in objects}
if print_progress:
print("Finished Job")
ret.put(recs)
return
def recommend(self, pkg, print_progress=False):
n_a = len(pkg['deps']) / len(self.objects)
pkg['n'] = n_a
        if not isinstance(pkg['deps'], set):
            pkg['deps'] = set(pkg['deps'])
# Build up similarity cache
sims_cache = {}
for i, u_data in self.users.items():
sims_cache[i] = self.similarity(u_data, pkg)
jobs = []
objects = _chunkIt(list(self.objects), self.processes)
for job_id in range(0, self.processes):
q = mp.Queue()
process = mp.Process(target=self.recommend_job,
args=(pkg, objects[job_id], sims_cache, q, print_progress))
process.start()
jobs.append((q, process))
recs = {}
for q, j in jobs:
recs.update(q.get())
j.join()
return recs
if __name__ == '__main__':
# Load data from files
user_source = loadFile('data/user_pkgs.txt')
user_data = []
for u in user_source:
user_data.append(json.loads(u))
object_source = loadJSON('data/object_pkgs.txt')
r = Recommender(user_data, object_source, processes=2)
deps = set({
"@prairielearn/prairielib": "^1.5.2",
"ace-builds": "^1.4.2",
"adm-zip": "^0.4.13",
"archiver": "^3.0.0",
"async": "^2.6.1",
"async-stacktrace": "0.0.2",
"aws-sdk": "^2.382.0",
"backbone": "^1.3.3",
"base64url": "^3.0.1",
"blocked-at": "^1.1.3",
"body-parser": "^1.18.3",
"bootstrap": "^4.3.1",
"byline": "^5.0.0",
"chart.js": "^2.7.3",
"cheerio": "^0.22.0",
"clipboard": "^2.0.4",
"cookie-parser": "^1.4.3",
"crypto-js": "^3.1.9-1",
"csv": "^5.0.1",
"csvtojson": "^2.0.8",
"debug": "^4.1.1",
"diff": "^3.5.0",
"dockerode": "^2.5.5",
"ejs": "^2.6.1",
"express": "^4.16.4",
"fs-extra": "^7.0.1",
"googleapis": "^36.0.0",
"handlebars": "^4.1.0",
"http-status": "^1.3.1",
"is-my-json-valid": "^2.17.2",
"javascript-natural-sort": "^0.7.1",
"jju": "^1.3.0",
"jquery": "^3.3.1",
"json-stringify-safe": "^5.0.1",
"lodash": "^4.17.10",
"lru-cache": "^5.1.1",
"mathjax": "^2.7.4",
"mersenne": "0.0.4",
"moment": "^2.23.0",
"multer": "^1.4.1",
"mustache": "^3.0.1",
"nodemon": "^1.18.9",
"numeric": "^1.2.6",
"oauth-signature": "^1.5.0",
"on-finished": "^2.3.0",
"parse5": "^5.0.0",
"passport": "^0.4.0",
"passport-azure-ad": "^4.0.0",
"pg": "^7.7.1",
"plist": "^3.0.0",
"popper.js": "^1.14.6",
"qrcode-svg": "^1.0.0",
"redis": "^2.8.0",
"redis-lru": "^0.5.0",
"request-promise-native": "^1.0.5",
"requirejs": "^2.3.5",
"s3-upload-stream": "^1.0.7",
"search-string": "^3.1.0",
"serve-favicon": "^2.5.0",
"socket.io": "^2.2.0",
"socket.io-client": "^2.2.0",
"socket.io-redis": "^5.2.0",
"streamifier": "^0.1.1",
"supports-color": "^6.0.0",
"tar": "^4.4.8",
"three": "^0.99.0",
"uuid": "^3.2.1",
"viz.js": "^2.1.2",
"winston": "^3.1.0",
"yargs": "^12.0.5",
"yargs-parser": "^11.1.1",
"chai": "^4.1.2",
"colors": "^1.3.3",
"coveralls": "^3.0.1",
"eslint": "^5.11.1",
"jsdoc": "^3.5.5",
"mocha": "^5.2.0",
"nyc": "^13.3.0",
"request": "^2.87.0",
"tmp": "0.0.33"
}.keys())
deps = {
"@turf/turf": "^5.1.6",
"cookie-parser": "^1.4.4",
}
pkg = {
'name': 'test',
'deps': deps
}
recs = r.recommend(pkg)
recommendations = [(k, recs[k]) for k in sorted(recs, key=recs.get, reverse=True)]
counter = 0
for item, val in recommendations:
if item not in deps:
print(item, val, sep=",")
counter += 1
if counter > 20:
break
|
import hashlib
import json
import logging
import os
from typing import List, Optional
from pip._vendor import requests
from toncommon.contextmanager import secret_manager
from toncommon.core import TonExec
from toncommon.models.TonCoin import TonCoin
from toncommon.models.depool.DePoolElectionEvent import DePoolElectionEvent
from toncommon.models.depool.DePoolEvent import DePoolEvent
from toncommon.models.depool.DePoolLowBalanceEvent import DePoolLowBalanceEvent
from toncommon.models.TonAccount import TonAccount
from toncommon.models.TonTransaction import TonTransaction
log = logging.getLogger("tonoscli")
class TonosCli(TonExec):
"""
Python wrapper for tonos CLI
"""
CONFIG_NAME = "tonos-cli.conf.json"
def __init__(self, cli_path, cwd, config_url, abi_path=None, tvc_path=None):
super().__init__(cli_path)
with open(cli_path, "rb") as f:
h = hashlib.md5(f.read())
h.update(config_url.encode())
self._cwd = os.path.join(cwd, h.hexdigest())
self._config_url = config_url
self._abi_path = abi_path
self._tvc_path = tvc_path
def _run_command(self, command: str, options: list = None, retries=5):
"""
./tonos-cli <command> <options>
"""
if not os.path.exists(os.path.join(self._cwd, TonosCli.CONFIG_NAME)):
os.makedirs(self._cwd, exist_ok=True)
for i in range(retries):
ret, out = self._execute(["config", "--url", self._config_url],
cwd=self._cwd)
if ret != 0:
if out and "timeout" in out.lower():
log.info("Retrying tonos command due to timeout")
continue
if not os.path.exists(os.path.join(self._cwd, TonosCli.CONFIG_NAME)):
raise Exception("Failed to initialize tonos-cli: {}".format(out))
break
if options is None:
options = []
args = [command] + options
log.debug("Running: {} {}".format(self._exec_path, args))
ret, out = self._execute(args, cwd=self._cwd)
if ret != 0:
raise Exception("Failed to run command {}: {}".format(command, out))
return out
def _materialize_abi(self, abi_url):
log.info("Materialising ABI url: {}".format(abi_url))
cached_path = "{}.json".format(os.path.join(self._cwd, hashlib.md5(abi_url.encode()).hexdigest()))
if not os.path.exists(cached_path):
log.info("Downloading ABI from: {}".format(abi_url))
resp = requests.get(abi_url, allow_redirects=True)
open(cached_path, 'wb').write(resp.content)
return cached_path
    def _parse_result(self, output: str) -> Optional[dict]:
if "Result" in output:
result_keyword = 'Result: '
substr = output[output.find(result_keyword) + len(result_keyword):]
obj = json.loads(substr)
return obj
return None
def get_account(self, address) -> TonAccount:
out = self._run_command('account', [address])
data = {}
output_started = False
for line in out.splitlines():
if not output_started and "Succeeded." in line:
output_started = True
continue
if output_started:
if "Account not found" in line:
raise Exception("Account not found: {}".format(address))
tokens = line.split(":")
data[tokens[0].strip()] = tokens[1].strip()
        return TonAccount(acc_type=data["acc_type"], balance=int(data.get("balance", "0").replace("nanoton", "").strip()),
                          last_paid=int(data.get("last_paid", 0)), data=data.get("data(boc)"))
def call_command(self, address: str, command: str, payload: dict,
abi_url: str, private_key: str = None) -> Optional[dict]:
cmd = [address, command, str(json.dumps(payload)), "--abi", self._materialize_abi(abi_url)]
with secret_manager(secrets=[private_key]):
if private_key:
cmd.extend(['--sign', str(private_key)])
out = self._run_command('call', cmd)
data = self._parse_result(out)
log.debug("Tonoscli call: {}".format(out))
return data
def generate_key_pair_file(self, file_location, phrase):
with secret_manager(secrets=[phrase]):
return self._run_command("getkeypair", [file_location, phrase])
def submit_transaction(self, address, dest, value: int, payload, private_key, bounce=False, allBalance=False) -> TonTransaction:
with secret_manager(secrets=[private_key]):
transaction_payload = json.dumps({"dest": str(dest),
"value": str(value),
"bounce": bounce,
"allBalance": allBalance,
"payload": str(payload)})
out = self._run_command('call', [address, "submitTransaction", str(transaction_payload),
"--abi", self._abi_path, "--sign", str(private_key)])
data = self._parse_result(out)
log.debug("Tonoscli: {}".format(out))
return TonTransaction(tid=data.get("transId"))
def confirm_transaction(self, address: str, transaction_id: str, private_keys: List[str]) -> TonTransaction:
with secret_manager(secrets=private_keys):
for key in private_keys:
transaction_payload = json.dumps({"transactionId": transaction_id})
out = self._run_command('call', [address, "confirmTransaction", transaction_payload,
"--abi", self._abi_path, "--sign", key])
log.debug("Tonoscli: {}".format(out))
return TonTransaction(tid=transaction_id)
def depool_replenish(self, depool_addr: str, wallet_addr: str, value: TonCoin,
private_key: str, custodian_keys: List[str] = None) -> TonTransaction:
with secret_manager(secrets=[private_key]):
out = self._run_command('depool', ["--addr", depool_addr, "replenish", "--value", value.as_tokens(),
"--wallet", wallet_addr, "--sign", str(private_key)])
log.debug("Tonoscli: {}".format(out))
data = self._parse_result(out)
if data and custodian_keys:
self.confirm_transaction(wallet_addr, transaction_id=data.get("transId"), private_keys=custodian_keys)
return TonTransaction(tid=data.get("transId"))
def get_depool_events(self, depool_addr,
max: int = 100) -> List[DePoolEvent]:
out = self._run_command("depool", ["--addr", depool_addr, "events"])
log.debug("Tonoscli: {}".format(out))
events = []
current_event_id = None
current_event = None
for line in out.splitlines():
if not line or not line.strip():
# empty line indicates that next event is coming
current_event_id = None
current_event = None
continue
if line.startswith("event "):
current_event_id = line.split(" ")[1]
continue
if current_event_id and not current_event:
# new event started, but we don't know yet which one
event_name = line.split(" ")[0]
event_cls = DePoolEvent
if event_name == "TooLowDePoolBalance":
event_cls = DePoolLowBalanceEvent
elif event_name == "StakeSigningRequested":
event_cls = DePoolElectionEvent
current_event = event_cls(current_event_id, event_name)
continue
if current_event and line.startswith("{"):
current_event.set_data(line)
events.append(current_event)
if len(events) >= max:
break
return events
def terminate_depool(self, address, private_key: str):
with secret_manager(secrets=[private_key]):
transaction_payload = json.dumps({})
out = self._run_command('call', [address, "terminator", transaction_payload,
"--abi", self._abi_path, "--sign", str(private_key)])
log.debug("Tonoscli: {}".format(out))
def depool_ticktock(self, depool_address: str, wallet_address: str, private_key: str,
custodian_keys: List[str] = None):
with secret_manager(secrets=[private_key]):
out = self._run_command("depool", ["--addr", depool_address, "ticktock",
"-w", wallet_address, "--sign", str(private_key)])
log.debug("Tonoscli: {}".format(out))
data = self._parse_result(out)
if custodian_keys:
self.confirm_transaction(wallet_address, transaction_id=data.get("transId"), private_keys=custodian_keys)
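# Minimal usage sketch (binary path, working directory and network URL below are
# hypothetical, not part of this module):
#   cli = TonosCli("/usr/local/bin/tonos-cli", cwd="/var/lib/tonos", config_url="https://main.ton.dev")
#   account = cli.get_account("0:1234...")  # returns a TonAccount with the balance in nanotons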
|
#!/usr/bin/env python3
#
# Filter coverage reported by lcov
#
# Copyright 2020 BlueKitchen GmbH
#
import sys
blacklist = [
'/opt/local',
'3rd-party/yxml',
'3rd-party/tinydir',
'chipset/zephyr',
'platform/embedded/btstack_audio_embedded.c',
'platform/embedded/btstack_em9304_spi_embedded.c',
'platform/embedded/btstack_stdin_embedded.c',
'platform/embedded/btstack_tlv_flash_bank.c',
'platform/embedded/btstack_uart_block_embedded.c',
'platform/embedded/hal_flash_bank_memory.c',
'platform/freertos/btstack_run_loop_freertos.c',
'platform/freertos/btstack_uart_block_freertos.c',
'platform/libusb',
'platform/posix',
'port/libusb',
'src/ble/ancs_client.c',
'src/ble/le_device_db_memory.c',
'src/ble/gatt-service/cycling_power_service_server.c',
'src/ble/gatt-service/cycling_speed_and_cadence_service_server.c',
'src/ble/gatt-service/heart_rate_service_server.c',
'src/ble/gatt-service/hids_device.c',
'src/ble/gatt-service/nordic_spp_service_server.c',
'src/ble/gatt-service/ublox_spp_service_server.c',
'src/btstack_audio.c',
'src/btstack_base64_decoder.c',
'src/btstack_event.h',
'src/btstack_hid_parser.c',
'src/btstack_resample.c',
'src/btstack_slip.c',
'src/hci_transport_em9304_spi.c',
'src/hci_transport_h5.c',
'src/mesh/',
'src/classic',
]
def include_file(filename):
for pattern in blacklist:
if pattern in filename:
print("Skip " + filename)
return False
return True
if len(sys.argv) != 3:
print ('lcov .info filter')
print ('Usage: ', sys.argv[0], 'input.info output.info')
exit(0)
infile = sys.argv[1]
outfile = sys.argv[2]
with open(infile, 'rt') as fin:
with open(outfile, 'wt') as fout:
mirror = False
read_tn = False
for line in fin:
line = line.strip()
if line == 'TN:':
read_tn = True
continue
if line == 'end_of_record':
if mirror:
fout.write(line+'\n')
mirror = False
continue
parts = line.split(':')
if len(parts) == 2 and parts[0] == 'SF':
filename = parts[1]
mirror = include_file(filename)
if mirror and read_tn:
fout.write("TN:\n")
read_tn = False
if not mirror:
continue
fout.write(line+"\n")
|
#!/usr/bin/env python
'''
Python + Ansible - Class 3 - Exercise 2
Gleydson Mazioli da Silva <gleydsonmazioli@gmail.com>
I created this program with a different concept: data is saved to and loaded from
a YAML or JSON file. This saves system resources and allows the script to be run
through a cron or anacron job.
'''
import snmp_helper
import yaml
import json
import sys
import pygal
# pylint: disable=C0103
# pylint: disable=line-too-long
my_ip = '50.76.53.27'
my_user = 'pysnmp'
my_pass = 'galileo1'
my_enckey = 'galileo1'
my_host = (my_ip, 7961)
verbose = True
# File format should be json or yaml
file_fmt = 'json'
def save_data(l_var):
'''
Save data
'''
try:
with open('exercise3-2.'+file_fmt, 'w') as fhandler:
if file_fmt == 'yaml':
fhandler.write(yaml.dump(l_var, default_flow_style=False))
elif file_fmt == 'json':
json.dump(l_var, fhandler)
else:
print 'Unknown format: %s' % (file_fmt)
sys.exit(1)
except IOError:
        print 'An error happened while saving data'
def load_saved_data(l_default):
'''
Load previous saved data
'''
try:
with open('exercise3-2.'+file_fmt, 'r') as fhandler:
if file_fmt == 'yaml':
file_data = yaml.load(fhandler)
elif file_fmt == 'json':
file_data = json.load(fhandler)
else:
sys.exit('File Read: Invalid file format: '+file_fmt)
except IOError:
if verbose:
print 'File not found: exercise3-2.'+file_fmt
return l_default
return file_data
def get_snmp_data(router, snmp_user, miboid):
'''
Get and return snmp data
'''
snmp_data = snmp_helper.snmp_extract(snmp_helper.snmp_get_oid_v3(router, snmp_user, oid=miboid))
return snmp_data
def generate_graphic(l_data):
'''
Generate a SVG graphic using data passed as an argument
'''
graph_stats = {
"in_octets": [],
"out_octets": [],
"in_ucast_pkts": [],
"out_ucast_pkts": []
}
for l_label in ("in_octets", "out_octets", "in_ucast_pkts", "out_ucast_pkts"):
l_old_value = 0
for i in range(0, len(l_data)):
l_value = l_data[i][l_label]
if l_old_value == 0:
l_diff = 0
else:
l_diff = int(l_value)-int(l_old_value)
if verbose:
                print '%s: %s, diff: %s, (old: %s)' % (l_label, l_value, l_diff, l_old_value)
graph_stats[l_label].append(l_diff)
l_old_value = l_value
if verbose:
print graph_stats
line_chart = pygal.Line()
line_chart.title = 'Input/Output bytes and Unicast'
line_chart.add('InBytes', graph_stats['in_octets'])
line_chart.add('OutBytes', graph_stats['out_octets'])
line_chart.add('InUnicast', graph_stats['in_ucast_pkts'])
line_chart.add('OutUnicast', graph_stats['out_ucast_pkts'])
line_chart.render_to_file('exercise2.svg')
def main():
'''
Main Function
'''
snmp_user = (my_user, my_pass, my_enckey)
if_ifdescr = get_snmp_data(my_host, snmp_user, '1.3.6.1.2.1.2.2.1.2.5')
if_in_octets = get_snmp_data(my_host, snmp_user, '1.3.6.1.2.1.2.2.1.10.5')
if_out_octets = get_snmp_data(my_host, snmp_user, '1.3.6.1.2.1.2.2.1.16.5')
if_in_ucast_pkts = get_snmp_data(my_host, snmp_user, '1.3.6.1.2.1.2.2.1.11.5')
if_out_ucast_pkts = get_snmp_data(my_host, snmp_user, '1.3.6.1.2.1.2.2.1.17.5')
print 'Using file format: %s' % (file_fmt)
old_data_list = load_saved_data(0)
# pylint: disable=maybe-no-member
if old_data_list == 0:
old_if_ifdescr = if_ifdescr
old_if_in_octets = if_in_octets
old_if_out_octets = if_out_octets
old_if_in_ucast_pkts = if_in_ucast_pkts
old_if_out_ucast_pkts = if_out_ucast_pkts
        data_list = []
else:
old_if_ifdescr = old_data_list[-1]['ifdescr']
old_if_in_octets = old_data_list[-1]['in_octets']
old_if_out_octets = old_data_list[-1]['out_octets']
old_if_in_ucast_pkts = old_data_list[-1]['in_ucast_pkts']
old_if_out_ucast_pkts = old_data_list[-1]['out_ucast_pkts']
data_list = old_data_list
if verbose:
print 'IfDescr: %s (last: %s)' % (if_ifdescr, old_if_ifdescr)
print 'InOctets %s (last: %s)' % (if_in_octets, old_if_in_octets)
print 'OutOctets %s (last: %s)' % (if_out_octets, old_if_out_octets)
print 'In Ucast %s (last: %s)' % (if_in_ucast_pkts, old_if_in_ucast_pkts)
print 'Out Ucast %s (last: %s)' % (if_out_ucast_pkts, old_if_out_ucast_pkts)
# Array preparation to save data
data_list.append({})
data_list[-1]['ifdescr'] = if_ifdescr
data_list[-1]['in_octets'] = if_in_octets
data_list[-1]['out_octets'] = if_out_octets
data_list[-1]['in_ucast_pkts'] = if_in_ucast_pkts
data_list[-1]['out_ucast_pkts'] = if_out_ucast_pkts
save_data(data_list)
generate_graphic(data_list)
if verbose:
print '----------------------------'
if __name__ == "__main__":
main()
quit()
|
import asyncio
import logging
import sys
import weakref
if sys.version_info >= (3, 7):
def get_loop(task):
return task.get_loop()
else:
def get_loop(task):
return task._loop
class StreamSink:
def __init__(self, stream):
self._stream = stream
self._flushable = callable(getattr(stream, "flush", None))
self._stoppable = callable(getattr(stream, "stop", None))
self._completable = asyncio.iscoroutinefunction(getattr(stream, "complete", None))
def write(self, message):
self._stream.write(message)
if self._flushable:
self._stream.flush()
def stop(self):
if self._stoppable:
self._stream.stop()
async def complete(self):
if self._completable:
await self._stream.complete()
class StandardSink:
def __init__(self, handler):
self._handler = handler
def write(self, message):
record = message.record
message = str(message)
exc = record["exception"]
record = logging.getLogger().makeRecord(
record["name"],
record["level"].no,
record["file"].path,
record["line"],
message,
(),
(exc.type, exc.value, exc.traceback) if exc else None,
record["function"],
record["extra"],
)
if exc:
record.exc_text = "\n"
self._handler.handle(record)
def stop(self):
self._handler.close()
async def complete(self):
pass
class AsyncSink:
def __init__(self, function, loop):
self._function = function
self._loop = loop
self._tasks = weakref.WeakSet()
def write(self, message):
coro = self._function(message)
loop = self._loop or asyncio.get_event_loop()
task = loop.create_task(coro)
self._tasks.add(task)
def stop(self):
for task in self._tasks:
task.cancel()
async def complete(self):
loop = asyncio.get_event_loop()
for task in self._tasks:
if get_loop(task) is loop:
await task
def __getstate__(self):
state = self.__dict__.copy()
state["_tasks"] = None
return state
def __setstate__(self, state):
self.__dict__.update(state)
self._tasks = weakref.WeakSet()
class CallableSink:
def __init__(self, function):
self._function = function
def write(self, message):
self._function(message)
def stop(self):
pass
async def complete(self):
pass
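# All sinks above share the same minimal interface expected by the handler machinery:
# write(message) emits a formatted record, stop() releases resources, and the
# complete() coroutine awaits any pending asynchronous work.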
|
import xml.etree.ElementTree as ET
tree = ET.ElementTree(file='FR.xml')
indent = 0
ignoreElems = ['/word/document.xml']
def printRecur(root, f):
"""Recursively prints the tree."""
# print('root tag:', root.tag, root.text)
global indent
if root.tag in ignoreElems:
return
f.write(' ' * indent + '%s: %s' % (root.tag.title(), root.attrib.get('name', root.text))+"\r")
# print (' ' * indent + '%s: %s' % (root.tag.title(), root.attrib.get('name', root.text)))
indent += 4
    for elem in root:  # getchildren() was removed in Python 3.9; iterate the element directly
printRecur(elem, f)
indent -= 4
path_w = 'FR.txt'
f = open(path_w, mode='w', encoding='utf-8')
root = tree.getroot()
printRecur(root, f)
f.close()
|
#!/usr/bin/env python3
from qsilInterpreter import Object, Pointer, Interpreter, Bytecode, SpecialIDs, VisibilityTypes, QSIL_TYPE_DIRECTOBJECT, QSIL_TYPE_DIRECTPOINTEROBJECT
from struct import pack
printBytecodes = False
classClassInstVars = []
methodClassInstVars = []
bootstrapPtr = None
class QSILClass(object):
def __init__(self):
self.name = b'Object'
self.type = b'subclass:'
self.superclass = b'Object'
self.classId = 0
self.instancevariables = []
self.classvariables = []
self.methods = []
def __repr__(self):
return f'[class {self.name.decode("utf-8")}, methods: {self.methods}]'
def asQSILObject(self, parser):
global classClassInstVars
ret = Object()
ret.classId = SpecialIDs.CLASS_CLASS_ID
ret.objId = self.classId
serializedInstVars = []
classClassInstVars = classClassInstVars or parser.classes[b"Class"].instancevariables
for var in classClassInstVars:
#print(var)
if var == b'type':
serializedInstVars.append(parser.qsilStringPtr(self.type))
elif var == b'className':
serializedInstVars.append(parser.qsilStringPtr(self.name))
elif var == b'superclass':
ptr = Pointer()
if self.superclass == self.name:
ptr.objId = ret.objId
else:
ptr.objId = parser.classes[self.superclass].objId
serializedInstVars.append(ptr)
elif var == b'instVarNames':
if self.superclass == self.name:
varNames = []
else:
superclass = parser.classes[self.superclass].pyObjStorage[3]
superclassInstVars = parser.objects[superclass.objId].pyObjStorage
varNames = [x for x in superclassInstVars]
varNames += [parser.qsilStringPtr(varName) for varName in self.instancevariables]
serializedInstVars.append(parser.qsilOrderedCollectionPtr(varNames))
elif var == b'classVarNames':
varNames = [parser.qsilStringPtr(varName) for varName in self.classvariables]
serializedInstVars.append(parser.qsilOrderedCollectionPtr(varNames))
elif var == b'methods':
serializedMethods = []
for method in self.methods:
serializedMethods.append(method.asQSILObject(parser))
serializedInstVars.append(parser.qsilOrderedCollectionPtr(serializedMethods))
else:
print(var)
serializedInstVars.append(parser.qsilNumberPtr(0))
ret.setMem(serializedInstVars)
parser.objects[ret.objId] = ret
parser.classes[self.name] = ret
class QSILMethod(object):
def __init__(self):
self.name = b''
self.visibility = []
self.args = []
self.bytecodes = b''
self.literalPtrs = []
self.objId = 0
self.numTemps = 0
self._class = None
def __repr__(self):
return b' '.join(self.visibility).decode('utf-8') + ' #' + self.name.decode('utf-8')
def asQSILObject(self, parser):
global methodClassInstVars
ret = Object()
ret.classId = SpecialIDs.METHOD_CLASS_ID
ret.objId = self.objId
serializedInstVars = []
methodClassInstVars = methodClassInstVars or parser.classes[b"Method"].instancevariables
for var in methodClassInstVars:
if var == b'methodName':
serializedInstVars.append(parser.qsilStringPtr(self.name))
elif var == b'visibility':
visibilityNum = 0
if b'private' in self.visibility:
visibilityNum |= VisibilityTypes.PRIVATE
if b'protected' in self.visibility:
visibilityNum |= VisibilityTypes.PROTECTED
if b'public' in self.visibility:
visibilityNum &= 0b1
if b'static' in self.visibility:
visibilityNum |= VisibilityTypes.STATIC
serializedInstVars.append(parser.qsilNumberPtr(visibilityNum))
elif var == b'args':
serializedInstVars.append(parser.qsilNumberPtr(len(self.args)))
elif var == b'bytecodes':
finalBytecodes = b''
for bc in self.bytecodes:
if isinstance(bc, bytes):
finalBytecodes += bc
else:
finalBytecodes += bytes([bc])
serializedInstVars.append(parser.qsilStringPtr(finalBytecodes))
elif var == b'literals':
serializedInstVars.append(parser.qsilOrderedCollectionPtr(self.literalPtrs))
elif var == b'numTemps':
print("At some point, this should be implemented. It's kinda important ish")
elif var == b'class':
ptr = Pointer()
ptr.objId = self._class
serializedInstVars.append(ptr)
else:
print(var)
serializedInstVars.append(parser.qsilNumberPtr(0))
ret.setMem(serializedInstVars)
parser.objects[ret.objId] = ret
return Pointer.forObject(ret)
specials = [b'+', b',', b'-', b'/', b'*', b'>',
b'<', b'<=',b'>=', b'=', b'~=', b'==',
b'~==', b'&&', b'||', b'\\']
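# The tokens above are binary-style selectors: the parser treats them as message sends
# that take exactly one argument but do not use the keyword (colon-terminated) syntax.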
class Parser(object):
def __init__(self, infile):
self.stream = infile
self.objects = {}
self.blockContexts = []
self.currObjectId = SpecialIDs.numObjs
self.classes = {}
def peek(self, length=1):
pos = self.stream.tell()
ret = self.stream.read(length)
self.stream.seek(pos)
return ret
def skipwhitespace(self):
while self.peek() in b' \t\r\n':
self.stream.read(1)
def readToken(self):
tok = b''
self.skipwhitespace()
if self.peek() in b')].':
return self.stream.read(1)
while self.peek() not in b' \t\r\n)].':
tok += self.stream.read(1)
return tok
def peekToken(self, num = 1):
pos = self.stream.tell()
ret = self.readToken()
if num > 1:
ret = [ret]
for _ in range(num - 1):
ret.append(self.readToken())
self.stream.seek(pos)
return ret
def readString(self):
string = b''
self.skipwhitespace()
assert self.stream.read(1) == b'\''
while True:
# Lazy, but we're assuming all strings have been terminated
char = self.stream.read(1)
if char == b'\'':
if self.peek() == b'\'':
self.stream.read(1) # Consume one
else:
break
string += char
return string
def nextObjectId(self):
ret = self.currObjectId
self.currObjectId += 1
return ret
def qsilStringPtr(self, string):
qsilString = Object()
qsilString.classId = SpecialIDs.BYTESTRING_CLASS_ID
qsilString.type = QSIL_TYPE_DIRECTOBJECT
qsilString.setMem(string)
qsilString.objId = self.nextObjectId()
self.objects[qsilString.objId] = qsilString
return Pointer.forObject(qsilString)
def qsilSymbolPtr(self, string):
qsilSymbol = Object()
qsilSymbol.classId = SpecialIDs.SYMBOL_CLASS_ID
qsilSymbol.type = QSIL_TYPE_DIRECTOBJECT
qsilSymbol.setMem(string)
qsilSymbol.objId = self.nextObjectId()
self.objects[qsilSymbol.objId] = qsilSymbol
return Pointer.forObject(qsilSymbol)
def qsilCharacterPtr(self, char):
qsilCharacter = Object()
qsilCharacter.classId = SpecialIDs.CHARACTER_CLASS_ID
qsilCharacter.type = QSIL_TYPE_DIRECTOBJECT
qsilCharacter.setMem(bytes([char]))
qsilCharacter.objId = self.nextObjectId()
self.objects[qsilCharacter.objId] = qsilCharacter
return Pointer.forObject(qsilCharacter)
def qsilNumberPtr(self, num):
qsilNumber = Object()
qsilNumber.classId = SpecialIDs.INTEGER_CLASS_ID if isinstance(num, int) else SpecialIDs.FLOAT_CLASS_ID
qsilNumber.type = QSIL_TYPE_DIRECTOBJECT
numToBytes = pack("<i", num)
qsilNumber.setMem(numToBytes)
qsilNumber.objId = self.nextObjectId()
self.objects[qsilNumber.objId] = qsilNumber
return Pointer.forObject(qsilNumber)
def qsilOrderedCollectionPtr(self, objects):
qsilOrderedCollection = Object()
qsilOrderedCollection.classId = SpecialIDs.ORDEREDCOLLECTION_CLASS_ID
qsilOrderedCollection.type = QSIL_TYPE_DIRECTPOINTEROBJECT
qsilOrderedCollection.setMem(objects)
qsilOrderedCollection.objId = self.nextObjectId()
self.objects[qsilOrderedCollection.objId] = qsilOrderedCollection
return Pointer.forObject(qsilOrderedCollection)
def qsilBlockContextPtr(self, bytecodes, newliterals, methodargs):
qsilBlockContext = Object()
qsilBlockContext.classId = SpecialIDs.BLOCKCONTEXT_CLASS_ID
pcPtr = self.qsilNumberPtr(0)
stackPtr = self.qsilOrderedCollectionPtr([])
receiverPtr = Pointer()
receiverPtr.objId = SpecialIDs.NIL_OBJECT_ID
tempvarsPtr = self.qsilOrderedCollectionPtr([])
argsPtr = self.qsilOrderedCollectionPtr([])
parentContextPtr = Pointer()
parentContextPtr.objId = SpecialIDs.NIL_OBJECT_ID
bytecodesPtr = self.qsilStringPtr(bytecodes)
literalsPtr = self.qsilOrderedCollectionPtr(newliterals)
homePtr = parentContextPtr
bcMem = [pcPtr, stackPtr, receiverPtr, tempvarsPtr, parentContextPtr, argsPtr, literalsPtr, bytecodesPtr, homePtr]
qsilBlockContext.setMem(bcMem)
qsilBlockContext.objId = self.nextObjectId()
self.objects[qsilBlockContext.objId] = qsilBlockContext
self.blockContexts.append(qsilBlockContext)
return Pointer.forObject(qsilBlockContext)
def pointerToLiteralOrderedCollection(self):
self.skipwhitespace()
assert self.stream.read(2) == b'#('
objs = []
tok = self.peekToken()
while tok != b')':
if tok.startswith(b'\''):
strVal = self.readString()
objs.append(self.qsilStringPtr(strVal))
elif tok.startswith(b'#'):
if tok.startswith(b'#('):
objs.append(self.pointerToLiteralOrderedCollection())
else:
symbVal = self.readToken()[1:]
objs.append(self.qsilSymbolPtr(symbVal))
elif tok.startswith(b'$'):
self.stream.read(1)
objs.append(self.qsilCharacterPtr(self.stream.read(1)))
elif tok.startswith(b'"'):
self.consumeComment()
else:
if (tok[0:1].isupper() or tok[0:1].islower()):
objs.append(['latebindliteral', self.readToken()])
else:
if b'.' in self.peekToken(2)[1]:
numVal = float(b''.join(self.peekToken(3)))
self.readToken()
self.readToken()
self.readToken()
else:
numVal = int(self.readToken())
objs.append(self.qsilNumberPtr(numVal))
tok = self.peekToken()
self.skipwhitespace()
self.stream.read(1)
return self.qsilOrderedCollectionPtr(objs)
def consumeComment(self):
self.skipwhitespace()
if self.peek() == b'"':
self.stream.read(1)
while self.peek() != b'"':
self.stream.read(1)
self.stream.read(1)
def methodToBytecodes(self, methodargs, addReturn = True, declaredVariables = None, literalPtrs = None):
bytecodes = []
declaredVariables = declaredVariables or []
numTemps = [0]
literalPtrs = literalPtrs or []
if literalPtrs:
literals = [None for _ in literalPtrs]
else:
literals = []
def readObject():
tok = self.peekToken()
if tok in [b'true', b'false', b'self', b'super', b'nil']:
if tok == b'true':
bytecodes.append(Bytecode.PUSH_TRUE)
elif tok == b'false':
bytecodes.append(Bytecode.PUSH_FALSE)
elif tok == b'self':
bytecodes.append(Bytecode.PUSH_SELF)
elif tok == b'super':
bytecodes.append(Bytecode.PUSH_SUPER)
elif tok == b'nil':
bytecodes.append(Bytecode.PUSH_NIL)
self.readToken()
elif tok in declaredVariables:
# Push tempvar by index onto the stack
index = declaredVariables.index(tok)
bytecodes.append(Bytecode.PUSH_TEMP) # pushTemp:
bytecodes.append(bytes([index]))
self.readToken()
elif tok in methodargs:
index = methodargs.index(tok)
bytecodes.append(Bytecode.PUSH_ARG) # pushArg:
bytecodes.append(bytes([index]))
self.readToken()
elif tok.startswith(b'#'):
# Either an OrderedCollection or symbol
if tok.startswith(b'#('):
literals.append(None)
literalPtrs.append(self.pointerToLiteralOrderedCollection())
ptrVal = len(literals) - 1
else:
symbVal = self.readToken()[1:]
if symbVal in literals:
ptrVal = literals.index(symbVal)
else:
literalPtrs.append(self.qsilSymbolPtr(symbVal))
literals.append(symbVal)
ptrVal = len(literals) - 1
bytecodes.append(Bytecode.PUSH_LITERAL) # PUSH_LITERAL
bytecodes.append(bytes([ptrVal]))
#print(bytecodes, literals)
elif tok.startswith(b'\''):
strVal = self.readString()
if strVal in literals:
strPtr = literals.index(strVal)
else:
literalPtrs.append(self.qsilStringPtr(strVal))
literals.append(strVal)
strPtr = len(literals) - 1
bytecodes.append(Bytecode.PUSH_LITERAL) # PUSH_LITERAL
bytecodes.append(bytes([strPtr]))
#print(bytecodes, literals)
elif tok.startswith(b'('):
self.skipwhitespace()
self.stream.read(1)
bytecodeOneLine()
self.skipwhitespace()
assert self.stream.read(1) == b')'
elif tok.startswith(b'['):
self.skipwhitespace()
self.stream.read(1)
argNames = []
while self.peekToken().startswith(b':'):
# Arguments to this block
argNames.append(self.readToken()[1:])
if argNames:
assert self.readToken() == b'|'
bc, newliterals = self.methodToBytecodes(methodargs + argNames, False, declaredVariables[:], literalPtrs[:])
ctxPtr = self.qsilBlockContextPtr(bc, newliterals, methodargs)
literalPtrs.append(ctxPtr)
literals.append(None)
self.skipwhitespace()
bytecodes.append(Bytecode.PUSH_LITERAL) # PUSH_LITERAL
bytecodes.append(bytes([len(literals) - 1]))
assert self.stream.read(1) == b']'
else:
# Try to read an integer or float, or if that fails, assume it's
# a class name
if (tok[0:1].isupper() or tok[0:1].islower()):
bytecodes.append(['latebindliteral', self.readToken()])
else:
if b'.' in self.peekToken(2)[1]:
numVal = float(b''.join(self.peekToken(3)))
self.readToken()
self.readToken()
self.readToken()
else:
numVal = int(self.readToken())
if numVal in literals:
numPtr = literals.index(numVal)
else:
literalPtrs.append(self.qsilNumberPtr(numVal))
literals.append(numVal)
numPtr = len(literals) - 1
bytecodes.append(Bytecode.PUSH_LITERAL) # PUSH_LITERAL
bytecodes.append(bytes([numPtr]))
def readSelector(canHaveArgs):
self.skipwhitespace()
if self.peek() in [b')', b'.', b']']:
return # No selector, just have a literal or something
selName = self.peekToken()
if not canHaveArgs:
if ((selName.endswith(b':') or
selName in specials)):
return
self.readToken()
while canHaveArgs:
if (selName.endswith(b':') or selName in specials):
readObject()
readSelector(False)
if selName in specials or (not selName.endswith(b':')):
break
while self.peekToken().endswith(b':'):
selName += self.readToken()
bytecodeOneLine(False)
break
# Push a call for ourselves onto the stack
#print("Got here")
if selName in literals:
ptrVal = literals.index(selName)
else:
literalPtrs.append(self.qsilSymbolPtr(selName))
literals.append(selName)
ptrVal = len(literals) - 1
bytecodes.append(Bytecode.PUSH_LITERAL)
bytecodes.append(bytes([ptrVal]))
bytecodes.append(Bytecode.CALL)
#print(bytecodes)
def bytecodeOneLine(canHaveArgs = True, popAfterwards = True):
# Read an initial object
while self.peekToken().startswith(b'"'):
self.consumeComment()
if self.peekToken(2)[1] == b':=':
# Read the rvalue and then push a bytecode to write by index
if self.peekToken() in declaredVariables:
varIndex = declaredVariables.index(self.readToken())
self.readToken() # Consume := symbol
bytecodeOneLine(canHaveArgs, False)
bytecodes.append(Bytecode.POP_INTO_TEMP) # popIntoTemp:
bytecodes.append(bytes([varIndex]))
else:
# Another late bind, this time as an lvalue
lvalueName = self.readToken()
self.readToken() # Consume := symbol
bytecodeOneLine(canHaveArgs, False)
bytecodes.append(['latebindlvalue', lvalueName])
if self.peekToken() == b'.':
self.readToken()
if popAfterwards:
bytecodes.append(Bytecode.POP) # POP
return
if self.peekToken().startswith(b'^'):
self.skipwhitespace()
self.stream.read(1)
bytecodeOneLine()
bytecodes.append(Bytecode.RETURN) # RETURN
elif self.peekToken() == b'|':
# Read tempvars
varNames = b''
self.readToken()
while self.peek() != b'|':
varNames += self.stream.read(1)
self.stream.read(1)
newVars = [x for x in varNames.strip().split(b' ') if x]
numTemps[0] += len(newVars)
declaredVariables.extend(newVars)
elif self.peekToken() != b']':
readObject()
self.consumeComment()
while self.peekToken() not in [b')', b']']:
self.consumeComment()
readSelector(canHaveArgs)
self.consumeComment()
if self.peekToken() == b'.':
if popAfterwards:
self.readToken()
bytecodes.append(Bytecode.POP) # POP
break
# Push bytecode to pop one off the stack
while self.peekToken() != b']':
bytecodeOneLine()
if addReturn:
bytecodes.append(Bytecode.PUSH_SELF)
bytecodes.append(Bytecode.RETURN)
return (bytecodes, literalPtrs)
def readMethod(self, forClass):
assert self.stream.read(1) == b'['
self.consumeComment()
newMethod = QSILMethod()
newMethod.objId = self.nextObjectId()
tok = self.readToken()
visibility = []
while tok in [b'public', b'private', b'protected', b'static']:
visibility.append(tok)
tok = self.readToken()
newMethod.visibility = visibility
funcName = tok
args = []
if (funcName.endswith(b':') or
funcName in specials):
args.append(self.readToken())
if not funcName in specials:
while self.peekToken().endswith(b':'):
funcName += self.readToken()
args.append(self.readToken())
newMethod.name = funcName
newMethod.args = args
# Implement actual method parsing and bytecodes later
self.skipwhitespace()
specialBytecodes = []
if self.peek() == b'<':
# Something special, it's telling us which bytecodes
assert self.readToken() == b'<bytecodes'
bytecodes = [bytes([int(bc, 16)]) for bc in self.readString().split(b' ')]
specialBytecodes = bytecodes
self.skipwhitespace()
assert self.stream.read(1) == b'>'
bytecodes, literalPtrs = self.methodToBytecodes(args)
newMethod.bytecodes = specialBytecodes + bytecodes
newMethod.literalPtrs = literalPtrs
newMethod._class = forClass
while self.peekToken() != b']': # Maybe it's part of the method, no whitespace. FIXME
self.readToken()
self.skipwhitespace()
assert self.stream.read(1) == b']'
self.consumeComment()
return newMethod
def readMethods(self, forClass):
self.skipwhitespace()
assert self.stream.read(2) == b'#('
self.consumeComment()
self.skipwhitespace()
methods = []
while self.peek() == b'[':
methods.append(self.readMethod(forClass))
self.skipwhitespace()
assert self.stream.read(1) == b')'
return methods
def readclass(self):
self.skipwhitespace()
assert self.stream.read(1) == b'['
self.consumeComment()
newClass = QSILClass()
newClass.superclass = self.readToken()
newClass.type = self.readToken()
newClass.name = self.readToken()[1:]
if newClass.name == b'Object':
newClass.classId = SpecialIDs.OBJECT_CLASS_ID
elif newClass.name == b'ByteString':
newClass.classId = SpecialIDs.BYTESTRING_CLASS_ID
elif newClass.name == b'Character':
newClass.classId = SpecialIDs.CHARACTER_CLASS_ID
elif newClass.name == b'Integer':
newClass.classId = SpecialIDs.INTEGER_CLASS_ID
elif newClass.name == b'Class':
newClass.classId = SpecialIDs.CLASS_CLASS_ID
elif newClass.name == b'Method':
newClass.classId = SpecialIDs.METHOD_CLASS_ID
elif newClass.name == b'MethodContext':
newClass.classId = SpecialIDs.METHODCONTEXT_CLASS_ID
elif newClass.name == b'BlockContext':
newClass.classId = SpecialIDs.BLOCKCONTEXT_CLASS_ID
elif newClass.name == b'Float':
newClass.classId = SpecialIDs.FLOAT_CLASS_ID
elif newClass.name == b'Symbol':
newClass.classId = SpecialIDs.SYMBOL_CLASS_ID
elif newClass.name == b'OrderedCollection':
newClass.classId = SpecialIDs.ORDEREDCOLLECTION_CLASS_ID
elif newClass.name == b'True':
newClass.classId = SpecialIDs.TRUE_CLASS_ID
elif newClass.name == b'False':
newClass.classId = SpecialIDs.FALSE_CLASS_ID
elif newClass.name == b'UndefinedObject':
newClass.classId = SpecialIDs.UNDEFINEDOBJECT_CLASS_ID
elif newClass.name == b'QSILImage':
newClass.classId = SpecialIDs.QSILIMAGE_CLASS_ID
else:
newClass.classId = self.nextObjectId()
assert self.readToken() == b'instanceVariableNames:'
newClass.instancevariables = [x for x in self.readString().split(b' ') if x]
assert self.readToken() == b'classVariableNames:'
newClass.classvariables = [x for x in self.readString().split(b' ') if x]
assert self.readToken() == b'methods:'
newClass.methods = self.readMethods(newClass.classId)
if newClass.name == b'Bootstrap':
global bootstrapPtr
bootstrapPtr = Pointer.forObject(newClass.methods[0])
self.skipwhitespace()
assert self.stream.read(1) == b']'
return newClass
def doLateBinds(self):
# Fix all sorts of references and stuff
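        # A 'latebindliteral' placeholder is rewritten below into either PUSH_INSTVAR (when the
        # name is an instance variable) or PUSH_OBJ_REF to a class id; a 'latebindlvalue'
        # placeholder becomes POP_INTO_INSTVAR. This needs the full class table, hence a second pass.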
classInstVars = {}
classClassVars = {}
classNames = {}
for eachClass in self.classes.values():
instVars = []
classVars = []
classNames[eachClass.name] = eachClass
if eachClass.superclass in classInstVars:
instVars.extend(classInstVars[eachClass.superclass])
classVars.extend(classClassVars[eachClass.superclass])
instVars.extend(eachClass.instancevariables)
classVars.extend(eachClass.classvariables)
classInstVars[eachClass.name] = instVars
classClassVars[eachClass.name] = classVars
#print(classInstVars)
#print(classClassVars)
#print(classNames)
def fixBytecodes(originalbytecodes):
newbytecodes = []
for bc in originalbytecodes:
if isinstance(bc, list):
if bc[0] == 'latebindliteral':
search = bc[1]
if search in instVars:
newbytecodes.append(Bytecode.PUSH_INSTVAR)
newbytecodes.append(bytes([instVars.index(search)]))
elif search in classNames:
newbytecodes.append(Bytecode.PUSH_OBJ_REF)
newbytecodes.append(pack("<i", classNames[search].classId))
else:
print(f"Unknown {bc}")
elif bc[0] == 'latebindlvalue':
search = bc[1]
if search in instVars:
newbytecodes.append(Bytecode.POP_INTO_INSTVAR)
newbytecodes.append(bytes([instVars.index(search)]))
else:
print(f"Unknown {bc}")
else:
print(f"UNKNOWN {bc}")
else:
newbytecodes.append(bc)
return newbytecodes
# Needs a second pass after getting the names of each
# instance variable, class variable, and existing classes
for eachClass in self.classes.values():
instVars = classInstVars[eachClass.name]
classVars = classClassVars[eachClass.name]
for method in eachClass.methods:
method.bytecodes = fixBytecodes(method.bytecodes)
for bc in self.blockContexts:
bytecodeObject = self.objects[bc.pyObjStorage[-2].objId]
fixedBc = b''
for bc in fixBytecodes(bytecodeObject.pyObjStorage):
if isinstance(bc, bytes):
fixedBc += bc
else:
fixedBc += bytes([bc])
bytecodeObject.pyObjStorage = fixedBc
def readall(self):
self.skipwhitespace()
while self.peek() == b'"':
self.consumeComment()
self.skipwhitespace()
self.skipwhitespace()
assert self.peek() == b'['
while (self.peek()):
self.consumeComment()
self.skipwhitespace()
eachClass = self.readclass()
self.classes[eachClass.name] = eachClass
self.doLateBinds()
if printBytecodes:
for eachClass in self.classes.values():
if eachClass.methods:
clsId = pack("<i", eachClass.classId)
print(f"***CLASS {clsId} {eachClass.name}***")
for method in eachClass.methods:
print(f"\t***FUNCTION #{method.name} ***")
print(f"\t\t**BYTECODES**")
for bc in method.bytecodes:
print('\t\t{}'.format(bc))
print("\n\t\t**LITERALS**")
for lit in method.literalPtrs:
print('\t\t{}'.format(self.objects[lit.objId]))
#pprint.pprint([funcName, bytecodes, [self.objects[x.objId] for x in literalPtrs]])
print(f"\n\t***END FUNCTION #{method.name} ***")
print(f"***END CLASS {eachClass.name}***")
else:
print(f"***EMPTY CLASS {eachClass.name}***")
for context in self.blockContexts:
print("***BLOCKCONTEXT***")
print("\t**BYTECODES**")
for bc in self.objects[context.pyObjStorage[0].objId].pyObjStorage:
print('\t{}'.format(bc))
print("\n\t**LITERALS**")
for lit in self.objects[context.pyObjStorage[1].objId].pyObjStorage:
print('\t{}'.format(self.objects[lit.objId]))
#pprint.pprint([funcName, bytecodes, [self.objects[x.objId] for x in literalPtrs]])
print("\n***END BLOCKCONTEXT***")
# Now that all the bytecodes are complete, serialize all the classes into the proper QSIL format
for eachClass in self.classes.values():
eachClass.asQSILObject(self)
# And finally, make sure to create some of the
# singleton objects (nil, true, false, etc.)
nil = Object()
nil.objId = SpecialIDs.NIL_OBJECT_ID
nil.classId = SpecialIDs.UNDEFINEDOBJECT_CLASS_ID
self.objects[nil.objId] = nil
trueObj = Object()
trueObj.objId = SpecialIDs.TRUE_OBJECT_ID
trueObj.classId = SpecialIDs.TRUE_CLASS_ID
self.objects[trueObj.objId] = trueObj
falseObj = Object()
falseObj.objId = SpecialIDs.FALSE_OBJECT_ID
falseObj.classId = SpecialIDs.FALSE_CLASS_ID
self.objects[falseObj.objId] = falseObj
# Also need QSILImage object, but that'll be a bit later
qsilImage = Object()
qsilImage.objId = SpecialIDs.QSIL_IMAGE_ID
qsilImage.classId = SpecialIDs.QSILIMAGE_CLASS_ID
self.objects[qsilImage.objId] = qsilImage
allClassesPtr = self.qsilOrderedCollectionPtr(list([Pointer.forObject(x) for x in self.objects.values()]))
qsilImage.setMem([allClassesPtr])
# Now we need to create a MethodContext pointing to the
# beginning of the Bootstrap>bootstrap method
bootstrapCtx = Object()
bootstrapCtx.objId = self.nextObjectId()
bootstrapCtx.classId = SpecialIDs.METHODCONTEXT_CLASS_ID
pcPtr = self.qsilNumberPtr(0)
stackPtr = self.qsilOrderedCollectionPtr([])
receiverPtr = Pointer()
receiverPtr.objId = SpecialIDs.NIL_OBJECT_ID
tempvarsPtr = self.qsilOrderedCollectionPtr([])
parentContextPtr = Pointer()
parentContextPtr.objId = SpecialIDs.NIL_OBJECT_ID
argsPtr = self.qsilOrderedCollectionPtr([])
bootstrapCtx.setMem([pcPtr, stackPtr, receiverPtr, tempvarsPtr, parentContextPtr, argsPtr, bootstrapPtr])
self.objects[bootstrapCtx.objId] = bootstrapCtx
outputBytes = b''
outputBytes += pack("<i", len(self.objects))
for obj in sorted(self.objects.values(), key=(lambda x: x.objId)):
print(obj)
outputBytes += obj.bytesForSerialization()
outputBytes += pack("<i", bootstrapCtx.objId)
print("Serialized {} objects".format(len(self.objects)))
return outputBytes
if __name__ == '__main__':
print("QSIL Bootstrapper")
p = Parser(open("qsil1.sources", "rb"))
out = p.readall()
with open("qsil1.image", "wb") as outFile:
outFile.write(out)
print("Wrote {} bytes".format(len(out)))
|
from typing import Optional
from django.db import models
from ...apps import UFDLCoreAppConfig
from ...exceptions import *
from ..mixins import DeleteOnNoRemainingReferencesOnlyModel, DeleteOnNoRemainingReferencesOnlyQuerySet
class NodeQuerySet(DeleteOnNoRemainingReferencesOnlyQuerySet):
"""
A query-set over worker nodes.
"""
pass
class Node(DeleteOnNoRemainingReferencesOnlyModel):
"""
A worker node.
"""
# The IP address of the worker node
ip = models.CharField(max_length=39)
# An identifier to disambiguate between multiple nodes on the same system
index = models.PositiveSmallIntegerField()
# The NVidia driver version
driver_version = models.CharField(max_length=16, null=True)
# The hardware generation of graphics on the node
hardware_generation = models.ForeignKey(f"{UFDLCoreAppConfig.label}.Hardware",
on_delete=models.DO_NOTHING,
related_name="nodes",
null=True)
# The amount of GPU memory available on the node, in MB
gpu_mem = models.BigIntegerField(null=True)
# The amount of CPU memory available on the node, in MB
cpu_mem = models.BigIntegerField()
# The timestamp when the node last made contact
last_seen = models.DateTimeField(null=True, default=None)
# The job the node is currently working on
current_job = models.ForeignKey(f"{UFDLCoreAppConfig.label}.Job",
on_delete=models.DO_NOTHING,
related_name="+",
null=True,
default=None)
objects = NodeQuerySet.as_manager()
class Meta:
constraints = [
# Ensure each unique node is only registered once
models.UniqueConstraint(name="unique_nodes",
fields=["ip", "index"])
]
@property
def is_working_job(self) -> bool:
"""
Whether this node is currently working a job.
"""
return self.current_job is not None
@classmethod
def from_request(cls, request) -> Optional['Node']:
"""
Gets the node from the request, if there is one.
:param request: The request.
:return: The node, or None if there isn't one.
"""
# Get the node ID from the header if specified
node_id = request.headers.get("Node-Id", None)
# If not specified, return None
if node_id is None:
return None
# Attempt to parse the node ID
try:
node = int(node_id)
except ValueError as e:
raise BadNodeID(node_id, "Unable to parse into an integer primary-key")
# Filter the primary-key into a node object
node = cls.objects.filter(pk=node).first()
# If the node doesn't exist, raise an error
if node is None:
raise BadNodeID(node_id, "No node with this primary-key")
return node
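# Illustrative sketch (hypothetical view function, not part of this module) of resolving the
# calling worker node:
#
#     def heartbeat(request):
#         node = Node.from_request(request)  # raises BadNodeID for a malformed Node-Id header
#         if node is not None and node.is_working_job:
#             ...  # e.g. refresh node.last_seen or report progress on node.current_job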
|
import os
import dotenv
import tweepy
def load_dot():
    # python-dotenv exposes load_dotenv(), not load()
    dotenv.load_dotenv()
def handleAuth():
    # python-dotenv has no get(); read the values that load_dot() placed into the environment
    consumer_key = os.getenv('ENV_KEY')
    consumer_secret = os.getenv('ENV_SECRET')
    access_key = os.getenv('TOKEN_KEY')
    access_secret = os.getenv('TOKEN_SECRET')
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_key, access_secret)
return auth
def handleAPI(auth):
return tweepy.API(auth)
def authorizeApp():
load_dot()
auth = handleAuth()
return handleAPI(auth)
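# Illustrative usage (assumes ENV_KEY, ENV_SECRET, TOKEN_KEY and TOKEN_SECRET are defined in a
# local .env file; verify_credentials is a standard tweepy.API call):
#
#     api = authorizeApp()
#     user = api.verify_credentials()
#     print(user.screen_name)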
|
# _*_ coding: utf-8 _*_
"""
-------------------------------------------------
@File Name: __init__.py
@Description:
@Author: caimmy
@date: 2019/10/22 17:46
-------------------------------------------------
Change Activity:
-------------------------------------------------
"""
|
import pytest
from api.search.serializers import SearchSerializer
from api_tests import utils
from osf.models import MetaSchema
from osf_tests.factories import (
AuthUserFactory,
NodeFactory,
ProjectFactory,
)
from tests.utils import make_drf_request_with_version, mock_archive
from website.project.metadata.schemas import LATEST_SCHEMA_VERSION
from website.search import search
@pytest.mark.django_db
class TestSearchSerializer:
def test_search_serializer_mixed_model(self):
user = AuthUserFactory()
project = ProjectFactory(creator=user, is_public=True)
component = NodeFactory(parent=project, creator=user, is_public=True)
file_component = utils.create_test_file(component, user)
context = {'request': make_drf_request_with_version(version='2.0')}
schema = MetaSchema.objects.filter(
name='Replication Recipe (Brandt et al., 2013): Post-Completion',
schema_version=LATEST_SCHEMA_VERSION).first()
#test_search_serializer_mixed_model_project
result = SearchSerializer(project, context=context).data
assert result['data']['type'] == 'nodes'
#test_search_serializer_mixed_model_component
result = SearchSerializer(component, context=context).data
assert result['data']['type'] == 'nodes'
#test_search_serializer_mixed_model_registration
with mock_archive(project, autocomplete=True, autoapprove=True, schema=schema) as registration:
result = SearchSerializer(registration, context=context).data
assert result['data']['type'] == 'registrations'
#test_search_serializer_mixed_model_file
result = SearchSerializer(file_component, context=context).data
assert result['data']['type'] == 'files'
#test_search_serializer_mixed_model_user
result = SearchSerializer(user, context=context).data
assert result['data']['type'] == 'users'
|
# coding: utf-8
"""
Ed-Fi Operational Data Store API
The Ed-Fi ODS / API enables applications to read and write education data stored in an Ed-Fi ODS through a secure REST interface. *** > *Note: Consumers of ODS / API information should sanitize all data for display and storage. The ODS / API provides reasonable safeguards against cross-site scripting attacks and other malicious content, but the platform does not and cannot guarantee that the data it contains is free of all potentially harmful content.* *** # noqa: E501
OpenAPI spec version: 3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.configuration import Configuration
class EdFiStaffSchoolAssociation(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'program_assignment_descriptor': 'str',
'calendar_reference': 'EdFiCalendarReference',
'school_reference': 'EdFiSchoolReference',
'school_year_type_reference': 'EdFiSchoolYearTypeReference',
'staff_reference': 'EdFiStaffReference',
'academic_subjects': 'list[EdFiStaffSchoolAssociationAcademicSubject]',
'grade_levels': 'list[EdFiStaffSchoolAssociationGradeLevel]',
'etag': 'str'
}
attribute_map = {
'id': 'id',
'program_assignment_descriptor': 'programAssignmentDescriptor',
'calendar_reference': 'calendarReference',
'school_reference': 'schoolReference',
'school_year_type_reference': 'schoolYearTypeReference',
'staff_reference': 'staffReference',
'academic_subjects': 'academicSubjects',
'grade_levels': 'gradeLevels',
'etag': '_etag'
}
def __init__(self, id=None, program_assignment_descriptor=None, calendar_reference=None, school_reference=None, school_year_type_reference=None, staff_reference=None, academic_subjects=None, grade_levels=None, etag=None, _configuration=None): # noqa: E501
"""EdFiStaffSchoolAssociation - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._id = None
self._program_assignment_descriptor = None
self._calendar_reference = None
self._school_reference = None
self._school_year_type_reference = None
self._staff_reference = None
self._academic_subjects = None
self._grade_levels = None
self._etag = None
self.discriminator = None
if id is not None:
self.id = id
self.program_assignment_descriptor = program_assignment_descriptor
if calendar_reference is not None:
self.calendar_reference = calendar_reference
self.school_reference = school_reference
if school_year_type_reference is not None:
self.school_year_type_reference = school_year_type_reference
self.staff_reference = staff_reference
if academic_subjects is not None:
self.academic_subjects = academic_subjects
if grade_levels is not None:
self.grade_levels = grade_levels
if etag is not None:
self.etag = etag
@property
def id(self):
"""Gets the id of this EdFiStaffSchoolAssociation. # noqa: E501
# noqa: E501
:return: The id of this EdFiStaffSchoolAssociation. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this EdFiStaffSchoolAssociation.
# noqa: E501
:param id: The id of this EdFiStaffSchoolAssociation. # noqa: E501
:type: str
"""
self._id = id
@property
def program_assignment_descriptor(self):
"""Gets the program_assignment_descriptor of this EdFiStaffSchoolAssociation. # noqa: E501
The name of the program for which the individual is assigned; for example: Regular education Title I-Academic Title I-Non-Academic Special Education Bilingual/English as a Second Language. # noqa: E501
:return: The program_assignment_descriptor of this EdFiStaffSchoolAssociation. # noqa: E501
:rtype: str
"""
return self._program_assignment_descriptor
@program_assignment_descriptor.setter
def program_assignment_descriptor(self, program_assignment_descriptor):
"""Sets the program_assignment_descriptor of this EdFiStaffSchoolAssociation.
The name of the program for which the individual is assigned; for example: Regular education Title I-Academic Title I-Non-Academic Special Education Bilingual/English as a Second Language. # noqa: E501
:param program_assignment_descriptor: The program_assignment_descriptor of this EdFiStaffSchoolAssociation. # noqa: E501
:type: str
"""
if self._configuration.client_side_validation and program_assignment_descriptor is None:
raise ValueError("Invalid value for `program_assignment_descriptor`, must not be `None`") # noqa: E501
if (self._configuration.client_side_validation and
program_assignment_descriptor is not None and len(program_assignment_descriptor) > 306):
raise ValueError("Invalid value for `program_assignment_descriptor`, length must be less than or equal to `306`") # noqa: E501
self._program_assignment_descriptor = program_assignment_descriptor
@property
def calendar_reference(self):
"""Gets the calendar_reference of this EdFiStaffSchoolAssociation. # noqa: E501
:return: The calendar_reference of this EdFiStaffSchoolAssociation. # noqa: E501
:rtype: EdFiCalendarReference
"""
return self._calendar_reference
@calendar_reference.setter
def calendar_reference(self, calendar_reference):
"""Sets the calendar_reference of this EdFiStaffSchoolAssociation.
:param calendar_reference: The calendar_reference of this EdFiStaffSchoolAssociation. # noqa: E501
:type: EdFiCalendarReference
"""
self._calendar_reference = calendar_reference
@property
def school_reference(self):
"""Gets the school_reference of this EdFiStaffSchoolAssociation. # noqa: E501
:return: The school_reference of this EdFiStaffSchoolAssociation. # noqa: E501
:rtype: EdFiSchoolReference
"""
return self._school_reference
@school_reference.setter
def school_reference(self, school_reference):
"""Sets the school_reference of this EdFiStaffSchoolAssociation.
:param school_reference: The school_reference of this EdFiStaffSchoolAssociation. # noqa: E501
:type: EdFiSchoolReference
"""
if self._configuration.client_side_validation and school_reference is None:
raise ValueError("Invalid value for `school_reference`, must not be `None`") # noqa: E501
self._school_reference = school_reference
@property
def school_year_type_reference(self):
"""Gets the school_year_type_reference of this EdFiStaffSchoolAssociation. # noqa: E501
:return: The school_year_type_reference of this EdFiStaffSchoolAssociation. # noqa: E501
:rtype: EdFiSchoolYearTypeReference
"""
return self._school_year_type_reference
@school_year_type_reference.setter
def school_year_type_reference(self, school_year_type_reference):
"""Sets the school_year_type_reference of this EdFiStaffSchoolAssociation.
:param school_year_type_reference: The school_year_type_reference of this EdFiStaffSchoolAssociation. # noqa: E501
:type: EdFiSchoolYearTypeReference
"""
self._school_year_type_reference = school_year_type_reference
@property
def staff_reference(self):
"""Gets the staff_reference of this EdFiStaffSchoolAssociation. # noqa: E501
:return: The staff_reference of this EdFiStaffSchoolAssociation. # noqa: E501
:rtype: EdFiStaffReference
"""
return self._staff_reference
@staff_reference.setter
def staff_reference(self, staff_reference):
"""Sets the staff_reference of this EdFiStaffSchoolAssociation.
:param staff_reference: The staff_reference of this EdFiStaffSchoolAssociation. # noqa: E501
:type: EdFiStaffReference
"""
if self._configuration.client_side_validation and staff_reference is None:
raise ValueError("Invalid value for `staff_reference`, must not be `None`") # noqa: E501
self._staff_reference = staff_reference
@property
def academic_subjects(self):
"""Gets the academic_subjects of this EdFiStaffSchoolAssociation. # noqa: E501
An unordered collection of staffSchoolAssociationAcademicSubjects. The academic subjects the individual is eligible to teach. # noqa: E501
:return: The academic_subjects of this EdFiStaffSchoolAssociation. # noqa: E501
:rtype: list[EdFiStaffSchoolAssociationAcademicSubject]
"""
return self._academic_subjects
@academic_subjects.setter
def academic_subjects(self, academic_subjects):
"""Sets the academic_subjects of this EdFiStaffSchoolAssociation.
An unordered collection of staffSchoolAssociationAcademicSubjects. The academic subjects the individual is eligible to teach. # noqa: E501
:param academic_subjects: The academic_subjects of this EdFiStaffSchoolAssociation. # noqa: E501
:type: list[EdFiStaffSchoolAssociationAcademicSubject]
"""
self._academic_subjects = academic_subjects
@property
def grade_levels(self):
"""Gets the grade_levels of this EdFiStaffSchoolAssociation. # noqa: E501
An unordered collection of staffSchoolAssociationGradeLevels. The grade levels the individual is eligible to teach. # noqa: E501
:return: The grade_levels of this EdFiStaffSchoolAssociation. # noqa: E501
:rtype: list[EdFiStaffSchoolAssociationGradeLevel]
"""
return self._grade_levels
@grade_levels.setter
def grade_levels(self, grade_levels):
"""Sets the grade_levels of this EdFiStaffSchoolAssociation.
An unordered collection of staffSchoolAssociationGradeLevels. The grade levels the individual is eligible to teach. # noqa: E501
:param grade_levels: The grade_levels of this EdFiStaffSchoolAssociation. # noqa: E501
:type: list[EdFiStaffSchoolAssociationGradeLevel]
"""
self._grade_levels = grade_levels
@property
def etag(self):
"""Gets the etag of this EdFiStaffSchoolAssociation. # noqa: E501
A unique system-generated value that identifies the version of the resource. # noqa: E501
:return: The etag of this EdFiStaffSchoolAssociation. # noqa: E501
:rtype: str
"""
return self._etag
@etag.setter
def etag(self, etag):
"""Sets the etag of this EdFiStaffSchoolAssociation.
A unique system-generated value that identifies the version of the resource. # noqa: E501
:param etag: The etag of this EdFiStaffSchoolAssociation. # noqa: E501
:type: str
"""
self._etag = etag
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(EdFiStaffSchoolAssociation, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, EdFiStaffSchoolAssociation):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, EdFiStaffSchoolAssociation):
return True
return self.to_dict() != other.to_dict()
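# Illustrative construction sketch (not from the generated source; field values are placeholders
# and the keyword arguments of the reference models are assumed):
#
#     association = EdFiStaffSchoolAssociation(
#         program_assignment_descriptor="uri://ed-fi.org/ProgramAssignmentDescriptor#Regular Education",
#         school_reference=EdFiSchoolReference(school_id=255901001),
#         staff_reference=EdFiStaffReference(staff_unique_id="12345"),
#     )
#     print(association.to_dict())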
|
#-*- coding: utf-8 -*-
import sys
import os
import glob
import click
#import click_completion
#click_completion.init()
from sequana import version
import functools
__all__ = ["main"]
import sequana
import colorlog
logger = colorlog.getLogger(__name__)
# This can be used by all commands as a simple decorator
def common_logger(func):
@click.option("--logger", default="INFO",
type=click.Choice(["INFO", "DEBUG", "WARNING", "CRITICAL", "ERROR"]))
@functools.wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
def get_env_vars(ctx, args, incomplete):
return [k for k in os.environ.keys() if incomplete in k]
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
import pkg_resources
pipelines = [item.key for item in pkg_resources.working_set if item.key.startswith("sequana")]
if len(pipelines):
version +="\nThe following pipelines are installed:\n"
for item in pkg_resources.working_set:
if item.key.startswith("sequana") and item.key != 'sequana':
version += "\n - {} version: {}".format(item.key, item.version)
@click.group(context_settings=CONTEXT_SETTINGS)
@click.version_option(version=version)
def main(**kwargs):
"""\bThis is the main entry point for a set of Sequana applications.
Pipelines such as sequana_rnaseq, sequana_variant_calling have their own
application and help.
In addition, more advanced tools such as sequana_taxonomy or
sequana_coverage have their own standalone.
"""
pass
@main.command()
@click.argument('filename', type=click.STRING, nargs=-1)
@click.option("-o", "--output",
help="filename where to save results. to be used with --head, --tail")
@click.option("--count-reads", is_flag=True)
@click.option("--head", type=click.INT,
help='number of reads to extract from the head')
@click.option("--merge", is_flag=True)
@click.option("--tail", type=click.INT,
help="number of reads to extract from the tail")
def fastq(**kwargs):
"""Set of useful utilities for FastQ manipulation.
    Input files can be gzipped or not. The --output option tells us where to save the
    results and is required with --head, --tail and --merge.
"""
from sequana.fastq import FastQ
filenames = kwargs['filename']
    # users may provide a wildcard such as "A*gz" or a list of files.
    if len(filenames) == 1:
        # whether this is an existing file or a glob pattern, glob gives the same answer.
        filenames = glob.glob(filenames[0])
    for filename in filenames:
        if not os.path.exists(filename):
            logger.error(f"{filename} does not exist")
            sys.exit(1)
# could be simplified calling count_reads only once
if kwargs['count_reads']:
for filename in filenames:
f = FastQ(filename)
Nreads = f.count_reads()
Nlines = Nreads * 4
print(f"Number of reads in {filename}: {Nreads}")
print(f"Number of lines in {filename}: {Nlines}")
elif kwargs['head']:
for filename in filenames:
f = FastQ(filename)
if kwargs['output'] is None:
logger.error("Please use --output to tell us where to save the results")
sys.exit(1)
N = kwargs['head'] * 4
f.extract_head(N=N, output_filename=kwargs['output'])
elif kwargs['tail']: #pragma: no cover
raise NotImplementedError
elif kwargs['merge']:
import subprocess
# merge all input files (assuming gz extension)
extensions = [filename.split(".")[-1] for filename in filenames]
if set(extensions) != set(['gz']):
raise ValueError("Your input FastQ files must be zipped")
output_filename = kwargs['output']
if output_filename is None:
logger.error("You must use --output filename.gz")
sys.exit(1)
if output_filename.endswith(".gz") is False:
raise ValueError("your output file must end in .gz")
p1 = subprocess.Popen(['zcat'] + list(filenames), stdout=subprocess.PIPE)
fout = open(output_filename, 'wb')
p2 = subprocess.run(['pigz'], stdin=p1.stdout, stdout=fout)
else: #pragma: no cover
print("Use one of the commands")
@main.command()
@click.argument('name', type=click.STRING)
@click.option('--check', is_flag=True)
@click.option('--extract-adapters', is_flag=True)
@click.option('--quick-fix', is_flag=True)
@click.option('--output', default=None)
def samplesheet(**kwargs):
"""Utilities to manipulate sample sheet"""
name = kwargs['name']
from sequana.iem import IEM
if kwargs['check']:
iem = IEM(name)
iem.validate()
logger.info("SampleSheet looks correct")
elif kwargs["extract_adapters"]:
iem = IEM(name)
iem.to_fasta()
elif kwargs["quick_fix"]:
iem = IEM(name, tryme=True)
if kwargs['output']:
filename = kwargs['output']
else:
filename = name + ".fixed"
logger.info("Saving fixed version in {}".format(filename))
iem.quick_fix(output_filename=filename)
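# Illustrative invocations (not from the original source; options as defined above):
#
#     sequana samplesheet SampleSheet.csv --check
#     sequana samplesheet SampleSheet.csv --extract-adapters
#     sequana samplesheet SampleSheet.csv --quick-fix --output SampleSheet_fixed.csv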
# This will be a complex command to provide HTML summary page for
# input files (e.g. bam), or results from pipelines. For each module,
# we should have corresponding option that starts with the module's name
# This can also take as input various types of data (e.g. FastA)
@main.command()
@click.argument("name", type=click.Path(exists=True), nargs=-1)
@click.option("--module",
required=False,
type=click.Choice(["bamqc", "bam", "fasta", "fastq", "gff"]))
def summary(**kwargs):
"""Create a HTML report for various type of NGS formats.
\b
    * bamqc
    * bam
    * fasta
    * fastq
    * gff
    This will process all files in the given pattern (in back quotes)
    sequentially and produce one HTML file per input file.
    Other modules all work in the same way. For example, for FastQ files::
sequana summary one_input.fastq
sequana summary `ls *fastq`
"""
names = kwargs['name']
module = kwargs['module']
if module is None:
if names[0].endswith('fastq.gz') or names[0].endswith('.fastq'):
module = "fastq"
elif names[0].endswith('.bam'):
module = "bam"
elif names[0].endswith('.gff') or names[0].endswith('gff3'):
module = "gff"
elif names[0].endswith('fasta.gz') or names[0].endswith('.fasta'):
module = "fasta"
else:
logger.error("please use --module to tell us about the input fimes")
sys.exit(1)
if module == "bamqc":
for name in names:
print(f"Processing {name}")
from sequana.modules_report.bamqc import BAMQCModule
report = BAMQCModule(name, "bamqc.html")
elif module == "fasta": # there is no module per se. HEre we just call FastA.summary()
from sequana.fasta import FastA
for name in names:
f = FastA(name)
f.summary()
elif module == "fastq": # there is no module per se. HEre we just call FastA.summary()
from sequana.fastq import FastQ
from sequana import FastQC
for filename in names:
ff = FastQC(filename, max_sample=1e6, verbose=False)
stats = ff.get_stats()
print(stats)
elif module == "bam":
import pandas as pd
from sequana import BAM
for filename in names:
ff = BAM(filename)
stats = ff.get_stats()
df = pd.Series(stats).to_frame().T
print(df)
elif module == "gff":
import pandas as pd
from sequana import GFF3
for filename in names:
ff = GFF3(filename)
print("#filename: {}".format(filename))
print("#Number of entries per genetic type:")
print(ff.df.value_counts('type').to_string())
print("#Number of duplicated attribute (if any) per attribute:")
ff.get_duplicated_attributes_per_type()
@main.command()
@click.option("--file1", type=click.Path(),
default=None, required=True,
help="""The first input RNA-seq table to compare""")
@click.option("--file2", type=click.Path(),
default=None, required=True,
help="""The second input RNA-seq table to compare""")
@common_logger
def rnaseq_compare(**kwargs):
"""Compare 2 tables created by the 'sequana rnadiff' command"""
from sequana.compare import RNADiffCompare
c = RNADiffCompare(kwargs['file1'], kwargs['file2'])
c.plot_volcano_differences()
from pylab import savefig
savefig("sequana_rnaseq_compare_volcano.png", dpi=200)
@main.command()
@click.option("--annotation", type=click.Path(),
default=None,
help="""The annotation GFF file used to perform the feature count""")
@click.option("--report-only",
is_flag=True,
default=False,
help="""Generate report assuming results are already present""")
@click.option("--output-directory", type=click.Path(),
default="rnadiff",
help="""Output directory where are saved the results""")
@click.option("--features", type=click.Path(),
default="all_features.out",
help="""The Counts from feature counts. This should be the output of the
sequana_rnaseq pipeline all_features.out """)
#FIXME I think it would be better to have a single file with multiple columns
#for alternative condition (specified using the "condition" option)
@click.option("--design", type=click.Path(),
default="design.csv", help="""It should have been generated by sequana_rnaseq. If
not, it must be a comma separated file with two columns. One for the label to be
found in the --features file and one column with the condition to which it
belong. E.g. with 3 replicates and 2 conditions. It should look like:
\b
label,condition
WT1,WT
WT2,WT
WT3,WT
file1,cond1
fileother,cond1
""")
@click.option("--condition", type=str,
default="condition", help="""The name of the column in design.csv to use as condition
for the differential analysis. Default is 'condition'""")
@click.option("--feature-name",
default="gene",
help="""The feature name compatible with your GFF. Default is 'gene'""")
@click.option("--attribute-name",
default="ID",
help="""The attribute used as identifier. compatible with your GFF. Default is 'ID'""")
@click.option("--reference", type=click.Path(),
default=None,
help="""The reference to test DGE against. If provided, conditions not
involving the reference are ignored. Otherwise all combinations are
tested""")
@click.option("--comparisons", type=click.Path(),
default=None,
help="""Not yet implemented. By default, all comparisons are computed""")
@click.option("--cooks-cutoff", type=click.Path(),
default=None,
help="""if none, let DESeq2 choose the cutoff""")
@click.option("--independent-filtering/--no-independent-filtering",
default=False,
help="""Do not perform independent_filtering by default. low counts may not
have adjusted pvalues otherwise""")
@click.option("--beta-prior/--no-beta-prior",
default=False,
help="Use beta priori or not. Default is no beta prior")
@click.option("--fit-type",
default="parametric",
help="DESeq2 type of fit. Default is 'parametric'")
@common_logger
def rnadiff(**kwargs):
"""Perform RNA-seq differential analysis.
This command performs the differential analysis of gene expression. The
analysis is performed on feature counts generated by a RNA-seq analysis
(see e.g. https://github.com/sequana/rnaseq pipeline). The analysis is
performed by DESeq2. A HTML report is created as well as a set of output
files, including summary table of the analysis.
To perform this analysis, you will need the GFF file used during the RNA-seq
analysis, the feature stored altogether in a single file, an experimental
design file, and the feature and attribute used during the feature count.
Here is an example:
\b
sequana rnadiff --annotation Lepto.gff
--design design.csv --features all_features.out
--feature-name gene --attribute-name ID
"""
import pandas as pd
from sequana.featurecounts import FeatureCount
from sequana.rnadiff import RNADiffAnalysis, RNADesign
from sequana.modules_report.rnadiff import RNAdiffModule
logger.setLevel(kwargs['logger'])
outdir = kwargs['output_directory']
feature = kwargs['feature_name']
attribute = kwargs['attribute_name']
design = kwargs['design']
reference=kwargs['reference']
if kwargs['annotation']:
gff = kwargs['annotation']
logger.info(f"Checking annotation file")
from sequana import GFF3
g = GFF3(gff) #.save_annotation_to_csv()
if feature not in g.features:
logger.critical(f"{feature} not found in the GFF. Most probably a wrong feature name")
attributes = g.get_attributes(feature)
if attribute not in attributes:
logger.critical(f"{attribute} not found in the GFF for the provided feature. Most probably a wrong feature name. Please change --attribute-name option or do not provide any GFF")
sys.exit(1)
else:
gff = None
design_check = RNADesign(design, reference=reference)
compa_csv = kwargs['comparisons']
if compa_csv:
compa_df = pd.read_csv(compa_csv)
comparisons = list(zip(compa_df["alternative"], compa_df["reference"]))
else:
comparisons = design_check.comparisons
if kwargs['report_only'] is False:
logger.info(f"Processing features counts and saving into {outdir}/light_counts.csv")
fc = FeatureCount(kwargs['features'])
from easydev import mkdirs
mkdirs(f"{outdir}")
fc.rnadiff_df.to_csv(f"{outdir}/light_counts.csv")
logger.info(f"Differential analysis to be saved into ./{outdir}")
for k in sorted(["independent_filtering", "beta_prior",
"cooks_cutoff", "fit_type", "reference"]):
logger.info(f" Parameter {k} set to : {kwargs[k]}")
r = RNADiffAnalysis(f"{outdir}/light_counts.csv", design,
condition=kwargs["condition"],
comparisons=comparisons,
fc_feature=feature,
fc_attribute=attribute,
outdir=outdir,
gff=gff,
cooks_cutoff=kwargs.get("cooks_cutoff"),
independent_filtering=kwargs.get("independent_filtering"),
beta_prior=kwargs.get("beta_prior"),
fit_type=kwargs.get('fit_type')
)
logger.info(f"Saving output files into {outdir}/rnadiff.csv")
try:
results = r.run()
results.to_csv(f"{outdir}/rnadiff.csv")
except Exception as err:
logger.error(err)
sys.exit(1)
else:
logger.info(f"DGE done.")
            # cleanup if successful
os.remove(f"{outdir}/rnadiff.err")
os.remove(f"{outdir}/rnadiff.out")
os.remove(f"{outdir}/rnadiff_light.R")
logger.info(f"Reporting. Saving in rnadiff.html")
report = RNAdiffModule(outdir, kwargs['design'], gff=gff,
fc_attribute=attribute,
fc_feature=feature,
alpha=0.05,
log2_fc=0,
condition=kwargs["condition"],
annot_cols=None,
pattern="*vs*_degs_DESeq2.csv")
@main.command()
@click.option("--mart", default="ENSEMBL_MART_ENSEMBL",
show_default=True,
help="A valid mart name")
@click.option("--dataset", required=True,
help="A valid dataset name. e.g. mmusculus_gene_ensembl, hsapiens_gene_ensembl")
@click.option("--attributes", multiple=True,
default=["ensembl_gene_id","go_id","entrezgene_id","external_gene_name"],
show_default=True,
help="A list of valid attributes to look for in the dataset")
@click.option("--output", default=None,
help="""by default save results into a CSV file named
biomart_<dataset>_<YEAR>_<MONTH>_<DAY>.csv""")
@common_logger
def biomart(**kwargs):
"""Retrieve information from biomart and save into CSV file
This command uses BioMart from BioServices to introspect a MART service
(--mart) and a specific dataset (default to mmusculus_gene_ensembl). Then,
for all ensembl IDs, it will fetch the requested attributes (--attributes).
Finally, it saves the CSV file into an output file (--output). This takes
about 5-10 minutes to retrieve the data depending on the connection.
"""
print(kwargs)
logger.setLevel(kwargs["logger"])
mart = kwargs['mart']
attributes = kwargs['attributes']
dataset = kwargs["dataset"]
from sequana.enrichment import Mart
conv = Mart(dataset, mart)
df = conv.query(attributes)
conv.save(df, filename=kwargs['output'])
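# Illustrative invocation (not from the original source; dataset name taken from the option
# help above):
#
#     sequana biomart --dataset mmusculus_gene_ensembl --output biomart_mmusculus.csv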
@main.command()
@click.option("-i", "--input", required=True,
help="The salmon input file.")
@click.option("-o", "--output", required=True,
help="The feature counts output file")
@click.option("-f", "--gff", required=True,
help="A GFF file compatible with your salmon file")
@click.option("-a", "--attribute", default="ID",
help="A valid attribute to be found in the GFF file and salmon input")
@click.option("-a", "--feature", default="gene",
help="A valid feature")
def salmon(**kwargs):
"""Convert output of Salmon into a feature counts file """
from sequana import salmon
salmon_input = kwargs['input']
output = kwargs["output"]
if os.path.exists(salmon_input) is False:
logger.critical("Input file does not exists ({})".format(salmon_input))
gff = kwargs["gff"]
attribute = kwargs['attribute']
feature = kwargs['feature']
# reads file generated by salmon and generated count file as expected by
# DGE.
s = salmon.Salmon(salmon_input, gff)
s.save_feature_counts(output, feature=feature, attribute=attribute)
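# Illustrative invocation (not from the original source; quant.sf is assumed to be the salmon
# quantification output and annotation.gff a matching GFF):
#
#     sequana salmon -i quant.sf -o feature_counts.out -f annotation.gff --attribute ID --feature gene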
@main.command()
@click.option("-i", "--input", required=True)
@click.option("-o", "--output", required=True)
def gtf_fixer(**kwargs):
"""Reads GTF and fix known issues (exon and genes uniqueness)"""
from sequana.gtf import GTFFixer
gtf = GTFFixer(kwargs['input'])
res = gtf.fix_exons_uniqueness(kwargs['output'])
#res = gtf.fix_exons_uniqueness(kwargs['output'])
print(res)
# This will be a complex command to provide HTML summary page for
# input files (e.g. bam), or results from pipelines. For each module,
# we should have corresponding option that starts with the module's name
# This can also take as input various types of data (e.g. FastA)
@main.command()
@click.argument("name", type=click.Path(exists=True),
nargs=1)
@click.option("--annotation-attribute", type=click.STRING,
#required=True,
default="Name",
help="a valid taxon identifiers")
@click.option("--panther-taxon", type=click.INT,
#required=True,
default=0,
help="a valid taxon identifiers")
@click.option("--kegg-name", type=click.STRING,
default=None,
help="a valid KEGG name (automatically filled for 9606 (human) and 10090 (mmusculus)")
@click.option("--log2-foldchange-cutoff", type=click.FLOAT,
default=1,
show_default=True,
help="remove events with absolute log2 fold change below this value")
@click.option("--padj-cutoff", type=click.FLOAT,
default=0.05,
show_default=True,
help="remove events with pvalue abobe this value default (0.05).")
@click.option("--biomart", type=click.STRING,
default=None,
help="""you may need a biomart mapping of your identifier for the kegg
pathways analysis. If you do not have this file, you can use 'sequana biomart'
command""")
@click.option("--go-only", type=click.BOOL,
default=False,
is_flag=True,
help="""to run only panther db enrichment""")
@click.option("--plot-linearx", type=click.BOOL,
default=False,
is_flag=True,
help="""Default is log2 fold enrichment in the plots. use this to use linear scale""")
@click.option("--compute-levels", type=click.BOOL,
default=False,
is_flag=True,
help="""to compute the GO levels (slow) in the plots""")
@click.option("--max-genes", type=click.INT,
default=2000,
help="""Maximum number of genes (up or down) to use in PantherDB, which is limited to about 3000""")
@click.option("--kegg-only", type=click.BOOL,
default=False,
is_flag=True,
help="""to run only kegg patways enrichment""")
@click.option("--kegg-pathways-directory", type=click.Path(),
default=None,
help="""a place where to find the pathways for each organism""")
@click.option("--kegg-background", type=click.INT,
default=None,
help="""a background for kegg enrichment. If None, set to number of genes found in KEGG""")
@common_logger
def enrichment(**kwargs):
"""Create a HTML report for various sequana out
\b
* enrichment: the output of RNADiff pipeline
Example for the enrichment module:
sequana enrichment rnadiff.csv --panther-taxon 10090
--log2-foldchange-cutoff 2 --kegg-only
The KEGG pathways are loaded and it may take time. Once done, they are saved
in kegg_pathways/organism and be loaded next time:
sequana enrichment rnadiff/rnadiff.csv
--panther-taxon 189518 \
--log2-foldchange-cutoff 2 --kegg-only \
--kegg-name lbi\
--annotation file.gff
"""
import pandas as pd
from sequana.modules_report.enrichment import Enrichment
logger.setLevel(kwargs['logger'])
taxon = kwargs['panther_taxon']
if taxon == 0:
logger.error("You must provide a taxon with --panther-taxon")
return
keggname = kwargs['kegg_name']
params = {"padj": kwargs['padj_cutoff'],
"log2_fc": kwargs['log2_foldchange_cutoff'],
"max_entries": kwargs['max_genes'],
"mapper": kwargs['biomart'],
"kegg_background": kwargs['kegg_background'],
"preload_directory": kwargs['kegg_pathways_directory'],
"plot_logx": not kwargs['plot_linearx'],
"plot_compute_levels": kwargs['compute_levels'],
}
filename = kwargs['biomart']
if filename and os.path.exists(filename) is False:
logger.error("{} does not exists".format(filename))
sys.exit(1)
filename = kwargs['kegg_pathways_directory']
if filename and os.path.exists(filename) is False:
logger.error("{} does not exists".format(filename))
sys.exit(1)
rnadiff_file = kwargs['name']
logger.info(f"Reading {rnadiff_file}")
rnadiff = pd.read_csv(rnadiff_file, index_col=0, header=[0,1])
# now that we have loaded all results from a rnadiff analysis, let us
# perform the enrichment for each comparison found in the file
annot_col = kwargs['annotation_attribute']
Nmax = kwargs['max_genes']
from sequana.utils import config
for compa in rnadiff.columns.levels[0]:
if compa not in ['statistics', 'annotation']:
# get gene list
df = rnadiff[compa].copy()
# we add the annotation
for x in rnadiff['annotation'].columns:
df[x] = rnadiff['annotation'][x]
# now we find the gene lists
padj = params['padj']
log2fc = params['log2_fc']
df = df.query("(log2FoldChange >=@log2fc or log2FoldChange<=-@log2fc) and padj <= @padj")
df.reset_index(inplace=True)
dfup = df.sort_values("log2FoldChange", ascending=False)
up_genes = list(dfup.query("log2FoldChange > 0")[annot_col])[:Nmax]
dfdown = df.sort_values("log2FoldChange", ascending=True)
down_genes = list(dfdown.query("log2FoldChange < 0")[annot_col])[:Nmax]
all_genes = list(
df.sort_values("log2FoldChange", key=abs,ascending=False)[annot_col]
)[:Nmax]
gene_dict = {
"up": up_genes,
"down": down_genes,
"all": all_genes,
}
Nup = len(up_genes)
Ndown = len(down_genes)
N = Nup + Ndown
logger.info(f"Computing enrichment for the {compa} case")
logger.info(f"Found {Nup} genes up-regulated, {Ndown} down regulated ({N} in total).")
config.output_dir = f"enrichment/{compa}"
            try:
                os.mkdir("enrichment")
            except OSError:
                pass
report = Enrichment(gene_dict, taxon, df,
kegg_organism=keggname,
enrichment_params=params,
go_only=kwargs["go_only"],
kegg_only=kwargs["kegg_only"],
command=" ".join(['sequana'] + sys.argv[1:]))
@main.command()
@click.option("--search-kegg", type=click.Path(),
default=None,
help="""Search a pattern amongst all KEGG organism""")
@click.option("--search-panther", type=click.Path(),
default=None,
help="""Search a pattern amongst all KEGG organism""")
@common_logger
def taxonomy(**kwargs):
"""Tool to retrieve taxonomic information.
sequana taxonomy --search-kegg leptospira
"""
if kwargs['search_kegg']:
from sequana.kegg import KEGGHelper
k = KEGGHelper()
results = k.search(kwargs['search_kegg'].lower())
print(results)
elif kwargs['search_panther']:
import pandas as pd
from sequana import sequana_data
df = pd.read_csv(sequana_data("panther.csv"), index_col=0)
pattern = kwargs['search_panther']
f1 = df[[True if pattern in x else False for x in df['name']]]
f2 = df[[True if pattern in x else False for x in df.short_name]]
f3 = df[[True if pattern in x else False for x in df.long_name]]
indices = list(f1.index) + list(f2.index) + list(f3.index)
if len(indices) == 0:
# maybe it is a taxon ID ?
f4 = df[[True if pattern in str(x) else False for x in df.taxon_id]]
indices = list(f4.index)
indices = set(indices)
print(df.loc[indices])
@main.command()
@click.argument("gff_filename", type=click.Path(exists=True))
@common_logger
def gff2gtf(**kwargs):
"""Convert a GFF file into GTF
    This is an experimental conversion. Use with care.
"""
filename = kwargs["gff_filename"]
assert filename.endswith(".gff") or filename.endswith(".gff3")
from sequana.gff3 import GFF3
g = GFF3(filename)
if filename.endswith(".gff"):
g.to_gtf(os.path.basename(filename).replace(".gff", ".gtf"))
elif filename.endswith(".gff3"):
g.to_gtf(os.path.basename(filename).replace(".gff3", ".gtf"))
|
import datetime as dt
from library.ftx.base import AsyncBaseApiClass
class Wallet(AsyncBaseApiClass):
"""https://docs.ftx.com/#account"""
def __init__(self, api_key: str, secret_key: str, subaccount_name: str = ''):
super().__init__(api_key, secret_key, subaccount_name)
async def get_coins(self):
""" https://docs.ftx.com/#get-coins """
return await self.get('/api/wallet/coins')
async def get_balances(self):
""" https://docs.ftx.com/#get-balances """
return await self.get('/api/wallet/balances')
async def get_balances_of_all_accounts(self):
"""
https://docs.ftx.com/#get-balances-of-all-accounts
The response will contain an object whose keys are the subaccount names.
The main account will appear under the key main.
"""
return await self.get('/api/wallet/all_balances')
async def get_deposit_address(self, coin: str, method: str):
"""
https://docs.ftx.com/#get-deposit-address
For ERC20 tokens : method=erc20
For TRC20 tokens : method=trx
For SPL tokens : method=sol
For Omni tokens : method=omni
For BEP2 tokens : method=bep2
"""
return await self.get(f'/api/wallet/deposit_address/{coin}?method={method}')
async def get_deposit_history(self, start_time: dt.datetime = None, end_time: dt.datetime = None):
""" https://docs.ftx.com/#get-deposit-history """
return await self.get('/api/wallet/deposits', start_time=start_time, end_time=end_time)
async def get_withdrawal_history(self, start_time: dt.datetime = None, end_time: dt.datetime = None):
""" https://docs.ftx.com/#get-withdrawal-history """
return await self.get('/api/wallet/withdrawals', start_time=start_time, end_time=end_time)
async def request_withdrawal(self, coin: str, size: float, address: str, tag: str = None, method: str = None, password: str = None, code: str = None):
"""
https://docs.ftx.com/#request-withdrawal
Args:
coin (str) : [USDTBEAR] coin to withdraw
size (float) : [20.2] amount to withdraw
address (str) : [0x83a12795...] address to send to
tag (str, optional) : string text
            method (str, optional) : blockchain to use for withdrawal. Defaults to None.
            password (str, optional) : withdrawal password if it is required for your account. Defaults to None.
            code (str, optional) : 2fa code if it is required for your account. Defaults to None.
        Returns:
            Dict[str, Any]: result
"""
return await self.post('/api/wallet/withdrawals', data={'coin': coin, 'size': size, 'address': address, 'tag': tag, 'method': method, 'password': password, 'code': code})
async def get_airdrops(self, start_time: dt.datetime = None, end_time: dt.datetime = None):
"""https://docs.ftx.com/#get-airdrops
This endpoint provides you with updates to your AMPL balances based on AMPL rebases. """
return await self.get('/api/wallet/airdrops', start_time=start_time, end_time=end_time)
async def get_withdrawal_fees(self, coin: str, size: float, address: str, tag: str = None):
"""
https://docs.ftx.com/#get-withdrawal-fees
Args:
coin (str) : ["USDC"] coin to withdraw
size (float) : [20.2] amount to withdraw
address (str) : ["0x83a12..."] address to send to
            tag (str, optional) : [None]. Defaults to None.
        Returns:
            Dict[str, Any]: server response
"""
return await self.post('/api/wallet/withdrawal_fee', data={'coin': coin, 'size': size, 'address': address, 'tag': tag})
async def get_saved_addresses(self, coin: str = None):
"""
https://docs.ftx.com/#get-saved-addresses
This endpoint provides you with your saved addresses.
"""
        return await self.get('/api/wallet/saved_addresses', coin=coin)
async def create_saved_addresses(self, coin: str, address: str, addressName: str, isPrimeTrust: bool, tag: str = None):
"""
https://docs.ftx.com/#create-saved-addresses
Args:
coin (str) : [ETH]
address (str) : ["0xb2EA1CC3..."]
addressName (str) : [MetaMask]
isPrimeTrust (bool) : [false]
            tag (str, optional) : [null]. Defaults to None.
        Returns:
            Dict[str, Any]: [description]
"""
return await self.post('/api/wallet/saved_addresses', data={'coin': coin, 'addressName': addressName, 'address': address, 'isPrimeTrust': isPrimeTrust, 'tag': tag})
async def get_saved_address(self, coin: str = ''):
""" https://docs.ftx.com/#get-saved-addresses """
return await self.get('/api/wallet/saved_addresses', coin=coin)
async def delete_saved_address(self, saved_address_id: int = ''):
""" https://docs.ftx.com/#delete-saved-addresses """
        # NOTE: per the linked docs this endpoint expects an HTTP DELETE request;
        # adjust the helper used here if the base client exposes a delete method.
        return await self.get(f'/api/wallet/saved_addresses/{saved_address_id}')
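    # --- Added usage sketch (illustrative, not part of the original client). It assumes an
    # instance of this class is constructed elsewhere (e.g. with API credentials) and bound
    # to a variable named ``client``; that name and the asyncio wiring are assumptions.
    #
    #   import asyncio
    #   import datetime as dt
    #
    #   async def show_wallet(client):
    #       balances = await client.get_balances()
    #       deposits = await client.get_deposit_history(
    #           start_time=dt.datetime(2021, 1, 1), end_time=dt.datetime(2021, 2, 1))
    #       print(balances)
    #       print(deposits)
    #
    #   # asyncio.run(show_wallet(client))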
|
import click
from .adni_to_bids import adni_to_bids_cli
from .aibl_to_bids import aibl_to_bids_cli
from .nifd_to_bids import nifd_to_bids_cli
from .oasis3_to_bids import oasis3_to_bids_cli
from .oasis_to_bids import oasis_to_bids_cli
@click.group("convert")
def cli() -> None:
"""Convert popular neuroimaging datasets to the BIDS format."""
pass
cli.add_command(adni_to_bids_cli.cli)
cli.add_command(aibl_to_bids_cli.cli)
cli.add_command(nifd_to_bids_cli.cli)
cli.add_command(oasis_to_bids_cli.cli)
# cli.add_command(oasis3_to_bids_cli.cli)
if __name__ == "__main__":
cli()
|
__author__ = 'jwely'
__all__ = ["gap_fill_interpolate"]
from dnppy import core
from to_numpy import to_numpy
from is_rast import is_rast
import arcpy
import os
def gap_fill_interpolate(in_rasterpath, out_rasterpath, model = None,
max_cell_dist = None, min_points = None):
"""
Fills gaps in raster data by spatial kriging interpolation. This should only
be used to fill small gaps in continuous datasets (like a DEM), and in
instances where it makes sense. This function creates a feature class
layer of points where pixels are not NoData, then performs a "kriging"
interpolation on the point data to rebuild a uniform grid with a value
at every location, thus filling gaps.
WARNING: This script is processing intensive and may take a while to run
even for modestly sized datasets.
:param in_rasterpath: input filepath to raster to fill gaps
:param out_rasterpath: filepath to store output gap filled raster in
:param model: type of kriging model to run, options include
"SPHERICAL", "CIRCULAR", "EXPONENTIAL",
"GAUSSIAN", and "LINEAR"
:param max_cell_dist: The maximum number of cells to interpolate between,
data gaps which do not have at least "min_points"
points within this distance will not be filled.
:param min_points: Minimum number of surrounding points to use in determining
value at missing cell.
:return out_rasterpath: Returns path to file created by this function
"""
# check inputs
if not is_rast(in_rasterpath):
raise Exception("input raster path {0} is invalid!".format(in_rasterpath))
if max_cell_dist is None:
max_cell_dist = 10
if min_points is None:
min_points = 4
if model is None:
model = "SPHERICAL"
# set environments
arcpy.env.overwriteOutput = True
arcpy.env.snapRaster = in_rasterpath
arcpy.CheckOutExtension("Spatial")
# make a point shapefile version of input raster
print("Creating point grid from input raster")
head, tail = os.path.split(in_rasterpath)
shp_path = core.create_outname(head, tail, "shp", "shp")
dbf_path = shp_path.replace(".shp",".dbf")
field = "GRID_CODE"
arcpy.RasterToPoint_conversion(in_rasterpath, shp_path, "VALUE")
    # find the bad rows whose GRID_CODE is 1; these should be NoData
print("Finding points with NoData entries")
bad_row_FIDs = []
rows = arcpy.UpdateCursor(dbf_path)
for row in rows:
grid_code = getattr(row, field)
if grid_code == 1:
bad_row_FIDs.append(row.FID)
del rows
# go back through the list and perform the deletions
numbad = len(bad_row_FIDs)
print("Deleting {0} points with NoData values".format(numbad))
rows = arcpy.UpdateCursor(dbf_path)
for i, row in enumerate(rows):
if row.FID in bad_row_FIDs:
rows.deleteRow(row)
    # set up the parameters for kriging
    print("Setting up for kriging")
    _, meta = to_numpy(in_rasterpath)
    cell_size = meta.cellHeight                          # from input raster
    lagSize = None
    majorRange = None
    partialSill = None
    nugget = None
    distance = float(cell_size) * float(max_cell_dist)   # fn input
    kmodel = arcpy.sa.KrigingModelOrdinary(model,
                                           lagSize = lagSize,
                                           majorRange = majorRange,
                                           partialSill = partialSill,
                                           nugget = nugget)
kradius = arcpy.sa.RadiusFixed(distance = distance,
minNumberOfPoints = min_points)
# execute kriging
print("Performing interpolation by kriging, this may take a while!")
outkriging = arcpy.sa.Kriging(shp_path, field, kmodel,
cell_size = cell_size,
search_radius = kradius)
outkriging.save(out_rasterpath)
return out_rasterpath
# testing area
if __name__ == "__main__":
inraster = r"C:\Users\jwely\Desktop\Team_Projects\2015_sumer_CO_water\LiDAR_Format_Trial\mosaic\test_mosaic_gaps.tif"
outraster = r"C:\Users\jwely\Desktop\Team_Projects\2015_sumer_CO_water\LiDAR_Format_Trial\mosaic\test_mosaic_filled.tif"
gap_fill_interpolate(inraster, outraster)
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from argparse import RawTextHelpFormatter
from jdcloud_cli.cement.ext.ext_argparse import expose
from jdcloud_cli.controllers.base_controller import BaseController
from jdcloud_cli.client_factory import ClientFactory
from jdcloud_cli.parameter_builder import collect_user_args, collect_user_headers
from jdcloud_cli.printer import Printer
from jdcloud_cli.skeleton import Skeleton
class PortalController(BaseController):
class Meta:
label = 'portal'
        help = 'JD Cloud portal announcement API'
        description = '''
        portal cli subcommand, providing interfaces for announcement operations.
        OpenAPI documentation: https://docs.jdcloud.com/cn/xxx/api/overview
        '''
stacked_on = 'base'
stacked_type = 'nested'
@expose(
arguments=[
            (['--region-id'], dict(help="""(string) Region ID (cn-north-1: North China - Beijing) """, dest='regionId', required=False)),
            (['--url'], dict(help="""(string) product url """, dest='url', required=True)),
            (['--lang'], dict(help="""(string) language, Chinese: cn, English: en """, dest='lang', required=True)),
            (['--ak'], dict(help="""(string) external access key (ak); """, dest='ak', required=True)),
            (['--input-json'], dict(help='(json) Pass parameters as a json string or an absolute file path.\nString example: --input-json \'{"field":"value"}\';\nFile example: --input-json file:///xxxx.json', dest='input_json', required=False)),
            (['--headers'], dict(help="""(json) User-defined headers, e.g. '{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
        help=''' Query detailed information of a product; ''',
        description='''
            Query detailed information of a product.
            Example: jdc portal describe-product --url xxx --lang xxx --ak xxx
        ''',
)
def describe_product(self):
client_factory = ClientFactory('portal')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.portal.apis.DescribeProductRequest import DescribeProductRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DescribeProductRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
            (['--region-id'], dict(help="""(string) Region ID (cn-north-1: North China - Beijing) """, dest='regionId', required=False)),
            (['--id'], dict(help="""(int) language type; Chinese: cn; English: en; """, dest='id', type=int, required=True)),
            (['--ak'], dict(help="""(string) external access key (ak); """, dest='ak', required=True)),
            (['--input-json'], dict(help='(json) Pass parameters as a json string or an absolute file path.\nString example: --input-json \'{"field":"value"}\';\nFile example: --input-json file:///xxxx.json', dest='input_json', required=False)),
            (['--headers'], dict(help="""(json) User-defined headers, e.g. '{"x-jdcloud-security-token":"abc","test":"123"}'""", dest='headers', required=False)),
],
formatter_class=RawTextHelpFormatter,
        help=''' Product page list query interface; ''',
        description='''
            Product page list query interface.
            Example: jdc portal describe-products-by-id --id 0 --ak xxx
        ''',
)
def describe_products_by_id(self):
client_factory = ClientFactory('portal')
client = client_factory.get(self.app)
if client is None:
return
try:
from jdcloud_sdk.services.portal.apis.DescribeProductsByIdRequest import DescribeProductsByIdRequest
params_dict = collect_user_args(self.app)
headers = collect_user_headers(self.app)
req = DescribeProductsByIdRequest(params_dict, headers)
resp = client.send(req)
Printer.print_result(resp)
except ImportError:
print('{"error":"This api is not supported, please use the newer version"}')
except Exception as e:
print(e)
@expose(
arguments=[
(['--api'], dict(help="""(string) api name """, choices=['describe-product','describe-products-by-id',], required=True)),
],
formatter_class=RawTextHelpFormatter,
        help=''' Generate an empty json skeleton string for a single API ''',
        description='''
            Generate an empty json skeleton string for a single API.
            Example: jdc nc generate-skeleton --api describeContainer ''',
)
def generate_skeleton(self):
skeleton = Skeleton('portal', self.app.pargs.api)
skeleton.show()
|
# tests for correct PEP479 behaviour (introduced in Python 3.5)
# basic case: StopIteration is converted into a RuntimeError
def gen():
yield 1
raise StopIteration
g = gen()
print(next(g))
try:
next(g)
except RuntimeError:
print('RuntimeError')
# trying to continue a failed generator now raises StopIteration
try:
next(g)
except StopIteration:
print('StopIteration')
# throwing a StopIteration which is uncaught will be converted into a RuntimeError
def gen():
yield 1
yield 2
g = gen()
print(next(g))
try:
g.throw(StopIteration)
except RuntimeError:
print('RuntimeError')
# throwing a StopIteration through yield from, will be converted to a RuntimeError
def gen():
yield from range(2)
print('should not get here')
g = gen()
print(next(g))
try:
g.throw(StopIteration)
except RuntimeError:
print('RuntimeError')
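# added sketch for contrast (not one of the original cases): a plain 'return' in a generator
# still just ends iteration with an ordinary StopIteration for the caller; only an explicitly
# raised StopIteration is converted into a RuntimeError
def gen():
    yield 1
    return
g = gen()
print(next(g))
try:
    next(g)
except StopIteration:
    print('StopIteration')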
|
from typing import Optional
from fastapi import APIRouter
from src.controllers.schedule_controller import get_group_schedule, get_teacher_schedule
from src.schemas.schema import x_schedule_header
from src.utils.events import Events
from src.utils.tracking import track
tag = "Schedule"
router = APIRouter()
@router.get("/groups", tags=[tag])
@track(
fmt="value={value}, date_from={date_from}, date_to={date_to}",
event=Events.GET_GROUPS_SCHEDULE,
)
async def groups_schedule(
*,
value: str,
date_from: Optional[str] = None,
date_to: Optional[str] = None,
schedule_url: str = x_schedule_header
):
return await get_group_schedule(
from_date=date_from, to_date=date_to, group=value, schedule_url=schedule_url
)
@router.get("/teachers", tags=[tag])
@track(
fmt="value={value}, date_from={date_from}, date_to={date_to}",
event=Events.GET_TEACHERS_SCHEDULE,
)
async def teachers_schedule(
*,
value: str,
date_from: Optional[str] = None,
date_to: Optional[str] = None,
schedule_url: str = x_schedule_header
):
return await get_teacher_schedule(
from_date=date_from, to_date=date_to, teacher=value, schedule_url=schedule_url
)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2012 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains a class to provide a parsing framework to plaso.
This class contains a base framework class for parsing fileobjects, and
also some implementations that extend it to provide a more comprehensive
parser.
"""
import abc
import csv
import logging
import os
from dfvfs.helpers import text_file
import pyparsing
from plaso.events import text_events
from plaso.lib import errors
from plaso.lib import event
from plaso.lib import lexer
from plaso.lib import timelib
from plaso.lib import utils
from plaso.parsers import interface
import pytz
# Pylint complains about some functions not being implemented that shouldn't
# be since they need to be implemented by children.
# pylint: disable=abstract-method
class SlowLexicalTextParser(interface.BaseParser, lexer.SelfFeederMixIn):
"""Generic text based parser that uses lexer to assist with parsing.
This text parser is based on a rather slow lexer, which makes the
use of this interface highly discouraged. Parsers that already
implement it will most likely all be rewritten to support faster
text parsing implementations.
This text based parser needs to be extended to provide an accurate
list of tokens that define the structure of the log file that the
parser is designed for.
"""
# Define the max number of lines before we determine this is
# not the correct parser.
MAX_LINES = 15
# List of tokens that describe the structure of the log file.
tokens = [
lexer.Token('INITIAL', '(.+)\n', 'ParseString', ''),
]
def __init__(self, local_zone=True):
"""Constructor for the SlowLexicalTextParser.
Args:
local_zone: A boolean value that determines if the entries
in the log file are stored in the local time
zone of the computer that stored it or in a fixed
timezone, like UTC.
"""
# TODO: remove the multiple inheritance.
lexer.SelfFeederMixIn.__init__(self)
interface.BaseParser.__init__(self)
self.line_ready = False
self.attributes = {
'body': '',
'iyear': 0,
'imonth': 0,
'iday': 0,
'time': '',
'hostname': '',
'username': '',
}
self.local_zone = local_zone
self.file_entry = None
def ClearValues(self):
"""Clears all the values inside the attributes dict.
    All values that start with the letter 'i' are considered
    to be integers; otherwise a string value is assumed.
"""
self.line_ready = False
for attr in self.attributes:
if attr[0] == 'i':
self.attributes[attr] = 0
else:
self.attributes[attr] = ''
def ParseIncomplete(self, match=None, **unused_kwargs):
"""Indication that we've got a partial line to match against.
Args:
match: The regular expression match object.
"""
self.attributes['body'] += match.group(0)
self.line_ready = True
def ParseMessage(self, **unused_kwargs):
"""Signal that a line is ready to be parsed."""
self.line_ready = True
def SetMonth(self, match=None, **unused_kwargs):
"""Parses the month.
This is a callback function for the text parser (lexer) and is
called by the corresponding lexer state.
Args:
match: The regular expression match object.
"""
self.attributes['imonth'] = int(
timelib.MONTH_DICT.get(match.group(1).lower(), 1))
def SetDay(self, match=None, **unused_kwargs):
"""Parses the day of the month.
This is a callback function for the text parser (lexer) and is
called by the corresponding lexer state.
Args:
match: The regular expression match object.
"""
self.attributes['iday'] = int(match.group(1))
def SetTime(self, match=None, **unused_kwargs):
"""Set the time attribute.
Args:
match: The regular expression match object.
"""
self.attributes['time'] = match.group(1)
def SetYear(self, match=None, **unused_kwargs):
"""Parses the year.
This is a callback function for the text parser (lexer) and is
called by the corresponding lexer state.
Args:
match: The regular expression match object.
"""
self.attributes['iyear'] = int(match.group(1))
def Parse(self, parser_context, file_entry):
"""Extract data from a text file.
Args:
parser_context: A parser context object (instance of ParserContext).
file_entry: A file entry object (instance of dfvfs.FileEntry).
Yields:
An event object (instance of EventObject).
"""
path_spec_printable = u'{0:s}:{1:s}'.format(
file_entry.path_spec.type_indicator, file_entry.name)
file_object = file_entry.GetFileObject()
self.file_entry = file_entry
# TODO: this is necessary since we inherit from lexer.SelfFeederMixIn.
self.file_object = file_object
    # Start by checking whether or not this is a text file before we proceed
    # any further.
file_object.seek(0, os.SEEK_SET)
if not utils.IsText(file_object.read(40)):
raise errors.UnableToParseFile(u'Not a text file, unable to proceed.')
file_object.seek(0, os.SEEK_SET)
error_count = 0
file_verified = False
    # We need to clear out a few values in the Lexer before continuing.
    # There might be some leftovers from a previous run.
self.error = 0
self.buffer = ''
while True:
_ = self.NextToken()
if self.state == 'INITIAL':
self.entry_offset = getattr(self, 'next_entry_offset', 0)
self.next_entry_offset = file_object.tell() - len(self.buffer)
if not file_verified and self.error >= self.MAX_LINES * 2:
logging.debug(
u'Lexer error count: {0:d} and current state {1:s}'.format(
self.error, self.state))
file_object.close()
raise errors.UnableToParseFile(
u'[{0:s}] unsupported file: {1:s}.'.format(
self.NAME, path_spec_printable))
if self.line_ready:
try:
event_object = self.ParseLine(parser_context)
parser_context.ProduceEvent(
event_object, parser_name=self.NAME, file_entry=file_entry)
file_verified = True
except errors.TimestampNotCorrectlyFormed as exception:
error_count += 1
if file_verified:
logging.debug(
u'[{0:s} VERIFIED] Error count: {1:d} and ERROR: {2:d}'.format(
path_spec_printable, error_count, self.error))
logging.warning(
u'[{0:s}] Unable to parse timestamp with error: {1:s}'.format(
self.NAME, exception))
else:
logging.debug((
u'[{0:s} EVALUATING] Error count: {1:d} and ERROR: '
u'{2:d})').format(path_spec_printable, error_count, self.error))
if error_count >= self.MAX_LINES:
file_object.close()
raise errors.UnableToParseFile(
u'[{0:s}] unsupported file: {1:s}.'.format(
self.NAME, path_spec_printable))
finally:
self.ClearValues()
if self.Empty():
# Try to fill the buffer to prevent the parser from ending prematurely.
self.Feed()
if self.Empty():
break
if not file_verified:
file_object.close()
raise errors.UnableToParseFile(
          u'[{0:s}] unable to parse file: {1:s}.'.format(
self.NAME, path_spec_printable))
file_offset = file_object.get_offset()
if file_offset < file_object.get_size():
logging.error((
u'{0:s} prematurely terminated parsing: {1:s} at offset: '
u'0x{2:08x}.').format(
self.NAME, path_spec_printable, file_offset))
file_object.close()
def ParseString(self, match=None, **unused_kwargs):
"""Return a string with combined values from the lexer.
Args:
match: The regular expression match object.
Returns:
A string that combines the values that are so far
saved from the lexer.
"""
try:
self.attributes['body'] += match.group(1).strip('\n')
except IndexError:
self.attributes['body'] += match.group(0).strip('\n')
  def PrintLine(self):
    """Return a string with combined values from the lexer."""
    year = self.attributes.get('iyear', None)
    month = self.attributes.get('imonth', None)
    day = self.attributes.get('iday', None)
    if None in [year, month, day]:
      date_string = u'[DATE NOT SET]'
    else:
      try:
        date_string = u'{0:04d}-{1:02d}-{2:02d}'.format(
            int(year), int(month), int(day))
      except ValueError:
        date_string = u'[DATE INVALID]'
    time_string = self.attributes.get('time', u'[TIME NOT SET]')
    hostname_string = self.attributes.get('hostname', u'HOSTNAME NOT SET')
    reporter_string = self.attributes.get(
        'reporter', u'[REPORTER NOT SET]')
    body_string = self.attributes.get('body', u'[BODY NOT SET]')
# TODO: this is a work in progress. The reason for the try-catch is that
# the text parser is handed a non-text file and must deal with converting
# arbitrary binary data.
try:
line = u'{0:s} {1:s} [{2:s}] {3:s} => {4:s}'.format(
date_string, time_string, hostname_string, reporter_string,
body_string)
except UnicodeError:
      line = u'Unable to print line due to encoding error.'
return line
def ParseLine(self, parser_context):
"""Return an event object extracted from the current line.
Args:
parser_context: A parser context object (instance of ParserContext).
Returns:
An event object (instance of TextEvent).
"""
if not self.attributes['time']:
raise errors.TimestampNotCorrectlyFormed(
u'Unable to parse timestamp, time not set.')
if not self.attributes['iyear']:
raise errors.TimestampNotCorrectlyFormed(
u'Unable to parse timestamp, year not set.')
times = self.attributes['time'].split(':')
if self.local_zone:
timezone = parser_context.timezone
else:
timezone = pytz.UTC
if len(times) < 3:
raise errors.TimestampNotCorrectlyFormed((
u'Unable to parse timestamp, not of the format HH:MM:SS '
u'[{0:s}]').format(self.PrintLine()))
try:
secs = times[2].split('.')
if len(secs) == 2:
sec, us = secs
else:
sec = times[2]
us = 0
timestamp = timelib.Timestamp.FromTimeParts(
int(self.attributes['iyear']), self.attributes['imonth'],
self.attributes['iday'], int(times[0]), int(times[1]),
int(sec), microseconds=int(us), timezone=timezone)
except ValueError as exception:
raise errors.TimestampNotCorrectlyFormed(
u'Unable to parse: {0:s} with error: {1:s}'.format(
self.PrintLine(), exception))
return self.CreateEvent(
timestamp, getattr(self, 'entry_offset', 0), self.attributes)
# TODO: this is a rough initial implementation to get this working.
def CreateEvent(self, timestamp, offset, attributes):
"""Creates an event.
This function should be overwritten by text parsers that require
to generate specific event object type, the default is TextEvent.
Args:
timestamp: The timestamp time value. The timestamp contains the
number of microseconds since Jan 1, 1970 00:00:00 UTC.
offset: The offset of the event.
attributes: A dict that contains the events attributes.
Returns:
An event object (instance of TextEvent).
"""
return text_events.TextEvent(timestamp, offset, attributes)
class TextCSVParser(interface.BaseParser):
"""An implementation of a simple CSV line-per-entry log files."""
# A list that contains the names of all the fields in the log file.
COLUMNS = []
# A CSV file is comma separated, but this can be overwritten to include
# tab, pipe or other character separation.
VALUE_SEPARATOR = ','
# If there is a header before the lines start it can be defined here, and
# the number of header lines that need to be skipped before the parsing
# starts.
NUMBER_OF_HEADER_LINES = 0
# If there is a special quote character used inside the structured text
# it can be defined here.
QUOTE_CHAR = '"'
# Value that should not appear inside the file, made to test the actual
  # file to see if it conforms to standards.
MAGIC_TEST_STRING = 'RegnThvotturMeistarans'
def VerifyRow(self, unused_parser_context, unused_row):
"""Return a bool indicating whether or not this is the correct parser.
Args:
parser_context: A parser context object (instance of ParserContext).
row: A single row from the CSV file.
Returns:
True if this is the correct parser, False otherwise.
"""
pass
def ParseRow(self, parser_context, row_offset, row, file_entry=None):
"""Parse a line of the log file and extract event objects.
Args:
parser_context: A parser context object (instance of ParserContext).
row_offset: The offset of the row.
row: A dictionary containing all the fields as denoted in the
COLUMNS class list.
file_entry: optional file entry object (instance of dfvfs.FileEntry).
The default is None.
"""
event_object = event.EventObject()
if row_offset is not None:
event_object.offset = row_offset
event_object.row_dict = row
parser_context.ProduceEvent(
event_object, parser_name=self.NAME, file_entry=file_entry)
def Parse(self, parser_context, file_entry):
"""Extract data from a CVS file.
Args:
parser_context: A parser context object (instance of ParserContext).
file_entry: A file entry object (instance of dfvfs.FileEntry).
"""
path_spec_printable = file_entry.path_spec.comparable.replace(u'\n', u';')
file_object = file_entry.GetFileObject()
file_object.seek(0, os.SEEK_SET)
text_file_object = text_file.TextFile(file_object)
    # If we specifically define a number of lines we should skip, do that here.
for _ in range(0, self.NUMBER_OF_HEADER_LINES):
_ = text_file_object.readline()
reader = csv.DictReader(
text_file_object, fieldnames=self.COLUMNS,
restkey=self.MAGIC_TEST_STRING, restval=self.MAGIC_TEST_STRING,
delimiter=self.VALUE_SEPARATOR, quotechar=self.QUOTE_CHAR)
try:
row = reader.next()
except (csv.Error, StopIteration):
file_object.close()
raise errors.UnableToParseFile(
u'[{0:s}] Unable to parse CSV file: {1:s}.'.format(
self.NAME, path_spec_printable))
number_of_columns = len(self.COLUMNS)
number_of_records = len(row)
if number_of_records != number_of_columns:
file_object.close()
raise errors.UnableToParseFile((
u'[{0:s}] Unable to parse CSV file: {1:s}. Wrong number of '
u'records (expected: {2:d}, got: {3:d})').format(
self.NAME, path_spec_printable, number_of_columns,
number_of_records))
for key, value in row.items():
if key == self.MAGIC_TEST_STRING or value == self.MAGIC_TEST_STRING:
file_object.close()
raise errors.UnableToParseFile((
u'[{0:s}] Unable to parse CSV file: {1:s}. Signature '
u'mismatch.').format(self.NAME, path_spec_printable))
if not self.VerifyRow(parser_context, row):
file_object.close()
raise errors.UnableToParseFile((
u'[{0:s}] Unable to parse CSV file: {1:s}. Verification '
u'failed.').format(self.NAME, path_spec_printable))
self.ParseRow(
parser_context, text_file_object.tell(), row, file_entry=file_entry)
for row in reader:
self.ParseRow(
parser_context, text_file_object.tell(), row, file_entry=file_entry)
file_object.close()
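# Added illustrative sketch (not part of plaso): a minimal TextCSVParser subclass.
# The parser name, columns and verification rule below are made-up examples, shown
# only to clarify how NAME, COLUMNS and VerifyRow fit together; ParseRow is inherited.
#
#   class ExampleCommentLogParser(TextCSVParser):
#     """Example parser for a hypothetical 'timestamp,user,message' CSV log."""
#
#     NAME = 'example_comment_log'
#     COLUMNS = ['timestamp', 'user', 'message']
#
#     def VerifyRow(self, unused_parser_context, row):
#       # Accept the file only if the first field looks like an integer timestamp.
#       return row.get('timestamp', '').isdigit()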
def PyParseRangeCheck(lower_bound, upper_bound):
"""Verify that a number is within a defined range.
This is a callback method for pyparsing setParseAction
that verifies that a read number is within a certain range.
To use this method it needs to be defined as a callback method
in setParseAction with the upper and lower bound set as parameters.
Args:
lower_bound: An integer representing the lower bound of the range.
upper_bound: An integer representing the upper bound of the range.
Returns:
A callback method that can be used by pyparsing setParseAction.
"""
def CheckRange(unused_string, unused_location, tokens):
"""Parse the arguments."""
try:
check_number = tokens[0]
except IndexError:
check_number = -1
if check_number < lower_bound:
raise pyparsing.ParseException(
u'Value: {0:d} precedes lower bound: {1:d}'.format(
check_number, lower_bound))
if check_number > upper_bound:
raise pyparsing.ParseException(
u'Value: {0:d} exceeds upper bound: {1:d}'.format(
check_number, upper_bound))
  # Since callback methods for pyparsing need to accept certain parameters
  # and there is no way to define conditions like upper and lower bounds,
  # we need to return a method here that accepts those pyparsing parameters.
return CheckRange
def PyParseIntCast(unused_string, unused_location, tokens):
"""Return an integer from a string.
This is a pyparsing callback method that converts the matched
string into an integer.
The method modifies the content of the tokens list and converts
them all to an integer value.
Args:
unused_string: The original parsed string.
unused_location: The location within the string where the match was made.
tokens: A list of extracted tokens (where the string to be converted is
stored).
"""
# Cast the regular tokens.
for index, token in enumerate(tokens):
try:
tokens[index] = int(token)
except ValueError:
logging.error(u'Unable to cast [{0:s}] to an int, setting to 0'.format(
token))
tokens[index] = 0
  # We also need to cast the dictionary built tokens.
  for key in tokens.keys():
    try:
      tokens[key] = int(tokens[key])
    except ValueError:
      logging.error(
          u'Unable to cast [{0:s} = {1!s}] to an int, setting to 0'.format(
              key, tokens[key]))
      tokens[key] = 0
def PyParseJoinList(unused_string, unused_location, tokens):
"""Return a joined token from a list of tokens.
This is a callback method for pyparsing setParseAction that modifies
the returned token list to join all the elements in the list to a single
token.
Args:
unused_string: The original parsed string.
unused_location: The location within the string where the match was made.
tokens: A list of extracted tokens. This is the list that should be joined
together and stored as a single token.
"""
join_list = []
for token in tokens:
try:
join_list.append(str(token))
except UnicodeDecodeError:
join_list.append(repr(token))
tokens[0] = u''.join(join_list)
del tokens[1:]
class PyparsingConstants(object):
"""A class that maintains constants for pyparsing."""
# Numbers.
INTEGER = pyparsing.Word(pyparsing.nums).setParseAction(PyParseIntCast)
IPV4_OCTET = pyparsing.Word(pyparsing.nums, min=1, max=3).setParseAction(
PyParseIntCast, PyParseRangeCheck(0, 255))
IPV4_ADDRESS = (IPV4_OCTET + ('.' + IPV4_OCTET) * 3).setParseAction(
PyParseJoinList)
# TODO: Fix the IPv6 address specification to be more accurate (8 :, correct
# size, etc).
IPV6_ADDRESS = pyparsing.Word(':' + pyparsing.hexnums).setParseAction(
PyParseJoinList)
# Common words.
MONTH = pyparsing.Word(
pyparsing.string.uppercase, pyparsing.string.lowercase,
exact=3)
# Define date structures.
HYPHEN = pyparsing.Literal('-').suppress()
YEAR = pyparsing.Word(pyparsing.nums, exact=4).setParseAction(
PyParseIntCast)
TWO_DIGITS = pyparsing.Word(pyparsing.nums, exact=2).setParseAction(
PyParseIntCast)
ONE_OR_TWO_DIGITS = pyparsing.Word(
pyparsing.nums, min=1, max=2).setParseAction(PyParseIntCast)
DATE = pyparsing.Group(
YEAR + pyparsing.Suppress('-') + TWO_DIGITS +
pyparsing.Suppress('-') + TWO_DIGITS)
DATE_REV = pyparsing.Group(
TWO_DIGITS + pyparsing.Suppress('-') + TWO_DIGITS +
pyparsing.Suppress('-') + YEAR)
TIME = pyparsing.Group(
TWO_DIGITS + pyparsing.Suppress(':') + TWO_DIGITS +
pyparsing.Suppress(':') + TWO_DIGITS)
TIME_MSEC = TIME + pyparsing.Suppress('.') + INTEGER
DATE_TIME = DATE + TIME
DATE_TIME_MSEC = DATE + TIME_MSEC
COMMENT_LINE_HASH = pyparsing.Literal('#') + pyparsing.SkipTo(
pyparsing.LineEnd())
# TODO: Add more commonly used structs that can be used by parsers.
PID = pyparsing.Word(
pyparsing.nums, min=1, max=5).setParseAction(PyParseIntCast)
class PyparsingSingleLineTextParser(interface.BaseParser):
"""Single line text parser based on the pyparsing library."""
# The actual structure, this needs to be defined by each parser.
  # This is defined as a list of tuples so that more than a single line
# structure can be defined. That way the parser can support more than a
# single type of log entry, despite them all having in common the constraint
# that each log entry is a single line.
# The tuple should have two entries, a key and a structure. This is done to
# keep the structures in an order of priority/preference.
# The key is a comment or an identification that is passed to the ParseRecord
# function so that the developer can identify which structure got parsed.
# The value is the actual pyparsing structure.
LINE_STRUCTURES = []
  # In order for the tool to not read too much data into a buffer to evaluate
  # whether or not the parser is the right one for this file, we specifically
  # define a maximum number of bytes a single line can occupy. This constant
  # can be overwritten by implementations if their format might have a longer
  # line than 400 bytes.
MAX_LINE_LENGTH = 400
  # Define an encoding. If a file is encoded using a specific encoding it is
  # advised to include it here. If this class constant is set, all lines will be
  # decoded prior to being sent to parsing by pyparsing; if not properly set it
  # could negatively affect parsing of the file.
# If this value needs to be calculated on the fly (not a fixed constant for
# this particular file type) it can be done by modifying the self.encoding
# attribute.
ENCODING = ''
def __init__(self):
"""A constructor for the pyparsing assistant."""
super(PyparsingSingleLineTextParser, self).__init__()
self.encoding = self.ENCODING
self._current_offset = 0
# TODO: self._line_structures is a work-around and this needs
# a structural fix.
self._line_structures = self.LINE_STRUCTURES
def _ReadLine(
self, parser_context, file_entry, text_file_object, max_len=0,
quiet=False, depth=0):
"""Read a single line from a text file and return it back.
Args:
parser_context: A parser context object (instance of ParserContext).
file_entry: A file entry object (instance of dfvfs.FileEntry).
text_file_object: A text file object (instance of dfvfs.TextFile).
max_len: If defined determines the maximum number of bytes a single line
can take.
quiet: If True then a decode warning is not displayed.
depth: A threshold of how many newlines we can encounter before bailing
out.
Returns:
A single line read from the file-like object, or the maximum number of
characters (if max_len defined and line longer than the defined size).
"""
if max_len:
line = text_file_object.readline(max_len)
else:
line = text_file_object.readline()
if not line:
return
# If line is empty, skip it and go on.
if line == '\n' or line == '\r\n':
# Max 40 new lines in a row before we bail out.
if depth == 40:
return ''
return self._ReadLine(
parser_context, file_entry, text_file_object, max_len=max_len,
depth=depth + 1)
if not self.encoding:
return line.strip()
try:
decoded_line = line.decode(self.encoding)
return decoded_line.strip()
except UnicodeDecodeError:
if not quiet:
logging.warning((
u'Unable to decode line [{0:s}...] with encoding: {1:s} in '
u'file: {2:s}').format(
repr(line[1:30]), self.encoding,
parser_context.GetDisplayName(file_entry)))
return line.strip()
def Parse(self, parser_context, file_entry):
"""Extract data from a text file using a pyparsing definition.
Args:
parser_context: A parser context object (instance of ParserContext).
file_entry: A file entry object (instance of dfvfs.FileEntry).
Yields:
An event object (instance of EventObject).
"""
# TODO: find a more elegant way for this; currently the mac_wifi and
# syslog parser seem to rely on this member.
self.file_entry = file_entry
file_object = file_entry.GetFileObject()
# TODO: self._line_structures is a work-around and this needs
# a structural fix.
if not self._line_structures:
raise errors.UnableToParseFile(
u'Line structure undeclared, unable to proceed.')
file_object.seek(0, os.SEEK_SET)
text_file_object = text_file.TextFile(file_object)
line = self._ReadLine(
parser_context, file_entry, text_file_object,
max_len=self.MAX_LINE_LENGTH, quiet=True)
if not line:
raise errors.UnableToParseFile(u'Not a text file.')
if len(line) == self.MAX_LINE_LENGTH or len(
line) == self.MAX_LINE_LENGTH - 1:
logging.debug((
u'Trying to read a line and reached the maximum allowed length of '
u'{0:d}. The last few bytes of the line are: {1:s} [parser '
u'{2:s}]').format(
self.MAX_LINE_LENGTH, repr(line[-10:]), self.NAME))
if not utils.IsText(line):
raise errors.UnableToParseFile(u'Not a text file, unable to proceed.')
if not self.VerifyStructure(parser_context, line):
raise errors.UnableToParseFile('Wrong file structure.')
# Set the offset to the beginning of the file.
self._current_offset = 0
# Read every line in the text file.
while line:
parsed_structure = None
use_key = None
# Try to parse the line using all the line structures.
for key, structure in self.LINE_STRUCTURES:
try:
parsed_structure = structure.parseString(line)
except pyparsing.ParseException:
pass
if parsed_structure:
use_key = key
break
if parsed_structure:
parsed_event = self.ParseRecord(
parser_context, use_key, parsed_structure)
if parsed_event:
parsed_event.offset = self._current_offset
parser_context.ProduceEvent(
parsed_event, parser_name=self.NAME, file_entry=file_entry)
else:
logging.warning(u'Unable to parse log line: {0:s}'.format(line))
self._current_offset = text_file_object.get_offset()
line = self._ReadLine(parser_context, file_entry, text_file_object)
file_object.close()
@abc.abstractmethod
def ParseRecord(self, parser_context, key, structure):
"""Parse a single extracted pyparsing structure.
This function takes as an input a parsed pyparsing structure
and produces an EventObject if possible from that structure.
Args:
parser_context: A parser context object (instance of ParserContext).
key: An identification string indicating the name of the parsed
structure.
structure: A pyparsing.ParseResults object from a line in the
log file.
Returns:
An event object (instance of EventObject) or None.
"""
@abc.abstractmethod
def VerifyStructure(self, parser_context, line):
"""Verify the structure of the file and return boolean based on that check.
This function should read enough text from the text file to confirm
that the file is the correct one for this particular parser.
Args:
parser_context: A parser context object (instance of ParserContext).
line: A single line from the text file.
Returns:
True if this is the correct parser, False otherwise.
"""
class PyparsingMultiLineTextParser(PyparsingSingleLineTextParser):
"""Multi line text parser based on the pyparsing library."""
BUFFER_SIZE = 2048
def __init__(self):
"""A constructor for the pyparsing assistant."""
super(PyparsingMultiLineTextParser, self).__init__()
self._buffer = ''
self._buffer_size = self.BUFFER_SIZE
def _FillBuffer(self, filehandle):
"""Fill the buffer."""
if len(self._buffer) > self._buffer_size:
return
self._buffer += filehandle.read(self._buffer_size)
    # If a parser specifically indicates a specific encoding we need
    # to handle the buffer as a unicode string.
    # If decoding fails we fall back to the original raw string.
if self.encoding:
try:
buffer_decoded = self._buffer.decode(self.encoding)
self._buffer = buffer_decoded
except UnicodeDecodeError:
pass
def _NextLine(self, filehandle):
"""Move to the next newline in the buffer."""
throw, _, self._buffer = self._buffer.partition('\n')
if throw.startswith('\r'):
throw = throw[1:]
self._current_offset += 1
self._current_offset += 1 + len(throw)
self._FillBuffer(filehandle)
return throw
def Parse(self, parser_context, file_entry):
"""Parse a text file using a pyparsing definition.
Args:
parser_context: A parser context object (instance of ParserContext).
file_entry: A file entry object (instance of dfvfs.FileEntry).
Yields:
An event object (instance of EventObject).
"""
self.file_entry = file_entry
file_object = file_entry.GetFileObject()
if not self.LINE_STRUCTURES:
raise errors.UnableToParseFile(
u'Line structure undeclared, unable to proceed.')
file_object.seek(0, os.SEEK_SET)
self._buffer = ''
self._FillBuffer(file_object)
if not utils.IsText(self._buffer):
raise errors.UnableToParseFile(u'Not a text file, unable to proceed.')
if not self.VerifyStructure(parser_context, self._buffer):
raise errors.UnableToParseFile('Wrong file structure.')
# Set the offset to the beginning of the file.
self._current_offset = 0
# Read every line in the text file.
while self._buffer:
# Initialize pyparsing objects.
tokens = None
start = 0
end = 0
structure_key = None
# Try to parse the line using all the line structures.
for key, structure in self.LINE_STRUCTURES:
try:
parsed_structure = next(
structure.scanString(self._buffer, maxMatches=1), None)
except pyparsing.ParseException:
continue
if not parsed_structure:
continue
tokens, start, end = parsed_structure
# Only want to parse the structure if it starts
# at the beginning of the buffer.
if start == 0:
structure_key = key
break
if tokens and not start:
parsed_event = self.ParseRecord(parser_context, structure_key, tokens)
if parsed_event:
parsed_event.offset = self._current_offset
parser_context.ProduceEvent(
parsed_event, parser_name=self.NAME, file_entry=file_entry)
self._current_offset += end
self._buffer = self._buffer[end:]
else:
old_line = self._NextLine(file_object)
if old_line:
logging.warning(u'Unable to parse log line: {0:s}'.format(
repr(old_line)))
# Re-fill the buffer.
self._FillBuffer(file_object)
|
from django.urls import path
from django.contrib.auth import views as auth_views
from django.contrib import admin
from . import views
urlpatterns = [
path("signup", views.signup, name="signup"),
path("logout", auth_views.LogoutView.as_view(), name='logout'),
path("login" , auth_views.LoginView.as_view(), name='login'),
]
|
import sqlite3
from flask import Flask, jsonify, json, render_template, request, url_for, redirect, flash
from werkzeug.exceptions import abort
import logging
from multiprocessing import Value
counter = Value('i', 0)
length = Value('i', 0)
# Function to get a database connection.
# This function connects to database with the name `database.db`
def get_db_connection():
connection = sqlite3.connect('database.db')
connection.row_factory = sqlite3.Row
with counter.get_lock():
counter.value += 1
return connection
# Function to get a post using its ID
def get_post(post_id):
connection = get_db_connection()
post = connection.execute('SELECT * FROM posts WHERE id = ?',
(post_id,)).fetchone()
connection.close()
return post
# Define the Flask application
app = Flask(__name__)
app.config['SECRET_KEY'] = 'your secret key'
# Define the main route of the web application
@app.route('/')
def index():
connection = get_db_connection()
posts = connection.execute('SELECT * FROM posts').fetchall()
with length.get_lock():
length.value = len(posts)
connection.close()
return render_template('index.html', posts=posts)
# Define how each individual article is rendered
# If the post ID is not found a 404 page is shown
@app.route('/<int:post_id>')
def post(post_id):
post = get_post(post_id)
if post is None:
return render_template('404.html'), 404
else:
return render_template('post.html', post=post)
# Define the About Us page
@app.route('/about')
def about():
return render_template('about.html')
# Define the post creation functionality
@app.route('/create', methods=('GET', 'POST'))
def create():
if request.method == 'POST':
title = request.form['title']
content = request.form['content']
if not title:
flash('Title is required!')
else:
connection = get_db_connection()
connection.execute('INSERT INTO posts (title, content) VALUES (?, ?)',
(title, content))
connection.commit()
connection.close()
return redirect(url_for('index'))
return render_template('create.html')
@app.route("/healthz")
def healthcheck():
response = app.response_class(
response=json.dumps({"result": "OK - healthy"}),
status=200,
mimetype='application/json'
)
return response
@app.route("/metrics")
def metrics():
return jsonify(db_connection_count=counter.value, post_count=length.value)
# start the application on port 3111
if __name__ == "__main__":
logging.basicConfig(filename='app.log',level=logging.DEBUG)
app.run(host='0.0.0.0', port='3111')
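# Quick manual checks once the app is running (illustrative; routes and port come from the
# definitions above):
#   curl http://localhost:3111/healthz   -> {"result": "OK - healthy"}
#   curl http://localhost:3111/metrics   -> {"db_connection_count": <n>, "post_count": <n>}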
|
# Given a complete binary tree, count the number of nodes in the tree.
#
# Note:
#
# Definition of a complete binary tree: every level except possibly the bottom one is completely filled,
# and all nodes in the bottom level are packed as far to the left as possible. If the bottom level is
# level h, it contains between 1 and 2^h nodes.
#
# Example:
#
# Input:
#     1
#    / \
#   2   3
#  / \  /
# 4   5 6
#
# Output: 6
#
# Source: LeetCode
# Link: https://leetcode-cn.com/problems/count-complete-tree-nodes
# Copyright belongs to LeetCode. Commercial reprints require official authorization;
# non-commercial reprints must cite the source.
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
    # solution 1: plain recursion; it works, but it does not use the "complete binary tree" premise of the problem
# def countNodes(self, root: TreeNode) -> int:
# if not root:
# return 0
# return self.countNodes(root.left) + self.countNodes(root.right) + 1
    # solution 2: use the property that the leftmost path of a complete binary tree gives the tree's maximum height
def countNodes(self, root: TreeNode) -> int:
        # compute the maximum height of the tree, using the property that the leftmost path of a complete binary tree gives its maximum height
def max_deep(r: TreeNode) -> int:
if not r:
return 0
ans = 1
while r.left:
ans += 1
r = r.left
return ans
if not root:
return 0
        # heights computed below do not include the root node
max_deep_l = max_deep(root.left)
max_deep_r = max_deep(root.right)
        # equal heights: the left subtree is a perfect (full) tree
if max_deep_l == max_deep_r:
return 1 + 2 ** max_deep_l - 1 + self.countNodes(root.right)
        # left is taller: the right subtree is a perfect (full) tree
if max_deep_l > max_deep_r:
return 1 + 2 ** max_deep_r - 1 + self.countNodes(root.left)
        # unreachable: in a complete binary tree the left subtree height is >= the right subtree height
return 1
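# Added quick self-check (not part of the original solution): build the tree from the
# example above and verify that countNodes returns 6.
if __name__ == "__main__":
    root = TreeNode(1)
    root.left, root.right = TreeNode(2), TreeNode(3)
    root.left.left, root.left.right = TreeNode(4), TreeNode(5)
    root.right.left = TreeNode(6)
    print(Solution().countNodes(root))  # expected: 6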
|
from pyspark.sql import SparkSession, DataFrame
from pyspark.sql.functions import row_number, rank, dense_rank, percent_rank, ntile, cume_dist, lag, lead, col, avg, \
min, max, sum, round, count, datediff, unix_timestamp, stddev, collect_list, element_at, size, sort_array, \
broadcast, spark_partition_id, lit, coalesce
from pyspark.sql.window import Window
""" 1. Window function in spark
A window function performs a calculation across a set of rows (aka. a frame). The built-in
window functions provided by Spark SQL include two categories:
- Ranking functions:
- Analytic functions:
Window specification
To use window functions, we need to create a window specification. A window specification defines which rows
are included in the frame associated with a given input row. In other words, the window specification defines
the default frame of a window. A window specification can be classified into three categories:
1. PartitionBy specification:
- Created with Window.partitionBy on one or more columns
- All rows that have the same value on the partitionBy column will be in the same frame.
- The aggregation functions can be applied on each frame
        - The window functions cannot be applied.
2. Ordered specification:
- Created by using a partitionBy specification, followed by an orderBy specification
- The frame is not static, it moves when we iterate each row. By default, the frame contains
all previous rows and the currentRow.
- The window function can be applied to each moving frame (i.e. currentRow+allPreviousRow)
- The aggregation functions can be applied to each moving frame. As each row has a different
          frame, the result of the aggregation is different for each row. This is unlike the partitionBy
          specification, where all rows in the same partition have the same result.
3. Custom Range Frame specification: (check exp4)
- Created by using a partitionBy specification,
- Usually followed by an orderBy specification,
- Then followed by "rangeBetween" or "rowsBetween"
- Each row has a corresponding frame which is controlled by rangeBetween or rowsBetween. For example,
rowsBetween(-3,Window.currentRow) means the three rows preceding the current row to the current row.
It defines a frame including the current input row and three rows appearing before the current row.
- Aggregation can be applied on each frame.
In spark SQL, the partition specification are defined by keyword "partitionBy", ordering specification is defined by
keyword "orderBy".
1. Ranking functions:
- rank: returns the rank of rows within a window partition
- dense_rank: returns the rank of rows within a window partition, without any gaps. For example,
if you were ranking a competition using dense_rank and had three people tie for second place,
you would say that all three were in second place and that the next person came in third.
                  Rank, on the other hand, would leave gaps after ties, so the person that came in
                  after the three-way tie would register as coming in fifth.
- percent_rank: returns the relative rank (i.e. percentile) of rows within a window partition.
- ntile(n:Int): returns the ntile group id (from 1 to n inclusive) in an ordered window partition. For
example, if n is 4, the first quarter of the rows will get rank 1, the second quarter will
get 2, the thirds quarter will get 3, and the last will get 4. If the rows are less than n, it
works too.
- row_number: returns a sequential number starting at 1 within a window partition.
2. Analytic functions:
- cume_dist: returns the cumulative distribution of values within a window partition, i.e. the fraction
of rows that are below the current row. N = total number of rows in the partition.
cumeDist(x) = number of values before (and including) x / N. similar to percent_rank()
- first()
- last()
- lag(e:Column,offset:Int,defaultValue:Object): returns the value that is offset rows before the current row,
and null if there is less than offset rows before row. For example, an offset of one will return
the previous row at any given point in the window partition. The defaultValue is optional
- lead(e:Column,offset:Int): returns the value that is offset rows after the current row, and null if
there is less than offset rows after the current row. For example, an offset of one will return
the next row at any given point in the window partition.
- currentRow(): Window function: returns the special frame boundary that represents the current row in
the window partition.
3. Aggregation functions:
All the aggregation function that we showed in S03_GroupByAndAggregation can be used here.
- sum(e:Column): returns the sum of selecting column for each partitions.
- first(e:Column): returns the first value within each partition.
- last(e:Column): returns the last value within each partition.
"""
""" Exp1
We show examples of Ranking functions on ordered frame:
- row_number
- rank
- dense_rank
- percent_rank
- ntile
Note all above window functions require that the frame are ordered. You can try to
replace win_name_ordered by win_name and see what happens.
"""
def exp1(df: DataFrame):
# create a window specification
# This specification contains two partition "Alex", "Bob", each partition is ordered by price in ascending order.
win_name = Window.partitionBy("name")
win_name_ordered = win_name.orderBy("price")
# Create a column with row number
# You can notice the row number restarted from 1 for Bob, because it's in a new partition
df1 = df.withColumn("row_number", row_number().over(win_name_ordered))
print("Exp1: row number over name window order by price")
df1.printSchema()
df1.show()
print("Exp1: show partition id after window functions")
df1.withColumn("partition_id", spark_partition_id()).show(truncate=False)
# create a column with rank
    # Note that for the Alex partition there is no rank 2, because we have two items at rank 1; the third item goes to
    # rank 3. If you want compact rank numbers, use dense_rank.
df2 = df.withColumn("rank", rank().over(win_name_ordered))
print("Exp1: rank over name window order by price")
df2.printSchema()
df2.show()
# create a column with dense rank
    # Note that for the Alex partition, even though we have two items at rank 1, the third item goes to
    # rank 2, not 3.
df3 = df.withColumn("dense_rank", dense_rank().over(win_name_ordered))
print("Exp1: dense rank over name window order by price")
df3.printSchema()
df3.show()
    # create a column with percent rank; the percent is calculated as (rank - 1) / (rows_in_partition - 1)
df4 = df.withColumn("percent_rank", percent_rank().over(win_name_ordered))
print("Exp1: percent rank over name window order by price")
df4.printSchema()
df4.show()
# create a column with ntile
df4 = df.withColumn("ntile_rank", ntile(3).over(win_name_ordered))
print("Exp1: ntile over name window order by price")
df4.printSchema()
df4.show()
""" Exp2
show example of the analytic functions on ordered frame
- cume_dist
- lag
- lead
Note all above window functions require that the frame are ordered.
"""
def exp2(df: DataFrame):
win_name = Window.partitionBy("name")
win_name_ordered = win_name.orderBy("price")
# create a cumulative_distribution column
df1 = df.withColumn("cumulative_distribution", cume_dist().over(win_name_ordered))
print("Exp2 create a cumulative_distribution column")
df1.printSchema()
df1.show()
# create a lag column by using price.
    # note if we set offset to 2, the first two rows of lag are null, and the third row gets the first row value of the
    # price column. If we set offset to 3, the first three rows will be null, and the fourth row gets the first row
    # value.
df2 = df.withColumn("lag", lag("price", 3).over(win_name_ordered))
print("Exp2 create a lag column")
df2.printSchema()
df2.show()
# create a lead column by using price.
    # note if we set offset to 2, the last two rows of lead are null in each partition, and the third-to-last row gets
    # the value of the last row of the price column. If we set offset to 3, the last three rows will be null, and the
    # fourth-to-last row gets the last row value.
df3 = df.withColumn("lead", lead("price", 3).over(win_name_ordered))
print("Exp2 create a lead column")
df3.printSchema()
df3.show()
    # Here we apply lag on the date column with offset 1, so the second row gets the value of the first row. We then
    # apply the datediff function between the current row's date and that lagged value, which gives the days since
    # the last purchase.
    # Using the same logic with lead, we get the days before the next purchase; if we set offset to 2, we get
    # the days before the purchase after next.
df4 = df.withColumn('days_from_last_purchase', datediff('date', lag('date', 1).over(win_name.orderBy(col('date'))))) \
.withColumn('days_before_next_purchase', datediff(lead('date', 1).over(win_name.orderBy(col('date'))), 'date'))
print("Exp2 Practical example of lead and lag")
df4.show()
"""Exp3
Show aggregation functions on ordered frame and basic partitionBy frame
- avg/mean
- min
- max
- sum
In df1, we use a partition window specification, so the result is the same for all rows
that are in the same partition.
In df2, we use an ordered window specification, the result is different for each rows.
"""
def exp3(df: DataFrame):
win_name = Window.partitionBy("name")
win_name_ordered = win_name.orderBy("date")
df1 = df.withColumn("avg", avg(col("price")).over(win_name)) \
.withColumn("sum", sum(col("price")).over(win_name)) \
.withColumn("min", min(col("price")).over(win_name)) \
.withColumn("max", max(col("price")).over(win_name)) \
.withColumn("item_number", count("*").over(win_name)) \
.withColumn("item_list", collect_list(col("product")).over(win_name))
print("Exp3 show aggregation function example on partition window specification")
df1.show(truncate=False)
    # if you apply an aggregation function on a window spec with an order, you will get a cumulative result for each row
df2 = df.withColumn('avg_to_date', round(avg('price').over(win_name_ordered), 2)) \
.withColumn('sum_to_date', sum('price').over(win_name_ordered)) \
.withColumn('max_to_date', max('price').over(win_name_ordered)) \
.withColumn('min_to_date', max('price').over(win_name_ordered)) \
.withColumn('item_number_to_date', count('*').over(win_name_ordered)) \
.withColumn("item_list_to_date", collect_list(col("product")).over(win_name_ordered))
print("Exp3 show aggregation function example on ordered window specification")
df2.show(truncate=False)
""" Exp4
To build range window specifications, we need to use the two following functions
    - rowsBetween(start:Long,end:Long)->WindowSpec : Here start, end are row indices relative to the current row; -1
          means 1 row before the current row, 1 means 1 row after the current row
- rangeBetween(start:Long, end:Long)->WindowSpec : The start, end boundary in rangeBetween is based on row value
relative to currentRow. The value definition of the constant values used in range functions:
-- Window.currentRow = 0
-- Window.unboundedPreceding = Long.MinValue
-- Window.unboundedFollowing = Long.MaxValue
The [start, end] index are all inclusive. Their value can be
- Window.unboundedPreceding
- Window.unboundedFollowing
- Window.currentRow.
- Or a value relative to Window.currentRow, either negative or positive.
Some examples of rowsBetween:
- rowsBetween(Window.currentRow, 2): From current row to the next 2 rows
- rowsBetween(-3, Window.currentRow): From the previous 3 rows to the current row.
- rowsBetween(-1, 2): Frame contains previous row, current row and the next 2 rows
- rowsBetween(Window.currentRow, Window.unboundedFollowing): From current row to all next rows
- rowsBetween(Window.unboundedPreceding, Window.currentRow): From all previous rows to the current row.
- rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing): all rows in the window.
"""
# we have 86400 seconds in a day
def day_to_seconds(day_num: int):
return day_num * 86400
def exp4(df: DataFrame):
win_name = Window.partitionBy("name")
win_name_ordered = win_name.orderBy("date")
# Example of rowsBetween
    # last 2 rows (the current row and the row before it) window specification
last2 = win_name_ordered.rowsBetween(-1, Window.currentRow)
df.withColumn("max_of_last2", max("price").over(last2)).show(truncate=False)
# max of all following row
following = win_name_ordered.rowsBetween(Window.currentRow, Window.unboundedFollowing)
df.withColumn("max_of_following", max("price").over(following)).show(truncate=False)
#
df1 = df.withColumn("unix_date", unix_timestamp("date", "yyyy-MM-dd"))
print("Exp4 convert string date to long unix timestamp")
df1.show(5, truncate=False)
# Example of rangeBetween
# get the avg of a specific range of a window
# 0 is the relative unix_date of current row, the frame boundary of rangeBetween(-day_to_seconds(30), 0)
# for row "Alex|2018-02-18|Gloves |5 |1518908400|" will be (1518908400-(30*86400),1518908400). All rows that
# have unix_date column value in this frame boundary will be included in the frame.
range_30 = win_name.orderBy(col("unix_date")).rangeBetween(-day_to_seconds(30), 0)
df2 = df1.withColumn("30day_moving_avg", avg("price").over(range_30))
print("Exp4 create a column that shows last 30 day avg before current row date")
df2.show(10, truncate=False)
    # get the avg of the 30 days before and 15 days after the current row date
    # Note that stddev for some rows will return null, because it requires at least two
    # observations to calculate a standard deviation.
range_45 = win_name.orderBy("unix_date").rangeBetween(-day_to_seconds(30), day_to_seconds(15))
df3 = df1.withColumn("45day_moving_avg", avg("price").over(range_45)) \
.withColumn("45day_moving_std", stddev("price").over(range_45))
print("Exp4 create a column that shows the avg of 30 day before and 15 days after the current row date")
df3.show(10, truncate=False)
""" Exp5 Calculate Median in a window
mean (avg) and median are commonly used in statistics.
- The mean is cheap to calculate, but outliers can have a large effect on it. For example, take the income of a
  population: if 9 people earn 10 dollars and 1 person earns 1010 dollars, the mean is 1100/10 = 110 dollars,
  which does not represent either group's income.
- The median is expensive to calculate, but in many cases it is more robust than the mean because it filters out
  outlier values. In the same example, the median is 10 dollars, which does represent one group's income.
"""
def exp5(df: DataFrame):
win_name = Window.partitionBy("name")
win_name_ordered = win_name.orderBy("price")
# Rolling median
# we create a column holding the list of prices seen so far, then use element_at to pick the middle element of that list
print("Exp5 Calculate rolling median for price column")
df.withColumn("price_list", collect_list("price").over(win_name_ordered)) \
.withColumn("rolling_median", element_at("price_list", (size("price_list") / 2 + 1).cast("int"))) \
.show(truncate=False)
# Global median with a partition-only frame:
# because the window is not ordered, all elements of the partition fall into the same frame. The problem is that
# the collected list is not ordered, so its middle element is not the median. To correct this, we sort the list
# with sort_array.
print("Exp5 Calculate global median for price column by using partition window and sort_array ")
df.withColumn("price_list", sort_array(collect_list("price").over(win_name))) \
.withColumn("sort_list_median", element_at("price_list", (size("price_list") / 2 + 1).cast("int"))) \
.show(truncate=False)
# Global median with an unbounded frame
# After orderBy, each row gets a growing (rolling) frame by default. To include all rows of the partition,
# we widen the frame with rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing).
print("Exp5 Calculate global median for price column by using range frame")
# create a range window spec that contains all rows of the partition
win_range = win_name_ordered.rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)
df.withColumn("price_list", collect_list("price").over(win_range)) \
.withColumn("range_window_median", element_at("price_list", (size("price_list") / 2 + 1).cast("int"))) \
.show(truncate=False)
# We can also use groupBy and join to get the same result
df1 = df.groupBy("name").agg(sort_array(collect_list("price")).alias("price_list")) \
.select("name", "price_list",
element_at("price_list", (size("price_list") / 2 + 1).cast("int")).alias("groupBy_median"))
df1.show(truncate=False)
# The pyspark.sql.functions.broadcast(df) marks a DataFrame as small enough for use in broadcast joins.
df.join(broadcast(df1), "name", "inner").show(truncate=False)
def main():
spark = SparkSession.builder.master("local[2]").appName("Windows functions").getOrCreate()
data = [('Alex', '2018-10-10', 'Paint', 80),
('Alex', '2018-04-02', 'Ladder', 20),
('Alex', '2018-06-22', 'Stool', 20),
('Alex', '2018-12-09', 'Vacuum', 40),
('Alex', '2018-07-12', 'Bucket', 5),
('Alex', '2018-02-18', 'Gloves', 5),
('Alex', '2018-03-03', 'Brushes', 30),
('Alex', '2018-09-26', 'Sandpaper', 10),
('Bob', '2018-12-09', 'Vacuum', 40),
('Bob', '2018-07-12', 'Bucket', 5),
('Bob', '2018-02-18', 'Gloves', 5),
('Bob', '2018-03-03', 'Brushes', 30),
('Bob', '2018-09-26', 'Sandpaper', 10)]
df = spark.createDataFrame(data, schema=['name', 'date', 'product', 'price'])
print("source data frame: ")
df.printSchema()
df.show(truncate=False)
# run exp1
# exp1(df)
# run exp2
# exp2(df)
# run exp3
# exp3(df)
# run exp4
# exp4(df)
# run exp5
# exp5(df)
if __name__ == "__main__":
main()
|
"""Copy database structures to a new database file."""
# Copyright (c) 2001-2009 ElevenCraft Inc.
# See LICENSE for details.
import os
import schevo.database
from schevo.script.command import Command
from schevo.script import opt
usage = """\
schevo db copy [options] SRCURL DESTURL
SRCURL: The database to copy the internal structures from.
DESTURL: The empty database to copy internal structures to."""
def _parser():
p = opt.parser(usage)
return p
class Copy(Command):
name = 'Copy'
description = 'Copy database structures to a new database file.'
def main(self, arg0, args):
print
print
parser = _parser()
options, args = parser.parse_args(list(args))
if len(args) != 2:
parser.error('Please specify SRCURL and DESTURL.')
src_url, dest_url = args
print 'Copying %r to %r...' % (src_url, dest_url)
schevo.database.copy(src_url, dest_url)
print 'Copy complete.'
start = Copy
|
"""
:mod:`wsgi`
===========
Provide a WSGI callable.
(It could be nice if this was at ``pando:wsgi`` instead of ``pando.wsgi:website``,
but then Website would be instantiated every time you import the ``pando`` module.
Here, it's only instantiated when you pass this to a WSGI server like gunicorn,
spawning, etc.)
"""
from .website import Website
#: This is the WSGI callable, an instance of :class:`.Website`.
website = Website()
#: Alias of ``website``. A number of WSGI servers look for this name by default,
#: for example running ``gunicorn pando.wsgi`` works.
application = website
|
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.widgets import SpanSelector
import SpectrumPlotter
import ImagePlotter
import Image
import collections
import os
from mpl_toolkits.axes_grid1 import host_subplot
import matplotlib
matplotlib.use('TkAgg')
'''Things to do:
Add vertex addition and deletion after creation for patches
Why doesn't it work in multiple figures? --need to keep a reference to each one
Figure out changing contrast of extracted patch with spectrum spanselector
magic wand tool - auto-select regions of high intensity
'''
class SpectrumImagePlotter(object):
def __init__(self, SI, filepath=os.getcwd(), cmap_im='gray'):
'''Plot a 3D spectrum image array
Input: 3D numpy array
Optional argument: filepath to save spectra and images to
Interactive commands on image axis:
enter: plot spectrum underneath current patch mask
e: save image (not including contrast limits)
n: start new polygon
+: make new polygon group (for new spectrum)
up, down arrow keys: move active polygon selection between groups
left, right arrow keys: move active polygon selection to next polygon inside group
m: provide handles on the vertices to adjust the polygon shape
Interactive commands on spectrum axis:
Click and drag over the spectrum to plot the slice in the image axis
e: save spectrum as csv
Interactive commands on contrast axis:
Click and drag to select contrast range
Interactive commands on extracted image axis:
e: Save current patch to png'''
self.filepath = filepath
self.SI = SI
if self.SI.spectrum_units is not None:
self.spec_transform = (self.SI.spectrum_units, self.SI.secondary_units)
else:
self.spec_transform = None
self.fig = plt.figure(figsize = (9,9), facecolor=[0.9, 0.9, 1])
self.image_ax = plt.axes([0., 0.475, 0.45, 0.45])
self.extracted_ax = plt.axes([0.45, 0.475, 0.45, 0.45])
self.spectrum_ax = host_subplot(111)
self.spectrum_ax.set_position([0.075, 0.07, 0.9, 0.35])
self.spectrum_ax.patch.set_facecolor([0.9, 0.9, 1])
self.contrast_ax = plt.axes([0.075, 0.925, 0.8, 0.075])
self.colourbar_ax = plt.axes([0.9, 0.475, 0.05, 0.45])
self.cmap = plt.get_cmap('Dark2')
self.cmap_im = plt.get_cmap(cmap_im)
# Spectrum axis plotting and interactive span
self.extracted_mask = np.zeros(self.SI.size[:2]).astype(bool)
mask3D = np.zeros(self.SI.size).astype(bool)
self.extracted_spectrum = self.SI.ExtractSpectrum(mask3D)
self.SpectrumPlot = SpectrumPlotter.SpectrumManager(
self.extracted_spectrum, self.spectrum_ax, ax_transform = self.spec_transform, cmap=self.cmap)
self.E_span = SpanSelector(self.spectrum_ax, self.SpectrumSpan, 'horizontal',
span_stays = True)
self.Emin_i = 0
self.Emax_i = len(self.extracted_spectrum.intensity)-1
# Contrast histogram plotting and interactive span
self.contrastbins = 256
# Image axis plotting and interactive patches
self.summedim = Image.Image(np.mean(self.SI.data[:, :, self.Emin_i:self.Emax_i], axis = 2))
self.cmin = self.summedim.Imglim[0]
self.cmax = self.summedim.Imglim[1]
self.ImagePlot = ImagePlotter.ImagePlotter(self.summedim,
self.image_ax,
self.colourbar_ax,
cmap = self.cmap_im,
filepath=self.filepath,
polygoncallback=self.ImageKeyCommands)
self.PlotImage()
self.PlotContrastHistogram()
self.extractedim = Image.Image(np.ma.masked_array(self.summedim.data, np.invert(self.extracted_mask)))
self.ExtractedImagePlot = collections.OrderedDict()
self.PlotExtractedImage()
self.connect()
def connect(self):
self.cidkey = self.image_ax.figure.canvas.mpl_connect('key_press_event',
self.keyboard_press)
def keyboard_press(self, event):
if event.inaxes == self.image_ax:
self.ImageKeyCommands(event.key)
elif event.inaxes == self.extracted_ax or event.inaxes == self.spectrum_ax:
self.ExtractedImageKeyCommands(event.key)
def ExtractedImageKeyCommands(self, key):
if key == 'e':
self.SaveSpectrumAndPatch() ## need to fix it so spectrum & patch come out with same name!
## Patches are no longer transparent when saving!
def SaveSpectrumAndPatch(self):
spectrumID = self.ImagePlot.PolygonGroups.currentID
filename_spectrum = os.path.join(self.filepath, self.fig.canvas.get_window_title() + '_Spectrum_'+str(spectrumID)+'.csv')
self.extracted_spectrum.SaveSpectrumAsCSV(filename_spectrum)
filename_patch = os.path.join(self.filepath, self.fig.canvas.get_window_title() + '_Patch_'+str(spectrumID)+'.png')
self.ExtractedImagePlot[self.ImagePlot.PolygonGroups.currentID].save_image(filename_patch)
def ImageKeyCommands(self, key):
if key == 'enter':
MaskState = self.ImagePlot.PolygonGroups.ToggleActiveMask()
if MaskState:
mask = self.ImagePlot.PolygonGroups.GetActiveMask(self.summedim.size)
mask3D = np.reshape(mask,
(self.SI.size[0], self.SI.size[1], 1)) * np.ones((
self.SI.size[0], self.SI.size[1], self.SI.size[2])).astype(bool)
mask_im = np.ma.masked_array(np.ones(np.shape(self.summedim.data))*mask.astype(int), np.invert(mask))
self.extractedim = Image.Image(mask_im)
self.AddExtractedImagePatch(self.ImagePlot.PolygonGroups.currentID)
self.extracted_spectrum = self.SI.ExtractSpectrum(np.invert(mask3D))
self.SpectrumPlot.update_spectrum(self.extracted_spectrum,
self.ImagePlot.PolygonGroups.currentID)
self.SpectrumPlot.make_visible(self.ImagePlot.PolygonGroups.currentID)
else:
self.SpectrumPlot.make_invisible(self.ImagePlot.PolygonGroups.currentID)
self.RemoveExtractedImagePatch(self.ImagePlot.PolygonGroups.currentID)
elif key == 'e':
filename_addon = ('%.4g' % (self.SpectrumPlot.SpectrumPlot.spectrum.SpectrumRange[self.Emin_i])+'to'+
'%.4g' % (self.SpectrumPlot.SpectrumPlot.spectrum.SpectrumRange[self.Emax_i])+
self.SpectrumPlot.SpectrumPlot.spectrum.units)
filename_image = (self.fig.canvas.get_window_title() + '_Image_' + filename_addon + '.png')
filename_colourbar = (self.fig.canvas.get_window_title() + '_Colourbar_' + filename_addon + '.png')
self.ImagePlot.save_image(os.path.join(self.filepath, filename_image))
self.ImagePlot.save_colourbar(os.path.join(self.filepath, filename_colourbar))
else:
self.ImagePlot.image_key_commands(key)
def PlotSpectrum(self):
SpectrumPlot = SpectrumPlotter.SpectrumManager(
self.extracted_spectrum, self.spectrum_ax, self.cmap)
return SpectrumPlot
def PlotContrastHistogram(self):
if isinstance(self.summedim.data, np.ma.core.MaskedArray):
self.summedimhist, self.summedimbins = np.histogram(self.summedim.data.compressed(), bins = self.contrastbins)
else:
self.summedimhist, self.summedimbins = np.histogram(self.summedim.data, bins = self.contrastbins)
self.contrast_ax.cla()
self.contrast_ax.plot(self.summedimbins[:-1], self.summedimhist, color = 'k')
self.contrast_ax.set_axis_off()
self.contrast_span = SpanSelector(self.contrast_ax, self.ContrastSpan, 'horizontal',
span_stays = True, rectprops = dict(alpha = 0.5, facecolor = 'green'))
def PlotImage(self):
self.ImagePlot.RemoveImage()
self.ImagePlot.ReplotImage(self.summedim, clim=[self.cmin, self.cmax])
self.image_ax.set_axis_off()
def PlotExtractedImage(self):
self.extracted_ax.cla()
self.extracted_ax.set_axis_off()
self.ExtractedImagePlot[self.ImagePlot.PolygonGroups.currentID] = ImagePlotter.ImagePlotter(self.summedim, self.extracted_ax, polygoncallback=self.ExtractedImageKeyCommands)
self.ExtractedImagePlot[self.ImagePlot.PolygonGroups.currentID].PlottedImage.set_alpha(0.1)
def AddExtractedImagePatch(self, ID):
self.ExtractedImagePlot[self.ImagePlot.PolygonGroups.currentID] = ImagePlotter.ImagePlotter(self.extractedim, self.extracted_ax, polygoncallback=self.ExtractedImageKeyCommands)
def RemoveExtractedImagePatch(self, ID):
self.ExtractedImagePlot[ID].PlottedImage.remove()
def AdjustContrastExtractedImage(self):
for (ID, image) in self.ExtractedImagePlot.items():
image.PlottedImage.set_clim(vmin = self.cmin, vmax = self.cmax)
def SpectrumSpan(self, Emin, Emax): ##Note: draws sub-pixel Espan, fix?
Emin = np.max((np.round(Emin/self.SI.dispersion) * self.SI.dispersion,
self.SI.SpectrumRange[0]))
Emax = np.min((np.round(Emax/self.SI.dispersion) * self.SI.dispersion,
self.SI.SpectrumRange[-1]))
# searchsorted is robust to floating-point rounding, unlike an exact == lookup
self.Emin_i = np.searchsorted(self.SpectrumPlot.SpectrumPlot.spectrum.SpectrumRange, Emin)
self.Emax_i = np.searchsorted(self.SpectrumPlot.SpectrumPlot.spectrum.SpectrumRange, Emax)
self.summedim = Image.Image(np.mean(self.SI.data[:, :, self.Emin_i:self.Emax_i], axis = 2))
self.cmin = self.summedim.Imglim[0]
self.cmax = self.summedim.Imglim[1]
self.PlotImage()
self.PlotContrastHistogram()
def ContrastSpan(self, cmin, cmax):
self.cmin = np.max((cmin, self.ImagePlot.Image.Imglim[0]))
self.cmax = np.min((cmax, self.ImagePlot.Image.Imglim[1]))
self.PlotImage()
def ShowPlot(self):
plt.show()
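# Editor's usage sketch (illustrative only): the SI argument must be a spectrum-image
# object exposing the attributes used above (.data, .size, .dispersion, .SpectrumRange,
# .spectrum_units, .secondary_units) and an .ExtractSpectrum(mask3D) method, e.g.:
#     si = SpectrumImage(numpy_3d_array)              # hypothetical constructor
#     plotter = SpectrumImagePlotter(si, filepath='/tmp/output')
#     plotter.ShowPlot()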
|
from main import calc_angle_mbc
import unittest
class CalcAngleTests(unittest.TestCase):
def test_angle_one(self):
self.assertEqual(calc_angle_mbc(10, 10), 45)
def test_angle_two(self):
self.assertEqual(calc_angle_mbc(1, 10), 6)
def test_angle_three(self):
self.assertEqual(calc_angle_mbc(100, 1), 89)
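# Editor's note: these expected values are consistent with calc_angle_mbc returning the
# angle MBC of a right triangle rounded to the nearest degree, i.e. round(degrees(atan2(ab, bc))):
# atan2(10, 10) -> 45, atan2(1, 10) -> 6, atan2(100, 1) -> 89. A minimal reference sketch
# (an assumption, not the actual main.calc_angle_mbc):
#     from math import atan2, degrees
#     def calc_angle_mbc(ab, bc):
#         return round(degrees(atan2(ab, bc)))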
if __name__ == '__main__':
unittest.main()
|
from collections import deque
def find_route_dfs(graph, start, end, path=[]):
path = path + [start]
if start == end:
return path
if start not in graph:
return None
for node in graph[start]:
if node not in path:
newpath = find_route_dfs(graph, node, end, path)
if newpath:
return newpath
return None
def find_route_bfs(graph, start, end):
if start not in graph:
return None
queue = deque([(start, [start])])
visited = {start}  # set literal; set(start) would split a multi-character node name into characters
while queue:
(v, path) = queue.popleft()
for next in graph[v]:
if next not in visited:
if next == end:
return path + [next]
visited.add(next)
queue.append((next, path + [next]))
return None
def find_route_bfs2(graph, start, end):
if start not in graph:
return None
if end not in graph:
return None
queue_left = deque([start])
visited_left = {start: [start]}
queue_right = deque([end])
visited_right = {end: [end]}
while queue_left and queue_right:
if queue_left:
v = queue_left.popleft()
for next in graph[v]:
if next not in visited_left:
if next in visited_right:
return visited_left[v] + visited_right[next]
visited_left[next] = visited_left[v] + [next]
queue_left.append(next)
if queue_right:
v = queue_right.popleft()
for next in graph[v]:
if next not in visited_right:
if next in visited_left:
return visited_left[next] + visited_right[v]
visited_right[next] = [next] + visited_right[v]
queue_right.append(next)
return None
graph = {'A': ['B', 'C'],
'B': ['C', 'D'],
'C': ['D'],
'D': ['C'],
'E': ['F'],
'F': ['C']}
print(find_route_dfs(graph, 'A', 'D'))
print(find_route_bfs(graph, 'A', 'D'))
print(find_route_bfs2(graph, 'A', 'D'))
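# Expected output for the graph above (editor's note):
#   ['A', 'B', 'C', 'D']  - DFS follows the first neighbour chain it finds
#   ['A', 'B', 'D']       - BFS returns a shortest path (3 nodes)
#   ['A', 'C', 'D']       - bidirectional BFS meets in the middle, also 3 nodes long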
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Holger Nahrstaedt
# See COPYING for license details.
"""
Helper function for annotations
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
__all__ = ['gradient', 'despike_phasespace3d','excludeoutlier_ellipsoid3d']
def gradient(f):
return np.concatenate((np.array([0]),np.diff(f)))
def excludeoutlier_ellipsoid3d(xi,yi,zi,theta):
"""
This program excludes the points lying outside of an ellipsoid in a
three-dimensional domain
Input
xi : input x data
yi : input y data
zi : input z data
theta : angle between xi and zi
Output
xp : excluded x data
yp : excluded y data
zp : excluded z data
ip : excluded array element number in xi and yi
coef : coefficients for ellipsoid
Example:
[xp,yp,zp,ip,coef] = func_excludeoutlier_ellipsoid3d(f,f_t,f_tt,theta);
Copyright:
Nobuhito Mori, Kyoto University
Holger Nahrstaedt
"""
n = np.max(xi.shape)
_lambda = np.sqrt(2*np.log(n))
xp = np.array([])
yp = np.array([])
zp = np.array([])
ip = np.array([])
#
# --- rotate data
#
#theta = atan2( sum(xi.*zi), sum(xi.^2) );
if theta == 0:
X = xi
Y = yi
Z = zi
else:
R = np.zeros((3,3))
R[0,:] = [np.cos(theta), 0, np.sin(theta)]
R[1,:] = [0,1,0]
R[2,:] = [-np.sin(theta), 0, np.cos(theta)]
X = xi*R[0,0] + yi*R[0,1] + zi*R[0,2]
Y = xi*R[1,0] + yi*R[1,1] + zi*R[1,2]
Z = xi*R[2,0] + yi*R[2,1] + zi*R[2,2]
#test
#plot3(xi,yi,zi,'b*')
#hold on
# plot3(X,Y,Z,'r*')
#hold off
#pause
#
# --- preprocess
#
a = _lambda*np.nanstd(X)
b = _lambda*np.nanstd(Y)
c = _lambda*np.nanstd(Z)
#
# --- main
#
for i in np.arange(n):
x1 = X[i]
y1 = Y[i]
z1 = Z[i]
# point on the ellipsoid
x2 = a*b*c*x1/np.sqrt((a*c*y1)**2+(b**2)*((c**2)*(x1**2)+(a**2)*(z1**2)))
y2 = a*b*c*y1/np.sqrt((a*c*y1)**2+(b**2)*((c**2)*(x1**2)+(a**2)*(z1**2)))
zt = (c**2)* ( 1 - (x2/a)**2 - (y2/b)**2 )
if z1 < 0:
z2 = -np.sqrt(zt)
elif z1 > 0:
z2 = np.sqrt(zt)
else:
z2 = 0
# check outlier from ellipsoid
dis = (x2**2 + y2**2 + z2**2) - (x1**2 + y1**2 + z1**2)
if dis < 0:
ip = np.append(ip,i)
xp = np.append(xp,xi[i])
yp = np.append(yp,yi[i])
zp = np.append(zp,zi[i])
coef = np.zeros(3)
coef[0] = a
coef[1] = b
coef[2] = c
return (xp,yp,zp,ip,coef)
def despike_phasespace3d( fi, i_plot = 0, i_opt=0 ):
"""
This subroutine excludes spike noise from Acoustic Doppler
Velocimetry (ADV) data using phase-space method, using
modified Goring and Nikora (2002) method by Nobuhito Mori (2005).
Further modified by Joseph Ulanowski to remove offset in output (2014).
Input
fi : input data with dimension (n,1)
i_plot : =9 plot results (optional)
i_opt : = 0 or not specified ; return spike noise as NaN
= 1 ; remove spike noise and variable becomes shorter than input length
= 2 ; interpolate NaN using cubic polynomial
Output
fo : output (filtered) data
ip : excluded array element number in fi
Example:
[fo, ip] = func_despike_phasespace3d( fi, 9 );
or
[fo, ip] = func_despike_phasespace3d( fi, 9, 2 );
Copyright:
Holger Nahrstaedt - 2016
Nobuhito Mori
Disaster Prevention Research Institue
Kyoto University
mori@oceanwave.jp
"""
#
# --- initial setup
#
fi = fi.flatten()
# maximum number of iterations
n_iter = 20
n_out = 999
n = np.size(fi)
f_mean = 0 # do not calculate f_mean here, as it will be affected by spikes (was: f_mean = nanmean(fi);)
f = fi # this offset subtraction is unnecessary now (was: f = fi - f_mean;)
_lambda = np.sqrt(2*np.log(n))
#
# --- loop
#
n_loop = 1
while (n_out != 0) and (n_loop <= n_iter):
#
# --- main
#
# step 0
f_mean=f_mean+np.nanmean(f) # accumulate offset value at each step [J.U.]
f = f - np.nanmean(f)
#nanstd(f)
# step 1: first and second derivatives
#f_t = gradient(f);
#f_tt = gradient(f_t);
f_t = gradient(f)
f_tt = gradient(f_t)
# step 2: estimate angle between f and f_tt axis
if n_loop==1:
theta = np.arctan2( np.sum(f*f_tt), np.sum(f**2) )
# step 3: checking outlier in the 3D phase space
[xp,yp,zp,ip,coef] = excludeoutlier_ellipsoid3d(f,f_t,f_tt,theta)
#
# --- excluding data
#
n_nan_1 = np.size(np.where(np.isnan(f)))
f[ip.astype(int)] = np.nan  # np.int and np.NAN were removed from recent NumPy releases
n_nan_2 = np.size(np.where(np.isnan(f)))
n_out = n_nan_2 - n_nan_1;
#
# --- end of loop
#
n_loop = n_loop + 1;
#
# --- post process
#
go = f + f_mean; # add offset back
ip = np.where(np.isnan(go))[0]
if n_loop < n_iter:
print('>> Number of outlier = %d, Number of iteration = %d'%(np.sum(np.isnan(f)),n_loop-1))
else:
print('>> Number of outlier = %d, Number of iteration = %d !!! exceed maximum value !!!'%(np.sum(np.isnan(f)),n_loop-1))
#
# --- interpolation or shorten NaN data
#
if i_opt >= 1:
# remove NaN from data
inan = np.where(~np.isnan(go))[0]
fo = go[inan]
# interpolate NaN data
if i_opt == 2:
x = np.where(~np.isnan(go))[0]
y = go[x]
xi = np.arange(np.size(fi))
fo = interp1d(x, y, kind='cubic')(xi)
else:
# output despiked value as NaN
fo = go
if i_plot == 9:
#theta/pi*180
F = fi - f_mean
F_t = gradient(F)
F_tt = gradient(F_t)
RF = np.zeros((3,3))
RF[0,:] = [np.cos(theta), 0, np.sin(theta)]
RF[1,:] = [0,1,0]
RF[2,:] = [-np.sin(theta), 0, np.cos(theta)]
RB = np.zeros((3,3))
RB[0,:] = [np.cos(theta), 0, -np.sin(theta)]
RB[1,:] = [0,1,0]
RB[2,:] = [np.sin(theta), 0, np.cos(theta)]
# making ellipsoid data
a = coef[0]
b = coef[1]
c = coef[2]
ne = 32;
dt = 2*np.pi/ne
dp = np.pi/ne
t = np.arange(0,2*np.pi,dt)
p = np.arange(0,2*np.pi,dp)
n_t = np.size(t)
n_p = np.size(p)
# making ellipsoid
xe = np.zeros(n_p*n_t+n_p)
ye = np.zeros(n_p*n_t+n_p)
ze = np.zeros(n_p*n_t+n_p)
for it in np.arange(n_t):
for _is in np.arange(n_p):
xe[n_p*it+_is] = a*np.sin(p[_is])*np.cos(t[it])
ye[n_p*it+_is] = b*np.sin(p[_is])*np.sin(t[it])
ze[n_p*it+_is] = c*np.cos(p[_is])
xer = xe*RB[0,0] + ye*RB[0,1] + ze*RB[0,2]
yer = xe*RB[1,0] + ye*RB[1,1] + ze*RB[1,2]
zer = xe*RB[2,0] + ye*RB[2,1] + ze*RB[2,2]
# plot figures
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot(f,f_t,f_tt,'b*',markersize=3)
#hold on
ax.plot(F[ip],F_t[ip],F_tt[ip],'ro',markerfacecolor='r',markersize=5)
ax.plot(xer,yer,zer,'k-');
plt.xlabel('u')
plt.ylabel(r'$\Delta$ u')
# ax.set_zlabel(r'$\Delta^2$ u')
fig2 = plt.figure()
plt.plot(fi,'k-')
plt.plot(ip,fi[ip],'ro')
if i_opt==2:
plt.plot(fo,'r-')
return (fo, ip)
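# Editor's usage sketch on synthetic data (not part of the original module); i_opt=1 drops
# the detected spikes instead of interpolating them.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    t = np.linspace(0, 10, 2000)
    signal = np.sin(2 * np.pi * 0.5 * t) + 0.05 * rng.randn(t.size)
    spike_idx = rng.choice(t.size, 20, replace=False)
    signal[spike_idx] += rng.uniform(-3, 3, 20)
    fo, ip = despike_phasespace3d(signal, i_plot=0, i_opt=1)
    print('detected %d spike samples' % len(ip))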
|
FOREST, PLAINS, MOUNTAIN, HILLS, SWAMP, WATER, DESERT, FROZEN, JUNGLE, WASTELAND, BEACH, NETHER, END, MUSHROOM, MAGICAL.
|
from pyleap import *
elli = Ellipse(200, 200, 140, 60, "red")
def draw(dt):
window.clear()
elli.rotation += 1
elli.draw()
repeat(draw)
run()
|
# import cucim
import math
import numbers
import numpy as np
import uarray as ua
import dask.array as da
from skimage import filters
from skimage.util import apply_parallel
from skimage._backend import scalar_or_array
from skimage._shared.utils import _supported_float_type
# Backend support for skimage.filters
_implemented = {}
def _implements(skimage_func):
"""Decorator adds function to the dictionary of implemented functions"""
def inner(func):
_implemented[skimage_func] = func
return func
return inner
def _get_gaussian_sigmas(ndim, sigma):
nsigmas = np.array(sigma)
if nsigmas.ndim == 0:
nsigmas = np.array(ndim * [nsigmas[()]])
if nsigmas.ndim != 1:
raise RuntimeError(
"Must have a single sigma or a single sequence."
)
if ndim != len(nsigmas):
raise RuntimeError(
"Must have an equal number of sigmas to image dimensions."
)
if not issubclass(nsigmas.dtype.type, numbers.Real):
raise TypeError("Must have real sigmas.")
nsigmas = tuple(nsigmas)
return nsigmas
def _get_gaussian_border(ndim, sigma, truncate):
sigma = np.array(_get_gaussian_sigmas(ndim, sigma))
if not isinstance(truncate, numbers.Real):
raise TypeError("Must have a real truncate value.")
half_shape = tuple(np.ceil(sigma * truncate).astype(int))
return half_shape
def _insert(seq, channel_axis, value):
seq = list(seq)
seq.insert(channel_axis, value)
return tuple(seq)
@_implements(filters.gaussian)
def gaussian(image, sigma=1, output=None, mode='nearest', cval=0,
multichannel=False, preserve_range=False, truncate=4.0, *,
channel_axis=None):
if channel_axis is None and multichannel:
channel_axis = -1
multichannel = False
ndim = image.ndim if channel_axis is None else image.ndim - 1
sigma = _get_gaussian_sigmas(ndim, sigma)
depth = _get_gaussian_border(ndim, sigma, truncate)
if channel_axis is not None:
channel_axis = channel_axis % image.ndim
depth = _insert(depth, channel_axis, 0)
sigma = _insert(sigma, channel_axis, 0)
# depth, boundary = _utils._get_depth_boundary(image.ndim, depth, "none")
dtype = _supported_float_type(image.dtype)
if output is not None:
raise ValueError("output is unsupported")
# handled depth and sigma above, so set channel_axis to None
return apply_parallel(
filters.gaussian,
image,
depth=depth,
mode='wrap' if mode == 'wrap' else 'none',
extra_keywords=dict(sigma=sigma,
mode=mode,
cval=cval,
preserve_range=preserve_range,
truncate=truncate,
channel_axis=channel_axis),
dtype=dtype,
channel_axis=None,
)
gaussian.__doc__ = filters.gaussian.__doc__
@_implements(filters.difference_of_gaussians)
def difference_of_gaussians(image, low_sigma, high_sigma=None, *,
mode='nearest', cval=0, channel_axis=None,
truncate=4.0, multichannel=False):
if channel_axis is None and multichannel:
channel_axis = -1
multichannel = False
ndim = image.ndim if channel_axis is None else image.ndim - 1
low_sigma = _get_gaussian_sigmas(ndim, low_sigma)
if high_sigma is not None:
high_sigma = _get_gaussian_sigmas(ndim, high_sigma)
depth = _get_gaussian_border(ndim, high_sigma, truncate)
dtype = _supported_float_type(image.dtype)
if channel_axis is not None:
channel_axis = channel_axis % image.ndim
depth = _insert(depth, channel_axis, 0)
# handled depth above, so set channel_axis to None
return apply_parallel(
filters.difference_of_gaussians,
image,
depth=depth,
mode='wrap' if mode == 'wrap' else 'none',
extra_keywords=dict(low_sigma=low_sigma,
high_sigma=high_sigma,
mode=mode,
cval=cval,
truncate=truncate,
channel_axis=channel_axis),
dtype=dtype,
channel_axis=None,
)
difference_of_gaussians.__doc__ = filters.difference_of_gaussians.__doc__
@_implements(filters.median)
def median(image, footprint=None, out=None, mode='nearest', cval=0.0,
behavior='ndimage'):
depth = tuple(math.ceil(s / 2) for s in footprint.shape)
dtype = _supported_float_type(image.dtype)
footprint = np.asarray(footprint) # footprint should not be a dask array
if out is not None:
return NotImplemented
return apply_parallel(
filters.median,
image,
depth=depth,
mode='wrap' if mode == 'wrap' else 'none',
extra_keywords=dict(footprint=footprint,
mode=mode,
cval=cval,
behavior=behavior),
dtype=dtype,
)
median.__doc__ = filters.median.__doc__
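# Editor's usage sketch (illustrative, assumes dask is installed): because these wrappers
# go through skimage.util.apply_parallel, they operate chunk-wise on a dask array and
# return a lazy result.
#     import dask.array as da
#     img = da.random.random((512, 512), chunks=(128, 128))
#     smoothed = gaussian(img, sigma=3)   # chunk-wise skimage.filters.gaussian
#     result = smoothed.compute()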
|