text stringlengths 8 6.05M |
|---|
from Puzzle_8_node import *
from time import time
class Searcher(object):
    """Breadth-first searcher from a start state to a goal state.

    States must be hashable, comparable with ``==``, expose ``next()``
    returning successor states, and carry a ``prev`` back-pointer so the
    solution path can be reconstructed.
    """

    def __init__(self, start, goal):
        self.start = start
        self.goal = goal

    def print_path(self, state):
        """Print the chain of states from the start to ``state`` via ``prev`` links."""
        path = []
        while state:
            path.append(state)
            state = state.prev
        path.reverse()
        print("\n-->\n".join([str(s) for s in path]))

    def SearchAlgorithm(self, iteration_Limit=200000):
        """Run breadth-first search; return the goal state when found, else None.

        ``iteration_Limit`` bounds the number of expanded states so the search
        terminates even when no solution exists (it was previously ignored).
        """
        # FIFO frontier: pop() takes from the tail, successors go to the head.
        Openstates = [self.start]
        Closed_states = set()   # states already expanded
        reached = None
        expanded = 0
        while Openstates and expanded < iteration_Limit:
            # was: `state=start` read an unrelated global, and the loop
            # condition combined comparisons with bitwise `&`
            state = Openstates.pop()
            if state in Closed_states:
                continue
            Closed_states.add(state)
            expanded += 1
            if state == self.goal:
                reached = state
                break
            for s in state.next():
                Openstates.insert(0, s)
        if reached:
            self.print_path(reached)
            print("Find solution")
        else:
            print("No solution found")
        return reached
#the main function
# Script entry point: solve one 8-puzzle instance and report timing.
if __name__ == "__main__":
    # Unit test / demo run
    print("Search for solution\n")
    # Initial board configuration (0 is the blank tile)
    start = Puzzle_8_node([2,0,1,4,5,3,8,7,6])
    # Goal board configuration
    goal = Puzzle_8_node([1,2,3,4,5,6,7,8,0])
    # Build the general searcher over start/goal
    search = Searcher(start, goal)
    # Wall-clock timing around the whole search
    start_time = time()
    search.SearchAlgorithm()
    end_time = time()
    # Elapsed wall-clock seconds from start to end of the search
    elapsed = end_time - start_time
    print("Search time: %s" % elapsed)
    # Puzzle_8_node.n presumably counts constructed nodes -- defined in the Puzzle_8_node module
    print("Number of initialized node: %d" % Puzzle_8_node.n)
|
from flask_restful import Resource, reqparse
import pandas as pd
from instances import config
import json
import requests
class Predict(Resource):
    """REST resource that validates credit-scoring fields and forwards them to the model service."""

    # (field name, expected type) pairs; every field is mandatory.
    _FIELDS = (
        ('age', int),
        ('sex', str),
        ('job', int),
        ('housing', str),
        ('saving_accounts', str),
        ('checking_account', str),
        ('credit_amount', int),
        ('duration', int),
        ('purpose', str),
    )

    parser = reqparse.RequestParser()  # input validation rules
    for _name, _type in _FIELDS:
        parser.add_argument(_name,
                            type=_type,
                            required=True,
                            help="This field cannot be left blank!"
                            )
    del _name, _type

    def post(self):
        """Validate the request body and proxy it to the prediction endpoint."""
        payload = Predict.parser.parse_args()
        headers = {"Content-Type": "application/json"}
        body = json.dumps(payload).encode('utf8')
        upstream = requests.post(config.URL_TRAINING, data=body, headers=headers)
        parsed = json.loads(upstream.content)
        output = parsed['output']
        return {'output': output}
|
#!/usr/bin/python
# Adjust SED gratings.
#
# Copyright (C) 2010-2011 Huang Xin
#
# See LICENSE.TXT that came with this file.
from __future__ import division
import sys
from StimControl.LightStim.Core import DefaultScreen
from StimControl.LightStim.LightData import dictattr
from StimControl.LightStim.FrameControl import FrameSweep
from StimControl.LightStim.ManSED import ManSED
# Manual Grating experiment parameters, all must be scalars
# Manual grating experiment parameters; all values must be scalars.
p = dictattr()
# mask, one of: None, 'gaussian', or 'circle'
p.mask = 'circle'
# step (deg) when growing/shrinking the mask
p.maskSizeStepDeg = 0.5
# initial grating phase
p.phase0 = 0
# grating mean luminance (0-1)
p.ml = 0.5
# grating contrast (0-1)
p.contrast = 1
# background brightness (0-1)
p.bgbrightness = 0.5
# antialiase the bar?
p.antialiase = True
# flash the grating?
p.flash = False
# duration of each on period (sec)
p.flashduration = 0.5
# duration of each off period (sec)
p.flashinterval = 0.3
# factor to change bar width and height by left/right/up/down key
p.sizemultiplier = 1.02
# factor to change temporal freq by on up/down
# NOTE(review): a multiplier of 0.0 zeroes the temporal frequency on the first
# keypress -- confirm this is intended rather than a leftover value.
p.tfreqmultiplier = 0.0
# factor to change spatial freq by on left/right
p.sfreqmultiplier = 1.01
# factor to change contrast by on +/-
p.contrastmultiplier = 1.005
# orientation step size to snap to when scrolling mouse wheel (deg)
p.snapDeg = 45.0
# stimulus radius (deg)
p.radius = 2.0
# mask diameter (deg)
p.maskDiameterDeg = 1.5
# spatial frequency (cycles/deg)
p.sfreqCycDeg = 3.0
# temporal frequency (cycles/sec); 0 gives a static grating
p.tfreqCycSec = 0.0
# initial orientation (deg)
p.ori = 0.0
if __name__ == '__main__':
    # Open the three LightStim viewports used by this experiment.
    DefaultScreen(['control','left','right'])
    subject = None
    argv = list(sys.argv)
    # Subject initials may be given on the command line ...
    if len(argv) >= 2:
        subject = argv[1]
    # ... otherwise prompt until something is entered (Python 2: raw_input).
    while subject is None:
        sys.stdout.write('Please input lowercase initials of subject name: ')
        subject = raw_input()
    # One ManSED stimulus per viewport; the right viewport runs at 90 deg orientation.
    stimulus_control = ManSED(disp_info=True, subject=subject, params=p, viewport='control')
    stimulus_left = ManSED(disp_info=True, subject=subject, params=p, viewport='left')
    p.ori = 90.0
    stimulus_right = ManSED(disp_info=True, subject=subject, params=p, viewport='right')
    # Run all three stimuli inside a single frame-sweep loop.
    sweep = FrameSweep()
    sweep.add_stimulus(stimulus_control)
    sweep.add_stimulus(stimulus_left)
    sweep.add_stimulus(stimulus_right)
    sweep.go()
|
import api

# Hardware/feature flags passed to the api wrapper (Python 2 script).
emulated = True
LEDGrided = True
LCDed = False
keyboardHacked = True
touch = False
# Importable game module names; `button` below indexes into this tuple.
gameList = ('app','orthello','connect4','draughts','inkspill','pixelArt','simon','solitare','tetris','ticTacToe','missionmars','scamper','gridTest','flame')
#             0     1          2          3          4          5         6       7          8        9           10            11        12
# NOTE(review): rebinding the name to an instance -- the `api` module itself is
# no longer reachable after this line.
api = api.api(emulated, LEDGrided, LCDed, keyboardHacked, touch)
button = 4  # hard-coded selection; was: api.waitForScreenButtonPress()
#while True:
# Dynamically import the selected game module and start it.
gameImport = __import__(gameList[button])
print "ldr1"
game = gameImport.thisapp(api)
print "ldr2"
#button = api.waitForScreenButtonPress()
|
import json
import csv

# Convert a JSON object of {type: probability} pairs into a two-column CSV.

# Raw strings keep the Windows backslashes literal (avoids invalid-escape
# warnings); the context manager guarantees the handle is closed.
with open(r'D:\Topic Tagging\sample.json') as json_file:
    data = json.load(json_file)

# newline='' is required by the csv module on Windows to avoid blank rows;
# was: plain open() with no close on error and no newline handling.
with open(r'D:\Topic Tagging\data_file', 'w', newline='') as data_file:
    csv_writer = csv.writer(data_file)
    # Header first, then one row per key/value pair.
    csv_writer.writerow(['Type', 'Probability'])
    for emp in data:
        csv_writer.writerow([emp, data[emp]])
import cv2
import numpy as np
import matplotlib.pyplot as plt
from edgedetector import EdgeDetector
def main():
    """Load a test image, run the average filter, and display the result."""
    path = 'img/p1im1.png'
    img = cv2.imread(path)
    print(type(img))
    edge_detector = EdgeDetector()
    filtered = edge_detector.apply_filter('avg', img)
    plt.imshow(filtered)
    plt.show()


if __name__ == '__main__':
    main()
|
"""
Node Module
Contains Node class
"""
class Node:
    """A graph node: an identifier plus a mapping of neighbor nodes to edge constraints."""

    _id: str

    def __init__(self, node_id):
        """Create a node.

        :param node_id: node id; any immutable type (int, string, tuple, ...)
        """
        self._id = node_id
        self._neighbors = {}

    def __str__(self):
        """Readable representation: own id followed by the ids of all neighbors."""
        neighbor_ids = [n.get_id() for n in self._neighbors]
        return '{} Neighbors: {}'.format(self._id, neighbor_ids)

    def add_neighbor(self, neighbor, constraint):
        """Register ``neighbor`` together with the constraint on the connecting edge.

        :param neighbor: neighbor node
        :param constraint: constraint attached to the edge
        :return: void
        """
        self._neighbors[neighbor] = constraint

    def get_neighbors(self):
        """Return a view of all neighbor nodes."""
        return self._neighbors.keys()

    def get_degree(self):
        """Return the number of neighbors."""
        return len(self._neighbors)

    def get_id(self):
        """Return the node id (immutable: int, string, tuple, ...)."""
        return self._id

    def get_constraint(self, neighbor):
        """Return the constraint stored for the edge to ``neighbor``.

        :param neighbor: neighbor node
        :return: constraint
        """
        return self._neighbors[neighbor]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import csv
import math
from scipy.integrate import quad
def ellipseArea(a, b, angle):
    """Area swept inside an axis-aligned ellipse (semi-axes a, b) from angle 0 to ``angle``."""
    quarter = a * b * math.pi / 4
    swept = 0
    full_quarters = 0
    # Peel off whole quadrants; each contributes a quarter of the total area.
    while angle > math.pi / 2:
        swept += quarter
        angle -= math.pi / 2
        full_quarters += 1
    if full_quarters % 2 == 0:
        # Remaining slice starts at a vertical edge.
        swept += quarter - \
            .5 * a * b * math.atan(a * math.tan(math.pi / 2 - angle) / b)
    else:
        # Remaining slice starts at a horizontal edge.
        swept += .5 * a * b * math.atan(a * math.tan(angle) / b)
    return swept
# Function to integrate from 0 to 2π to get ellipse perimeter
def ellipseArcFunction(t, params):
    """Arc-length integrand for the ellipse (a cos t, b sin t): |d/dt position|."""
    a, b = params
    sin_t = math.sin(t)
    cos_t = math.cos(t)
    return math.sqrt((a * sin_t) ** 2 + (b * cos_t) ** 2)
def ellipseArc(a, b, angle):
    """Arc length along the ellipse boundary from parameter 0 to ``angle``."""
    # quad forwards the extra argument to ellipseArcFunction as ``params``.
    length, _err = quad(ellipseArcFunction, 0, angle, args=([a, b],))
    return length
# Calculate arc length and area for ellipse for a given slice defined by its central angle and rotation
def calcEllipse(a, b, angle, rotation):
    """Arc length and area of the ellipse slice spanning ``angle`` starting at ``rotation``.

    Returns [arc, area], each as the difference of the cumulative value at the
    slice's end and start angles.
    """
    end = angle + rotation
    start_area = ellipseArea(a, b, rotation)
    start_arc = ellipseArc(a, b, rotation)
    return [ellipseArc(a, b, end) - start_arc,
            ellipseArea(a, b, end) - start_area]
# Calculate the projected angles for a circle tilted at viewAngle from the viewer
# Returns the projected centralAngle and projected rotationAngle
def projectAngle(viewAngle, centralAngle, rotationAngle):
    """Project a circle slice tilted by ``viewAngle`` onto the view plane.

    Returns [projected central angle, projected rotation angle], both wrapped
    into [0..2*pi).
    """
    foreshorten = math.sin(viewAngle)  # y coordinates shrink by sin(viewAngle)
    start_x = math.cos(rotationAngle)
    start_y = math.sin(rotationAngle) * foreshorten
    end_x = math.cos(rotationAngle + centralAngle)
    end_y = math.sin(rotationAngle + centralAngle) * foreshorten
    rotationProj = math.atan2(start_y, start_x)
    centralProj = math.atan2(end_y, end_x) - rotationProj
    # Wrap into [0..2*pi) to avoid the pi/-pi discontinuity across the negative x axis.
    if rotationProj < 0:
        rotationProj += math.pi * 2
    if centralProj < 0:
        centralProj += math.pi * 2
    return [centralProj, rotationProj]
def main():
    """Write predicted arc/area fractions for 3D pie slices across view angles to CSV."""
    # 'w' + newline='' is the Python 3 csv idiom; the old 'wb' mode raises a
    # TypeError under Python 3 because csv.writer writes str, not bytes.
    with open('3dpiepredictions.csv', 'w', newline='') as outFile:
        csvOut = csv.writer(outFile)
        csvOut.writerow(['viewAngle', 'aspect', 'rotation', 'rotationProjected', 'angle', 'angleProjected', 'arc', 'area'])
        # Tilt the pie from near face-on (90 deg) toward edge-on in 15-deg steps.
        for viewAngle in range(90, 10, -15):
            viewRadians = math.radians(viewAngle)
            aspect = math.sin(viewRadians)  # minor/major axis ratio of the projected ellipse
            a = 1.
            b = aspect
            # Full-ellipse arc and area used as normalization denominators.
            ellipseTotal = calcEllipse(a, b, math.pi*2, 0)
            for centralAngle in [5, 10, 20, 30, 45, 60, 75, 90, 135, 180]:
                centralRadians = math.radians(centralAngle)
                for rotation in range(360):
                    angleProjected, rotationProjected = projectAngle(viewRadians, centralRadians, math.radians(rotation))
                    ellipse = calcEllipse(a, b, angleProjected, rotationProjected)
                    csvOut.writerow([viewAngle, aspect, rotation, math.degrees(rotationProjected), centralAngle, math.degrees(angleProjected), ellipse[0]/ellipseTotal[0], ellipse[1]/ellipseTotal[1]])


if __name__ == "__main__":
    # This will be called only when the Python file is invoked as a script.
    main()
|
import os

# Count the .JPEG files inside the resizedall directory.
ctr = sum(1 for entry in os.listdir('resizedall') if entry.endswith(".JPEG"))
# for filename in os.listdir('400x400'):
#     if filename.endswith(".JPEG"):
#         if not os.path.exists('400x400/'+filename.split('.')[0]+str('.xml')):
#             os.remove('400x400/'+filename)
#             print('deleted'+filename)
print(ctr)
from .throttling import ThrottlingMiddleware
|
# Snakemake rule: run QUAST quality assessment over all finished assemblies
# and collect one combined report.
rule quast:
    input:
        # a ".ok" sentinel per (assembler, id, subsample) marks a finished assembly
        expand("assemblies/{assembler}/{id}/{sub}/{assembler}.ok", id=IDS, sub=sub, assembler=Assembler)
        # per-assembler inputs kept for reference:
        # norgal = expand("assemblies/{assembler}/{id}/{sub}/{id}_{assembler}", sub=sub, id = IDS, assembler = Assembler[0]),
        ## norgal = rules.norgal.output,
        # MitoFlex = expand("assemblies/{assembler}/{id}/{sub}/{id}.picked.fa", sub=sub, id = IDS, assembler = Assembler[1]),
        ## Mitoflex = rules.mitoflex.output.fasta,
        # GetOrganelle = expand("assemblies/{assembler}/{id}/{sub}/{id}.getorganelle.final.fasta", sub=sub, id = IDS, assembler = Assembler[2]),
        ## GetOrganelle = rules.get_organelle.output,
        # Novoplasty = expand("assemblies/{assembler}/{id}/{sub}/Circularized_assembly_1_{id}_{sub}_novoplasty.fasta", sub=sub, id = IDS, assembler = Assembler[3]),
        ## Novoplasty = rules.NOVOplasty.output,
        # MITObim = expand("assemblies/{assembler}/{id}/{sub}/{id}_{assembler}_coxI.fasta", sub=sub, id = IDS, assembler = Assembler[4])
        ## MITObim = rules.MITObim.output
    output:
        # combined QUAST summary consumed downstream
        "QUAST/report.tsv"
    params:
        outdir = "QUAST/",
    # conda:
    #     "envs/quast.yml"
    threads: 1
    singularity: "docker://reslp/quast:5.0.2"
    shell:
        """
        quast -o {params.outdir} {input}
        """
|
def handle_columns(mongo_data, columns):
    """Reconcile a Mongo document against a SQL column schema.

    Fills defaultable/nullable missing columns directly into ``mongo_data``
    and returns {"more_columns": <keys only in the document>,
    "short_columns": <missing keys that could not be defaulted>}.
    """
    def _apply_defaults(doc):
        # Fill what we can; yield column names that have no usable default.
        for name in missing:
            info = columns[name]
            if info["default"] is not None:
                doc[name] = info["default"]
            elif info["nullable"]:
                doc[name] = None
            else:
                yield name

    extra = set(mongo_data) - set(columns)
    missing = set(columns) - set(mongo_data)
    if missing:
        missing = set(_apply_defaults(mongo_data))
    return {"more_columns": extra, "short_columns": missing}
def compare_type(data, column_type):
    """Check that ``data`` fits the SQL column type string (e.g. "INT(4)", "VARCHAR(32)").

    Returns True when the value fits, False when it does not, and None for
    type strings that are neither INT- nor CHAR-like (original fall-through).
    """
    import re  # local import: this module has no top-level imports in view

    def _str_compare(char_data, char_type):
        # was: referenced undefined names str_type/str_data (NameError)
        str_len = int(re.findall(r"\d+", char_type)[0])
        if "VAR" in char_type:
            return len(char_data) < str_len
        else:
            return len(char_data) == str_len

    def _int_compare(int_data, int_type):
        int_len = int(re.findall(r"\d+", int_type)[0])
        limit = 256 ** int_len
        # Signed range of an int_len-byte integer is [-limit/2, limit/2);
        # was: the lower bound mistakenly allowed values down to -limit.
        return int_data * 2 < limit and int_data * 2 >= -limit

    if "INT" in column_type:
        return _int_compare(data, column_type)
    elif "CHAR" in column_type:
        return _str_compare(data, column_type)
def handle_data(data, columns):
    """Coerce each column value in ``data`` to its declared SQL type.

    Mutates ``data`` in place and returns the set of column names whose values
    could not be converted or failed the type-range check.
    """
    import json  # local import: this module has no top-level imports in view

    def _to_json(column_data):
        return json.dumps(column_data)

    def _to_datetime(column_data):
        # Mongo exports datetimes either as {"iso": "..."} or a plain string.
        # was: `column_data, get["iso", ""]` -- a tuple/subscript typo, not a call
        date = column_data.get("iso", "") if isinstance(column_data, dict) else column_data
        # "2020-01-02T03:04:05Z" -> "2020-01-02 03:04:05"
        return date.replace("T", " ")[:-1]

    def _pointer_to_char(column_data):
        return column_data["objectId"]

    def _get_trans_func(column_type):
        # Fallback conversion for parameterized types like INT(4) / VARCHAR(32).
        if "INT" in column_type:
            return int
        elif "CHAR" in column_type:
            return str
        # was: fell through and raised UnboundLocalError for unknown types
        raise ValueError("unsupported column type: %s" % column_type)

    type_trans_func = {
        "JSON": _to_json,
        "DATETIME": _to_datetime,
        "CHAR(24)": _pointer_to_char,
        "BOOLEAN": bool,
        "TEXT": str,
        "FLOAT": float,
    }

    def _handle(data, columns):
        fault_types = set()
        # was: iterated the dict itself while unpacking pairs -- needs .items()
        for column_name, column_info in columns.items():
            column_type = column_info["type"]
            func = type_trans_func.get(column_type) or _get_trans_func(column_type)
            try:
                # CHAR(24) values that are not pointer dicts are already plain ids.
                # was: called undefined `instance` instead of `isinstance`
                if column_type == "CHAR(24)" and not isinstance(data[column_name], dict):
                    continue
                data[column_name] = func(data[column_name])
                if compare_type(data[column_name], column_type):
                    continue
            except Exception as e:
                print(column_name, column_type)
                print("[raise exception]", e)
            # was: this add() sat in a `finally`, so even successful columns
            # (which exited via `continue`) were flagged as faulty
            fault_types.add(column_name)
        return fault_types

    return _handle(data, columns)
|
# Copyright 2017 - The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides class CompositeMounter.
CompositeMounter implements the abstract class BaseMounter. It can add multiple
mounters inside as sub-mounters, and operate these sub-mounters with the
BaseMounter interface. Uses CompositeMounter.add_sub_mounter() to add
sub-mounter.
Usually, using CompositeMounter.add_by_mount_target() to add mounters is easier,
the method uses class _MounterFactory to create a mounter and then adds it.
class _MounterFactory provides a method to create a mounter by 'mounter_target'.
'mounter_target' is a name which identify what is the file source to be
mounted. See _MounterFactory.create_by_mount_target() for the detail.
"""
import logging
import os
from gsi_util.mounters import adb_mounter
from gsi_util.mounters import base_mounter
from gsi_util.mounters import folder_mounter
from gsi_util.mounters import image_mounter
SUPPORTED_PARTITIONS = ['system', 'vendor', 'odm']
class _MounterFactory(object):

    @classmethod
    def create_by_mount_target(cls, mount_target, partition):
        """Create a proper Mounter instance by a string of mount target.

        Args:
          partition: the partition to be mounted as
          mount_target: 'adb', a folder name or an image file name to mount.
            See Returns for the detail.

        Returns:
          An AdbMounter if mount_target is 'adb[:SERIAL_NUM]',
          a FolderMounter if mount_target is a folder name, or
          an ImageMounter if mount_target is an image file name.

        Raises:
          ValueError: partition is not supported or mount_target does not exist.
        """
        if partition not in SUPPORTED_PARTITIONS:
            raise ValueError('Wrong partition name "{}"'.format(partition))

        is_adb_target = mount_target == 'adb' or mount_target.startswith('adb:')
        if is_adb_target:
            _, _, serial_num = mount_target.partition(':')
            return adb_mounter.AdbMounter(serial_num)

        path_prefix = '/{}/'.format(partition)
        if os.path.isdir(mount_target):
            return folder_mounter.FolderMounter(mount_target, path_prefix)
        if os.path.isfile(mount_target):
            # A system image may carry the root filesystem; let ImageMounter detect it.
            if partition == 'system':
                path_prefix = image_mounter.ImageMounter.DETECT_SYSTEM_AS_ROOT
            return image_mounter.ImageMounter(mount_target, path_prefix)

        raise ValueError('Unknown target "{}"'.format(mount_target))
class _CompositeFileAccessor(base_mounter.BaseFileAccessor):
    """File accessor that delegates to the first sub-accessor whose path prefix matches."""

    def __init__(self, file_accessors):
        super(_CompositeFileAccessor, self).__init__()
        # list of (prefix_path, file_accessor) pairs, consulted in order
        self._file_accessors = file_accessors

    # override
    def _handle_prepare_file(self, filename_in_storage):
        logging.debug('_CompositeFileAccessor._handle_prepare_file(%s)',
                      filename_in_storage)
        pathfile_to_prepare = '/' + filename_in_storage
        # The first accessor whose prefix matches wins.
        for prefix_path, file_accessor in self._file_accessors:
            if pathfile_to_prepare.startswith(prefix_path):
                return file_accessor.prepare_file(pathfile_to_prepare)
        logging.debug(' Not found')
        return None
class CompositeMounter(base_mounter.BaseMounter):
    """Implements a BaseMounter which can add multiple sub-mounters."""

    def __init__(self):
        super(CompositeMounter, self).__init__()
        self._mounters = []  # list of (mount_point, mounter) pairs

    def is_empty(self):
        """True when no sub-mounter has been added yet."""
        return not self._mounters

    # override
    def _handle_mount(self):
        # Mount every sub-mounter and hand their accessors to one composite accessor.
        file_accessors = []
        for path_prefix, mounter in self._mounters:
            file_accessors.append((path_prefix, mounter.mount()))
        return _CompositeFileAccessor(file_accessors)

    # override
    def _handle_unmount(self):
        # Unmount in reverse order of mounting.
        for _, mounter in reversed(self._mounters):
            mounter.unmount()

    def add_sub_mounter(self, mount_point, mounter):
        """Register ``mounter`` to serve files under ``mount_point``."""
        self._mounters.append((mount_point, mounter))

    def add_by_mount_target(self, partition, mount_target):
        """Create a mounter for ``mount_target`` and register it under /<partition>/."""
        logging.debug('CompositeMounter.add_by_mount_target(%s, %s)',
                      partition, mount_target)
        mount_point = '/{}/'.format(partition)
        mounter = _MounterFactory.create_by_mount_target(mount_target, partition)
        self.add_sub_mounter(mount_point, mounter)
|
import sys
import ttk
import random
import shelve
from Tkinter import *
import Tkinter
import tkMessageBox
import subprocess
from shelvepass import *
from mylibhash import *
from function import fun
# ---- Login window (Python 2 / Tkinter) ----------------------------------
root = Tk()
global me  # `me` holds the username of whoever logs in
root.configure(background='black')
root.iconbitmap(default='icon.ico')
root.geometry('380x400+30+30')
root.title('Password Generator and Saver')
Label(root,text="Password Generator and Saver",fg='white',bg='black',font='times 20 bold underline').grid()
Label(root,text="Username: ",fg='white',bg='black',font='times 15 bold').grid()
u= Entry(root)
u.grid()
Label(root,text="Password: ",fg='white',bg='black',font='times 15 bold').grid()
p= Entry(root,show='*')
p.grid()
# x flags an admin login (1) vs normal user (0); y is the username
x=0
y=""
def getup():
    # Login button callback: validate credentials and open the main window.
    uu=u.get()
    pp=p.get()
    if uu=="admin" and pp=="admin":
        # Hard-coded admin account.
        x=1
        y="admin"
        global me
        me=y
        fun(y)
        root.destroy()
        first(x,y)
        #user(y)
    elif search(uu,pp,"Create account")==True:
        # Registered user found in the shelve store (see shelvepass.search).
        x=0
        y=uu
        global me
        me=y
        fun(y)
        root.destroy()
        first(x,y)
        #user(y)
    else:
        tkMessageBox.showwarning("Message","Your are not registered or invalid username or password")
def categories():
    # Launch the category-management script in a child process.
    subprocess.call("python projectcat.py")
def savedcat():
    # Launch the saved-passwords viewer in a child process.
    subprocess.call("python savedcat.py")
def myacc():
    # "My Account" window: lets the current user wipe their saved passwords.
    acc=Tk()
    global me
    acc.configure(background="black")
    acc.title("My Account")
    acc.geometry('300x300')
    Label(acc,text=(" Welcome User "),font="Times 20 bold ",justify='center',fg='white',bg='black').grid(row=0,column=0)
    def ask():
        # Confirm, then clear the per-user shelve database (<username>.dat).
        ans=tkMessageBox.askyesnocancel("Confirm!!","Do you really wish to reset your saved passwords??")
        if ans==True:
            global me
            f=shelve.open(me+".dat")
            f.clear()
            tkMessageBox.showinfo("Info","Data Successfully cleared from Database!!")
        else:
            tkMessageBox.showinfo("Info","Data Not cleared")
    Button(acc,text="RESET",font=" times 15 bold ",command=ask,fg='red',bg='black').place(x=120,y=80)
    Label(acc,text="NOTE:This will delete all your saved Data",fg='red',bg='black',font='times 11 bold').place(x=10,y=200)
    acc.mainloop()
def first(x,y):
    # Main application window shown after a successful login.
    # x == 1 marks the admin account (red greeting); y is the username.
    f=Tk()
    f.geometry('400x400+30+30')
    f.title("Welcome")
    f.configure(background='black')
    Label(f,text="Password Generator and Saver",fg='white',bg='black',font='times 20 bold underline').grid()
    Label(f,text="",width=55,fg='white',bg='black').grid()
    if x==1:
        Label(f,text=("Welcome "+y),fg='red',bg='black',font='times 13 bold ').grid()
    else:
        Label(f,text=("Welcome "+y),fg='white',bg='black',font='times 13 bold ').grid()
    def exit():
        # Logout button callback. NOTE(review): shadows the builtin exit().
        a=tkMessageBox.askyesno("Confirm!!","Are you sure to logout?")
        if a==True:
            f.destroy()
    Button(f,text="Categories",width=10,height=3,font="bold ",command=categories,fg='white',bg='black').place(x=50,y=100)
    Button(f,text="My Account",width=10,height=3,font="bold ",command=myacc,fg='white',bg='black').place(x=250,y=100)
    Button(f,text="Show Saved",width=10,height=3,font="bold ",command=savedcat,fg='white',bg='black').place(x=30,y=230)
    Button(f,text="logout",width=4,height=0,font="bold ",command=exit,fg='white',bg='black').place(x=260,y=60)
    def gen():
        # Launch the password generator in a child process.
        subprocess.call("python passgen.py")
    Button(f,text="Generate password!!",width=18,height=3,font="bold ",command=gen,fg='white',bg='black').place(x=210,y=230)
    f.mainloop()
def message():
    # "About" dialog.
    tkMessageBox.showinfo("About", "This software is used to creating passwords,saving the passwords in your pc and manging passwords of your account.")
#for new user registration
def newuser():
    # Registration window: collects name/email/userid/password and stores the account.
    newu=Tk()
    newu.geometry('400x400+30+30')
    newu.title('Registration')
    Label(newu,text="Create Account").grid()
    Label(newu,text="==================",width=55).grid()
    Label(newu,text="Name: ").grid()
    na=Entry(newu)
    na.grid()
    Label(newu,text="Email: ").grid()
    em=Entry(newu)
    em.grid()
    Label(newu,text="UserId: *").grid()
    u= Entry(newu)
    u.grid()
    Label(newu,text="Password: *").grid()
    p= Entry(newu,show='*')
    p.grid()
    Label(newu,text="Verify Password: *").grid()
    p1= Entry(newu,show='*')
    p1.grid()
    def getnu():
        # Create button callback: validate the form and store the new account.
        passw=p.get()
        uu=u.get()
        if p.get()=="" or p1.get()=="" or u.get()=="" or na.get()=="" or em.get()=="":
            tkMessageBox.showwarning("Warning","One of your field is empty")
        elif p.get()==p1.get():
            f=shelve.open('usernames.txt')
            if uu in f['usernames']:
                tkMessageBox.showerror("ERROR","Couldn't create user already exist!!",icon='error')
                newu.destroy()
            else:
                # NOTE(review): the hash is computed but the PLAINTEXT password is
                # stored via insert() -- looks unintentional, but login (search())
                # also compares plaintext, so changing one side alone breaks auth.
                x=hash_password(passw)
                insert(uu,passw,"Create account")
                tkMessageBox.showinfo("Congo","Registered Successfully!!")
                newu.destroy()
        else:
            tkMessageBox.showerror("Error","passwords not matched!")
    Button(newu,text="Create",command=getnu).grid()
    newu.mainloop()
# ---- Login window buttons and footer notes ------------------------------
Button(root,text="Login",command=getup,fg='white',bg='black',font='times 12 bold').grid()
Button(root,text="About",command=message,fg='white',bg='black',font='times 12 bold').grid()
Button(root,text="Create account",command=newuser,fg='white',bg='black',font='times 12 bold').grid()
Label(root,text=" ",fg='red',bg='black',font='times 11 bold').grid()
Label(root,text="NOTE:Please take backup after each and every process",fg='red',bg='black',font='times 11 bold').grid()
Label(root,text="This software is free from Sql Injection.",fg='red',bg='black',font='times 11 bold').grid()
Label(root,text="Every Password and Username are cases sensitive",fg='red',bg='black',font='times 11 italic').grid()
root.mainloop()
|
import uuid
from dataclasses import asdict
from decimal import Decimal
from kaizen_blog_api.comment.entities import Comment
from kaizen_blog_api.serializers import dict_factory
def test_can_instantiate() -> None:
    """Comment can be constructed directly from its field values."""
    # given
    comment = Comment(id=uuid.uuid4(), text="testing", username="user test", post_id=uuid.uuid4())
    # then
    assert isinstance(comment, Comment)
def test_serialize(dummy_comment: Comment) -> None:
    """asdict with the custom dict_factory turns UUIDs into str and the timestamp into Decimal."""
    # given
    comment = asdict(dummy_comment, dict_factory=dict_factory)
    # then
    assert isinstance(comment["id"], str)
    assert isinstance(comment["post_id"], str)
    assert isinstance(comment["created_at"], Decimal)
|
import numpy as np
import tensorflow as tf
from ..model.unet import simple_unet_3d
from .pre_process import load_patch_for_test_one_subj
from .utils.patches import reconstruct_from_patches
import os
import SimpleITK as sitk
import time
def prepare_data_per_case(case, config, valid=True, infer=False):
    """Load and reorder the patch stack for one subject.

    Returns (patches, indices): patches reordered to channels-last and the
    patch corner indices needed to stitch predictions back together.
    """
    # NOTE(review): both branches read config['data_file_valid'], so the
    # `valid` flag currently has no effect -- the else-branch likely meant a
    # test-set key; confirm against the config schema.
    file_in = config['data_file_valid'] if valid else config['data_file_valid']
    if infer:
        file_in = config["data_file_infer"]
    patches, indices = load_patch_for_test_one_subj(file_in,
                                                    case,
                                                    config['patch_shape'])
    # print(patches.shape, indices.shape, time.time()-t)
    # transpose [N, C, dim0, dim1, dim2] to channels-last [N, dim0, dim1, dim2, C]
    patches = np.transpose(patches, [0, 2, 3, 4, 1])
    return patches, indices
def test_on_full_images(sess, x, pred, config, debug=False, valid=True, save=False):
    """Evaluate the network on whole volumes by patch-wise prediction.

    For every case: predict all patches in batches, stitch a probability map,
    take the argmax segmentation, and report the Dice score against the OT
    ground truth. With ``save`` the prediction is written next to the
    reference image.
    """
    test_dice = []
    case_list = config['valid_case_list'] if valid else config['test_case_list']
    if debug:
        case_list = config['train_case_list']
    print('test case list', case_list)
    for sub_i in case_list:
        ref_name = os.path.join(config["data_file_valid"], str(sub_i), 'OT.nii.gz')
        pre_name = os.path.join(config["data_file_valid"], str(sub_i), 'pred.nii.gz')
        ref_img = sitk.ReadImage(ref_name)
        ref_img_array = sitk.GetArrayFromImage(ref_img)
        patches_pred = []
        patches, indices = prepare_data_per_case(sub_i, config)
        data_shape = (config["n_labels"],) + ref_img_array.shape[-3:]
        batch_size = config['validation_batch_size']
        num_steps = np.ceil(patches.shape[0] / batch_size)
        # Predict batch by batch, then move channels back to [N, C, d0, d1, d2].
        for step in range(int(num_steps)):
            batch_x = patches[step * batch_size: (step + 1) * batch_size]
            batch_pred = sess.run(pred, feed_dict={x: batch_x})
            patches_pred.append(batch_pred)
        patches_pred = np.concatenate(patches_pred)
        patches_pred = np.transpose(patches_pred, [0, 4, 1, 2, 3])
        prob_map = reconstruct_from_patches(patches_pred, indices, data_shape)
        img_pred_array = np.argmax(prob_map, axis=0)
        # Dice = 2|A.B| / (|A| + |B|) over the binary label maps.
        dsc = 2 * np.sum(ref_img_array * img_pred_array) / (ref_img_array.sum() + img_pred_array.sum())
        test_dice.append(dsc)
        print('dice, gt.sum(), pred.sum():', dsc, ref_img_array.sum(), img_pred_array.sum())
        if save:
            # astype() returns a copy -- the original discarded the result and
            # saved the wider-dtype array instead of uint8
            img_pred_array = img_pred_array.astype('uint8')
            pred_img = sitk.GetImageFromArray(img_pred_array)
            pred_img.CopyInformation(ref_img)
            sitk.WriteImage(pred_img, pre_name)
    print('test mean dice:', np.mean(test_dice))
def infer_on_full_images(sess, x, pred, config):
    """Run inference on every case under config["data_file_infer"] and save predictions.

    Each case's patches are predicted in batches, stitched into a probability
    map, argmax-ed, and written as pred.nii.gz with the reference geometry.
    """
    file_in = config["data_file_infer"]
    file_out = os.path.join(config["data_file_infer"][:-4], 'pred')
    if not os.path.exists(file_out):
        os.makedirs(file_out)
    case_indices = os.listdir(file_in)
    for case_i in case_indices:
        # FLAIR volume supplies the output geometry (spacing/origin/direction).
        ref_name = os.path.join(file_in, case_i, 'MR_Flair.nii.gz')
        ref_img = sitk.ReadImage(ref_name)
        ref_img_array = sitk.GetArrayFromImage(ref_img)
        patches_pred = []
        patches, indices = prepare_data_per_case(case_i, config, infer=True)
        data_shape = (config["n_labels"],) + ref_img_array.shape[-3:]
        batch_size = config['validation_batch_size']
        num_steps = np.ceil(patches.shape[0] / batch_size)
        for step in range(int(num_steps)):
            batch_x = patches[step * batch_size: (step + 1) * batch_size]
            batch_pred = sess.run(pred, feed_dict={x: batch_x})
            patches_pred.append(batch_pred)
        patches_pred = np.concatenate(patches_pred)
        patches_pred = np.transpose(patches_pred, [0, 4, 1, 2, 3])
        prob_map = reconstruct_from_patches(patches_pred, indices, data_shape)
        img_pred_array = np.argmax(prob_map, axis=0)
        # astype() returns a copy -- the original discarded the result and
        # saved the wider-dtype array instead of uint8
        img_pred_array = img_pred_array.astype('uint8')
        pre_file = os.path.join(file_out, case_i)
        if not os.path.exists(pre_file):
            os.makedirs(pre_file)
        pre_name = os.path.join(pre_file, 'pred.nii.gz')
        pred_img = sitk.GetImageFromArray(img_pred_array)
        pred_img.CopyInformation(ref_img)
        sitk.WriteImage(pred_img, pre_name)
        print(case_i, 'pred.sum():', img_pred_array.sum())
def test_process(config, infer=False, global_step=0, ck_name='ckpt'):
    """Rebuild the 3D U-Net graph, restore a checkpoint, and run eval or inference.

    global_step == 0 restores the latest checkpoint; otherwise the checkpoint
    named '<ck_name>-<global_step>' inside config['ckp_file'] is used.
    """
    tf.reset_default_graph()
    input_shape = config["patch_shape"] + (config["n_channels"],)
    model = simple_unet_3d(input_shape, n_cls=config["n_labels"], batch_norm=config['batch_norm'], training=False,
                           deconv=config['deconv'])
    pred = model['pred']
    x = model['input_x']
    train_logdir = config['ckp_file']
    # Restore batch-norm moving statistics in addition to the trainable weights.
    var_list = [var for var in tf.global_variables() if "moving" in var.name]
    var_list += tf.trainable_variables()
    saver = tf.train.Saver(var_list=var_list, max_to_keep=20)
    if not global_step:
        ckp_dir = tf.train.latest_checkpoint(train_logdir)
    else:
        ckp_dir = train_logdir + '/' + ck_name + '-' + str(global_step)
    print('restore ckp dir:', ckp_dir)
    with tf.Session() as sess:
        saver.restore(sess, ckp_dir)
        print('After load, uninitialized variable num:', len(sess.run(tf.report_uninitialized_variables())))
        if not infer:
            # Evaluate on the test split, then on the training split for reference.
            test_on_full_images(sess, x, pred, config, debug=False, valid=False)
            test_on_full_images(sess, x, pred, config, debug=True, valid=False)
        else:
            infer_on_full_images(sess, x, pred, config)
def test_debug(config):
    # Placeholder kept for API compatibility; intentionally does nothing.
    return
|
from utils import *
from rhyme_metric import *
class HierarchicalClustering:
    """Holds the input/output directory layout for one rhyme-clustering dataset."""

    def __init__(self, dataset_name) -> None:
        self.dataset_name = dataset_name
        base_in = 'input/' + dataset_name
        base_out = 'output/' + dataset_name
        self.input_path = base_in
        self.output_path = base_out
        self.reports_path = base_out + '/reports'
def cluster_level_nodes():
    # Build level-0 clusters from the vocabulary file and write them to JSON.
    # NOTE(review): references `self` but takes no parameter -- as written this
    # only runs if it was meant to be a HierarchicalClustering method (the
    # original file's indentation is ambiguous); confirm and re-attach.
    content = read_file(self.input_path + '/vocab.txt')
    lines = content.split("\n")
    del content
    # vocab lines are "<word> <count...>"; keep only the word
    words = [l.split(' ')[0] for l in lines]
    del lines
    clusters = merge_nearest_words(words)
    json_write(clusters, 'hierarchical_clusters0.json')
def hierarchical_level(i):
    # Merge level-i clusters into level-(i+1) clusters of rhyme patterns.
    clusters_simple = json_read('hierarchical_clusters' + str(i) + '.json')
    clusters = []
    for cluster_words in clusters_simple:
        clusters.append({
            'words': cluster_words,
            'pattern': ''
        })
    # Reduce each cluster to a single rhyme pattern, grouping clusters by pattern.
    pattern_set = {}
    for cluster in clusters:
        pattern = cluster['words'][0]
        for word in cluster['words'][1:]:
            pattern = rhyme_pattern(pattern, word)
        cluster['pattern'] = pattern
        if pattern in pattern_set:
            pattern_set[pattern].append(cluster['words'])
        else:
            pattern_set[pattern] = [cluster['words']]
    # Patterns shared by more than one cluster -- computed but currently unused.
    rep_patterns = [(p, pattern_set[p]) for p in pattern_set if len(pattern_set[p]) > 1]
    upper_level_clusters = merge_nearest_words([c['pattern'] for c in clusters])
    json_write(upper_level_clusters, 'hierarchical_clusters' + str(i + 1) + '.json')
def merge_nearest_words(words):
    # Greedy single-pass clustering: each still-unclustered word absorbs every
    # word that ties for the highest rhyme similarity with it.
    # clusters_file = open('hierarchical_clusters0.txt', 'a+')
    clusters = []
    remained_words = words.copy()
    clusters_num = 0
    for word1 in words:
        # Skip words already absorbed into an earlier cluster.
        if not (word1 in remained_words):
            continue
        remained_words.remove(word1)
        cluster_words = []
        max_similarity = 0
        # Collect ALL words tying at the current best similarity (> 0 only).
        for word2 in remained_words:
            similarity = rhyme_similarity(word1, word2)
            if similarity > max_similarity:
                max_similarity = similarity
                cluster_words = [word2]
            elif (similarity == max_similarity) and (similarity > 0):
                cluster_words.append(word2)
        for w in cluster_words:
            remained_words.remove(w)
        cluster_words.append(word1)
        clusters.append(cluster_words)
        # clusters_file.write(', '.join(cluster_words) + "\n")
        clusters_num += 1
        # Progress report every 10 clusters.
        if (clusters_num % 10) == 0:
            print("Cluster" + str(clusters_num) + ' Remained words:' + str(len(remained_words)))
            # clusters_file.flush()
        # print(cluster)
    # clusters_file.close()
    # json_write(remained_words, 'no_similars.json')
    return clusters
# cluster_level_nodes()
# Run one merge step on the level-0 clusters produced earlier.
hierarchical_level(0)
|
from Classifier import Classifier
from Documents import Documents
from Stemmer_Mutual_Information import Stemmer
from Training import Training
# Pipeline: build a dictionary over the 20-newsgroups corpus, train a
# Bernoulli and a multinomial Naive Bayes model, then classify the test set.
stemmer = Stemmer()
generalPath = '20news-bydate-'
trainPath = generalPath + 'train'
testPath = generalPath + 'test'
# The 20 newsgroup names used as class labels.
listOfCategories = ['alt.atheism', 'comp.graphics', 'comp.os.ms-windows.misc', 'comp.sys.ibm.pc.hardware',
                    'comp.sys.mac.hardware', 'comp.windows.x', 'misc.forsale', 'rec.autos', 'rec.motorcycles',
                    'rec.sport.baseball', 'rec.sport.hockey', 'sci.crypt', 'sci.electronics', 'sci.med',
                    'sci.space', 'soc.religion.christian', 'talk.politics.guns', 'talk.politics.mideast',
                    'talk.politics.misc', 'talk.religion.misc']
trainingDocs = Documents('train', trainPath)
testDocs = Documents('test', testPath)
documents = [trainingDocs, testDocs]
dictionary = {}
dictionarySize = 35000 # hyperparameter: cap on dictionary size
stemmer.loadDocumentsAndBuildDictionary(listOfCategories, documents, dictionary, dictionarySize)
# Train both event models on the training split (documents[0]).
trainerBernoulli = Training(listOfCategories, documents[0])
trainerMultinomiale = Training(listOfCategories, documents[0])
trainerBernoulli.binomialTraining()
trainerMultinomiale.multinomialTraining()
print('Inizio Classificazione di', len(documents[1].listOfDocuments), 'documenti.')
# Classify the test split (documents[1]) with each trained model.
classificatoreBernoulli = Classifier(documents[1], trainerBernoulli)
classificatoreMultinomiale = Classifier(documents[1], trainerMultinomiale)
classificatoreBernoulli.fastBernoulli()
classificatoreMultinomiale.fastMultinomial()
|
class classA:
    """Demo class; the print below runs once, when the class body executes."""
    print("this is a class A")
|
'''
Greedy algorithms approximate an optimal solution by taking the locally best
choice at each step: they simplify the problem, run fast, and come close to
optimal.  Classic examples: knapsack, classroom scheduling, set cover,
travelling salesman.
'''
# Set-cover problem: among the unchosen stations, always pick the one that
# covers the most still-uncovered states.
stations = {}
stations["kone"] = {"id", "nv", "ut"}
stations["ktwo"] = {"wa", "id", "mt"}
stations["kthree"] = {"or", "nv", "ca"}
stations["kfour"] = {"nv", "ut"}
stations["kfive"] = {"ca", "az"}
remain = set()
final_stations = set()
# The universe to cover is the union of every station's coverage.
for k, v in stations.items():
    remain |= v
print("All states need to be covered:", remain)
while remain:
    best_station = None
    states_covered = set()
    for station, states in stations.items():
        covered = remain & states
        if len(covered) > len(states_covered):
            best_station = station
            states_covered = covered
    if best_station is None:
        # FIX: no station covers any remaining state — bail out instead of
        # looping forever (the original would spin on an uncoverable state).
        break
    remain -= states_covered
    final_stations.add(best_station)
print(final_stations)
|
import os
from git.repo import Repo
from git import Git
import csv
# Minimum number of shared commits for a file pair to count as co-changing.
Threshold=0
# Minimum confidence (shared commits / a file's total commits) to record a pair.
Threshold2=0.0
# Collected (file_a, file_b, confidence) tuples; filled by GetCochange().
commonList=[]
def all_path(dirname):
    """Recursively collect every '.py' file under *dirname*.

    Returns each file's path relative to *dirname* (OS-native separators).

    FIX: the original shadowed the builtin ``filter`` and accumulated an
    unused full-path list (``result1``) and an unused counter (``zxy``).
    """
    wanted_exts = [".py"]
    relative_paths = []
    prefix_len = len(dirname)
    for maindir, subdir, file_name_list in os.walk(dirname):
        for filename in file_name_list:
            full_path = os.path.join(maindir, filename)
            ext = os.path.splitext(full_path)[1]
            if ext in wanted_exts:
                # Drop "<dirname>/" to keep the repo-relative path.
                relative_paths.append(full_path[prefix_len + 1:])
    return relative_paths
def GetCochange(fileDir,support,confidence):
    """Mine co-change file pairs from the git history of *fileDir*.

    For every pair of .py files sharing more than *support* commits, record
    (file_a, file_b, conf) whenever conf = shared/total-commits exceeds
    *confidence*, then dump the pairs to a CSV inside the repo and return
    its path.
    """
    global Threshold,Threshold2,commonList
    commonList=[]
    Threshold=support
    Threshold2=confidence
    R=Git(fileDir)
    AllFile=all_path(fileDir)
    # Normalise Windows separators so paths match git's output.
    AllFile=[i.replace('\\','/') for i in AllFile]
    fileDic={}
    Already=0
    for i in AllFile:
        Already=Already+1
        print(Already)
        # %% escapes the %-formatting, so git receives --pretty='%H' -M100%.
        # --follow tracks the file across renames.
        ExecuteSent="git log --no-merges --pretty='%%H' -M100%% --follow %s" %i
        fileDic[i]=R.execute(ExecuteSent)
        # Strip the quotes git prints around each hash and split into a list.
        fileDic[i] = fileDic[i].replace('\'','').split('\n')
    FileList=list(fileDic.keys())
    fileNum=1
    # Compare every unordered pair of files exactly once.
    for i in FileList:
        CommitIdListOne=fileDic[i]
        for j in FileList[fileNum:]:
            CommitIdListTwo=fileDic[j]
            # Commits that touched both files.
            CommitCommmenList=[zzz for zzz in CommitIdListOne if zzz in CommitIdListTwo]
            if len(CommitCommmenList)>Threshold:
                conf1=len(CommitCommmenList)/len(CommitIdListOne)
                conf2=len(CommitCommmenList)/len(CommitIdListTwo)
                # Confidence is directional, so both orientations are checked.
                if conf1>Threshold2:
                    commonList.append((i,j,conf1))
                if conf2>Threshold2:
                    commonList.append((j,i,conf2))
            else:
                pass
        fileNum=fileNum+1
    filePath=fileDir+r'\follow(%d-%.1f).csv' %(Threshold,Threshold2)
    with open(filePath,'w',newline='') as f:
        csv_write= csv.writer(f)
        # NOTE(review): the third column holds a confidence ratio, but the
        # header says "count" — confirm which is intended.
        csv_head = ["file1","file2","count"]
        csv_write.writerow(csv_head)
        for cc in commonList:
            csv_write.writerow((cc[0],cc[1],cc[2]))
        f.close()
    return filePath
|
# -*- coding: utf-8 -*-
##############################################################################
#
#
##############################################################################
# Odoo addon manifest: metadata dictionary evaluated by the module loader.
{
    'name' : 'Econube account correction',
    'version' : '0.1',
    'author' : 'Econube | Jose Pinto, Pablo Cabezas',
    'category' : 'Accounting & Finance',
    # User-facing description (Spanish); shown in the Apps list.
    'description' : """
Habilita el campo compañía en los estractos bancarios y deja la compañía en el dominio del diario y
periodo en el estracto.
Deja un campo boleano en la orden de compra para cuando se requiera boleta de honorarios... de tal manera que
en el envío de entrada se pueda realizar una boleta de honorarios en vez de una factura.
""",
    'website': 'http://www.econube.cl',
    # Modules that must be installed before this one.
    'depends' : ['account', 'base', 'purchase', 'econube_purchase_service'],
    # XML view definitions loaded with the module.
    'data' : ['views/account_bank_statement.xml','views/purchase_order.xml'],
    'installable': True,
    'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: |
import unittest
from appium_advance.page_object.desired_caps import appium_desired
from time import sleep
class StartEnd(unittest.TestCase):
    """Appium base test case: opens a driver session before each test and
    closes the app (after a short grace period) afterwards."""

    def setUp(self):
        # Fresh Appium driver session per test.
        self.driver = appium_desired()

    def tearDown(self):
        # Let any pending UI work settle before closing.
        sleep(5)
        self.driver.close_app()
#from package import config
#from Src.EnvSetup import cnfgurl
from Src.EnvSetup import cnfgurl
class Myurl(object):
    """Navigation helper: drives the browser to the configured URLs."""

    def __init__(self, driver):
        # Selenium-style driver used for navigation.
        self.driver = driver

    def _open(self, environment):
        # Resolve the environment key against the configured URL map.
        self.driver.get(cnfgurl.URL[environment])

    def access_url(self):
        """Open the default environment's URL."""
        self._open(cnfgurl.DEFAULT_ENVIRONMENT)

    def registor_url(self):
        """Open the registration environment's URL."""
        self._open(cnfgurl.REGISTOR_ENVIRONMENT)

    def acces_user(self):
        """Open the user environment's URL."""
        self._open(cnfgurl.USER_ENVIRONMENT)
|
from wtforms import StringField
from wtforms.validators import DataRequired
from flask_security import RegisterForm, LoginForm, ForgotPasswordForm
from flask_security.utils import find_user, get_message, hash_password
from flask_security.confirmable import requires_confirmation
from flask import flash
from werkzeug.local import LocalProxy
from flask import current_app
class ExtendedRegisterForm(RegisterForm):
    """
    Add first_name and surname to register form from Flask-security-too
    """
    # Both fields are mandatory; missing values fail DataRequired validation.
    first_name = StringField('First Name', [DataRequired()])
    surname = StringField('Surname', [DataRequired()])
class ExtendedLoginForm(LoginForm):
    """
    Extended login form to remove default error messages and add flash
    messages
    """
    def validate(self):
        """Validate credentials, flashing a message for every failure mode.

        Returns True only for an existing, active, confirmed user whose
        password verifies; otherwise appends field errors / flashes and
        returns False.
        """
        # NOTE(review): the return value of super().validate() is discarded,
        # so WTForms field-level errors do not short-circuit this method —
        # confirm that is intentional.
        super(ExtendedLoginForm, self).validate()
        _security = LocalProxy(lambda: current_app.extensions["security"])
        self.user = find_user(self.email.data)
        if self.user is None:
            self.email.errors.append(get_message("USER_DOES_NOT_EXIST")[0])
            flash(get_message("USER_DOES_NOT_EXIST")[0], get_message("USER_DOES_NOT_EXIST")[1])
            # Reduce timing variation between existing and non-existing users
            hash_password(self.password.data)
            return False
        if not self.user.password:
            flash(get_message("PASSWORD_NOT_SET")[0], get_message("PASSWORD_NOT_SET")[1])
            # Reduce timing variation between existing and non-existing users
            hash_password(self.password.data)
            return False
        # Normalise (e.g. unicode) before verification, mirroring flask-security.
        self.password.data = _security._password_util.normalize(self.password.data)
        if not self.user.verify_and_update_password(self.password.data):
            self.password.errors.append(get_message("INVALID_PASSWORD")[0])
            flash(get_message("INVALID_PASSWORD")[0], get_message("INVALID_PASSWORD")[1])
            return False
        self.requires_confirmation = requires_confirmation(self.user)
        if self.requires_confirmation:
            flash(get_message("CONFIRMATION_REQUIRED")[0], get_message("CONFIRMATION_REQUIRED")[1])
            return False
        if not self.user.is_active:
            flash(get_message("DISABLED_ACCOUNT")[0], get_message("DISABLED_ACCOUNT")[1])
            return False
        return True
|
import os
from sqlalchemy import create_engine
# engine = create_engine('sqlite:///pemilu.db')
# Read the DB password from the environment; raises KeyError when unset.
MYSQL_PASSWORD = os.environ['MYSQL_PASSWORD']
# MySQL connection (mysqldb driver) to the local pemilu2019 database.
engine = create_engine(f'mysql+mysqldb://root:{MYSQL_PASSWORD}@localhost:3306/pemilu2019')
|
import Computer
class Game:
    """One round-robin round of rock-paper-scissors between several players."""

    # Points for attack+defend character pairs: win=1, draw=0, loss=-1.
    # FIX (idiom/perf): replaces the original parallel battleStr/battlePt
    # lists and their linear scan with an O(1) dict lookup.
    _BATTLE_POINTS = {
        'rr': 0, 'rp': -1, 'rs': 1,
        'pr': 1, 'pp': 0, 'ps': -1,
        'sr': -1, 'sp': 1, 'ss': 0,
    }

    def __init__(self, players):
        self.players = players

    def selectHands(self):
        """Ask every player for a hand; store (player, hand) pairs."""
        self.hands = []
        for player in self.players:
            hand = player.getHand()
            self.hands.append((player, hand))

    def getScores(self):
        """Score every hand against every other hand.

        Each character of a hand attacks each character of every opponent
        hand.  Returns a list of (player, hand, score) tuples.
        """
        scores = []
        for hand in self.hands:
            sc = 0
            for opponenthand in self.hands:
                if hand == opponenthand:
                    continue
                for attack in hand[1]:
                    for defend in opponenthand[1]:
                        # Unknown pairs contribute 0, as in the original scan.
                        sc += self._BATTLE_POINTS.get(attack + defend, 0)
            scores.append((hand[0], hand[1], sc))
        return scores

    def playRound(self):
        """Play one round: collect hands, score them, apply damage, report."""
        self.selectHands()
        scores = self.getScores()
        for score in scores:
            score[0].attack(score[2])
        for player in self.players:
            print("{}: {}".format(player.name,player.health))
# if __name__ == "__main__":
# player1 = Computer.Player.Player(5)
# player2 = Computer.Player.Player(5)
# player3 = Computer.Player.Player(5)
#
# game = Game([player1,player2,player3])
# game.playRound()
|
import sys
import time
import subprocess
import os, signal
from selenium.webdriver.firefox.options import Options
from selenium import webdriver
import platform
import logging
import inspect
# Seconds used for the driver-wide implicit wait and short post-action pauses.
implicit_wait_time = 3
# Longer pause applied after clicks that may trigger navigation.
specific_wait_time = 15
# TODO: extend so that a "list of elements" can be extracted into a table via a tag
# NOTE(review): `global` at module level is a no-op; functions that assign
# aux_display_selenium_helpers without their own `global` create locals — confirm.
global aux_display_selenium_helpers
# now Firefox will run in a virtual display.
# you will not see the browser.
def get_browser(development = 0, with_visual_browser = 0, browser_size_x = 1500, browser_size_y = 1200, PROXY = ""):
    """Launch and return a Firefox webdriver.

    :param development: enable INFO logging and try to import pydevd
    :param with_visual_browser: 0 = headless (and virtual display if available)
    :param browser_size_x/browser_size_y: requested window size
    :param PROXY: optional "ip:port" HTTP proxy
    """
    options = Options()
    path = os.path.dirname(os.path.realpath(__file__))
    geckodriver_file = os.path.join(path, "resources/geckodriver_ubuntu")
    # NOTE(review): "linux2" is the Python 2 value of sys.platform; on
    # Python 3 it is "linux", so this branch never runs there. platform.dist()
    # was removed in Python 3.8 — confirm the target runtime.
    if sys.platform == "linux2":
        if platform.dist()[0] == "Ubuntu":
            geckodriver_file = os.path.join(path, "resources/geckodriver_ubuntu")
        else:
            geckodriver_file = os.path.join(path, "resources/geckodriver")
    else:
        geckodriver_file = os.path.join(path, "resources/geckodriver.exe")
    if development:
        logging.basicConfig(level=logging.INFO)
        try:
            import pydevd
        except:
            pass
    else:
        if not with_visual_browser:
            try:
                from pyvirtualdisplay import Display
                # NOTE(review): assigned without `global`, so this binds a
                # local; the module-level name read by kill_selenium() is
                # never set here — confirm intended.
                aux_display_selenium_helpers = Display(visible=with_visual_browser, size=(browser_size_x, browser_size_y))
                aux_display_selenium_helpers.start()
            except:
                logging.info("[------- INFO] Cant use pyvirtualdisplay, using systems default instead.")
    if not with_visual_browser:
        # Headless mode both via env var (older Firefox) and CLI option.
        os.environ['MOZ_HEADLESS'] = '1'
        options.add_argument("--headless")
    logging.info("[************** START] Starting browser...")
    if PROXY:
        # Manual HTTP proxy configuration through a Firefox profile.
        profile = webdriver.FirefoxProfile()
        proxy_ip, proxy_port = PROXY.split(":")
        profile.set_preference("network.proxy.type", 1)
        profile.set_preference("network.proxy.http", proxy_ip)
        profile.set_preference("network.proxy.http_port", int(proxy_port))
        profile.update_preferences()
        browser = webdriver.Firefox(executable_path=os.path.abspath(geckodriver_file), firefox_profile=profile)
    else:
        browser = webdriver.Firefox(executable_path=os.path.abspath(geckodriver_file))
    browser.implicitly_wait(implicit_wait_time)
    # browser.set_window_position(100,100) # Could not get the window to maximize on Ubuntu.
    try:
        browser.set_window_size(browser_size_x, browser_size_y)
    except:
        logging.info("FAILED TO RESIZE THE BROWSER PROPERLY")
    logging.info("[END] Browser launched")
    return browser
## Final interactions
def fill_form(element, text_to_fill):
    """Clear *element* and type *text_to_fill* into it.

    Returns True on success, False when *element* is falsy (i.e. not found).
    """
    if not element:
        return False
    element.clear()
    element.send_keys(text_to_fill)
    # Give the page a moment to react to the input.
    time.sleep(implicit_wait_time)
    logging.info("[END] " + inspect.stack()[0][3] + ".")
    return True
def click_element(result, position_in_list):
    """Click *result* when a single element was requested.

    With position_in_list < 0 the caller asked for a list, which is returned
    unchanged; with a valid index the element is clicked (followed by a wait)
    and True is returned; otherwise False signals "nothing to click".
    """
    if result and position_in_list > -1:
        logging.info("[END] Clicking found element.")
        result.click()
        # Navigation after a click can take a while.
        time.sleep(specific_wait_time)
        return True
    if position_in_list < 0:
        logging.info("[END] Returning element list.")
        return result
    return False
## Getters
def get_element_parent(element):
    """Return the DOM parent of *element* via an XPath '..' lookup."""
    message = "\t\t START - END; " + inspect.stack()[0][3] + ": " + str(element)
    logging.info(message)
    return element.find_element_by_xpath("..")
def click_when_exists_by_xpath(browser, identifier, position_in_list = -1, max_retries=20):
    """Wait for the XPath match(es) via do_func, then click / return the list."""
    logging.info("[************** START] " + inspect.stack()[0][3] + ": " + identifier + ", Position: " + str(position_in_list))
    result = do_func(browser.find_elements_by_xpath, identifier, position_in_list, max_retries=max_retries)
    logging.info("[END] " + inspect.stack()[0][3])
    return click_element(result, position_in_list)

def click_when_exists_by_class(browser, identifier, position_in_list = -1, max_retries = 20):
    """Wait for exact-class match(es), then click / return the list."""
    logging.info("[************** START] "+ inspect.stack()[0][3] +": " + identifier + ", Position: " + str(position_in_list))
    result = find_element_by_class(browser, identifier, position_in_list, max_retries=max_retries)
    return click_element(result, position_in_list)

def click_when_exists_by_id(browser, identifier, position_in_list = -1, max_retries = 20):
    """Wait for id match(es), then click / return the list."""
    logging.info("[************** START] " + inspect.stack()[0][3] + ": " + identifier + ", Position: " + str(position_in_list))
    result = do_func(browser.find_elements_by_id, identifier, position_in_list, max_retries=max_retries)
    return click_element(result, position_in_list)

def click_when_exists_by_css(browser, identifier, position_in_list = -1, max_retries = 20):
    """Wait for CSS-selector match(es), then click / return the list."""
    logging.info("[************** START] " + inspect.stack()[0][3] + ": " + identifier + ", Position: " + str(position_in_list))
    result = do_func(browser.find_elements_by_css_selector, identifier, position_in_list, max_retries=max_retries)
    return click_element(result, position_in_list)
def find_element_by_id(browser, identifier, position_in_list = -1, max_retries = 20):
    """Poll for elements with the given DOM id; see do_func for return values."""
    logging.info("[************** START] " + inspect.stack()[0][3] + ": " + identifier + ", Position: " + str(position_in_list))
    return do_func(browser.find_elements_by_id, identifier, position_in_list, max_retries=max_retries)

def find_element_by_tag(browser, identifier, position_in_list = -1, max_retries = 20):
    """Poll for elements with the given tag name; see do_func for return values."""
    logging.info("[************** START] " + inspect.stack()[0][3] + ": " + identifier + ", Position: " + str(position_in_list))
    return do_func(browser.find_elements_by_tag_name, identifier, position_in_list, max_retries=max_retries)
def find_element_by_class(browser, identifier, position_in_list = -1, max_retries = 20):
    """
    Accepts multiple words as class. Filters for the element to have EXACTLY the specified class (several values if so)
    :param identifier: Class name
    :param position_in_list: Position in the result list, -1 for "return all"
    :param max_retries:
    :param browser:
    :return: matching element / list of elements, or False when not found

    FIX: the single-word indexed path could raise an uncaught IndexError
    while the multi-word path returned False; both now return False.  The
    bare ``except`` is also narrowed to IndexError.
    """
    logging.info("[************** START] " + inspect.stack()[0][3] + ": " + identifier + ", Position: " + str(position_in_list))
    aux_identifier = ""
    element_list = list()
    if " " in identifier.strip():
        # Selenium's by-class lookup takes a single class: search by the
        # first word, then filter on the full multi-word class string below.
        aux_identifier = identifier.strip()
        identifier = identifier.strip().split(" ")[0]
    element_list = do_func(browser.find_elements_by_class_name, identifier, -1, max_retries=max_retries)
    if not aux_identifier or not element_list:
        if position_in_list < 0:
            logging.info("[END] " + inspect.stack()[0][3] + ": Returning requested list of elements.")
            return element_list
        try:
            logging.info("[END] " + inspect.stack()[0][3] + ": Returning requested element.")
            return [element for element in element_list if element.get_attribute("class").strip() == identifier.strip()][position_in_list]
        except IndexError:
            logging.info("[END] " + inspect.stack()[0][3] + ": Element not found.")
            return False
    # Multi-word class: keep only elements whose class string matches exactly.
    result_element_list = [element for element in element_list
                           if element.get_attribute("class").strip() == aux_identifier]
    if position_in_list < 0:
        logging.info("[END] " + inspect.stack()[0][3] + ": Returning requested list of elements.")
        return result_element_list
    try:
        logging.info("[END] " + inspect.stack()[0][3] + ": Returning requested element.")
        return result_element_list[position_in_list]
    except IndexError:
        logging.info("[END] " + inspect.stack()[0][3] + ": Element not found.")
        return False
def fill_form_when_exists_by_id(browser, identifier, position_in_list, text_to_fill, max_retries = 20):
    """Wait for the element with the given id, then type *text_to_fill* into it."""
    logging.info("[************** START] " + inspect.stack()[0][3] + ": " + identifier + ", Position: " + str(position_in_list) + ", TextToFill: "+text_to_fill)
    result = do_func(browser.find_elements_by_id, identifier, position_in_list, max_retries=max_retries)
    return fill_form(result, text_to_fill)

def fill_form_when_exists_by_name(browser, identifier, position_in_list, text_to_fill, max_retries=20):
    """Wait for the element with the given name attribute, then fill it."""
    logging.info("[************** START] " + inspect.stack()[0][3] + ": " + identifier + ", Position: " + str(position_in_list) + ", TextToFill: "+text_to_fill)
    result = do_func(browser.find_elements_by_name, identifier, position_in_list, max_retries=max_retries)
    return fill_form(result ,text_to_fill)

def fill_form_when_exists_by_class(browser, identifier, position_in_list, text_to_fill, max_retries=20):
    """Wait for the element with the given (exact) class, then fill it."""
    logging.info("[************** START] " + inspect.stack()[0][3] + ": " + identifier + ", Position: " + str(
        position_in_list) + ", TextToFill: " + text_to_fill)
    result = find_element_by_class(browser, identifier, position_in_list, max_retries=max_retries)
    return fill_form(result, text_to_fill)
def do_func(find_func, identifier, position_in_list, max_retries = 20):
    """Poll *find_func(identifier)* until enough elements appear.

    :param find_func: locator such as browser.find_elements_by_id
    :param identifier: locator argument
    :param position_in_list: index of the wanted element, or -1 for the list
    :param max_retries: polling attempts, one second apart
    :return: the element at *position_in_list*, the whole list for -1,
             or False when not enough elements appeared
    """
    els = find_func(identifier)
    retries = 0
    # BUG FIX: the original loop condition ended with `and len(els) < 1`,
    # which stopped retrying as soon as a single element appeared even when
    # position_in_list > 0 required more.  List requests (-1) keep the
    # original no-retry behaviour.
    while position_in_list > -1 and len(els) < position_in_list + 1 and retries < max_retries:
        retries += 1
        time.sleep(1)
        els = find_func(identifier)
    if position_in_list == -1:
        logging.info("\t\t START - END; FOUND LIST OF ELEMENTS: " + identifier + ", Position: " + str(position_in_list))
        return els
    elif len(els) > position_in_list:
        logging.info(
            "\t\t START - END; FOUND ELEMENT: " + identifier + ", Position: " + str(position_in_list))
        return els[position_in_list]
    else:
        return False
def get_frames(browser):
    """Return every <frame> element on the page (list form, no single index)."""
    return find_element_by_tag(browser, "frame", -1)
def check_kill_process(pstring):
    """SIGKILL every process whose `ps ax` line matches *pstring* (POSIX only)."""
    for line in os.popen("ps ax | grep " + pstring + " | grep -v grep"):
        fields = line.split()
        pid = fields[0]  # first ps column is the PID
        os.kill(int(pid), signal.SIGKILL)
def kill_selenium(browser):
    """Quit the driver, stop the virtual display, and kill leftover Firefox."""
    logging.info("[************** START] Killing Selenium...")
    browser.quit()
    try:
        # NOTE(review): relies on the module-level global that get_browser()
        # appears to assign only locally — confirm this ever succeeds.
        aux_display_selenium_helpers.stop() # Global variable
    except:
        pass
    try:
        check_kill_process("firefox")
        # NOTE(review): pkill receives "firefox -marionette" as ONE pattern
        # argument (containing a space); confirm it matches as intended.
        process = subprocess.Popen(["pkill","firefox -marionette"], stdout=subprocess.PIPE)
        output, error = process.communicate()
    except:
        logging.error("Couldnt kill firefox -marionette, are you using windows?")
    logging.info("[END] Killing Selenium...")
def scrape_table(table):
    """Extract a <table> element into (header_rows, body_rows).

    *table* must be an element whose tag is "table".  Each returned row is a
    list of the raw <th> / <td> elements, headers from <thead> and body rows
    from <tbody>.
    """
    thead = find_element_by_tag(table, "thead", position_in_list=0, max_retries=20)
    table_headers = [
        list(find_element_by_tag(header_row, "th", -1))
        for header_row in find_element_by_tag(thead, "tr", -1)
    ]
    tbody = find_element_by_tag(table, "tbody", position_in_list=0, max_retries=20)
    table_rows = [
        list(find_element_by_tag(body_row, "td", -1))
        for body_row in find_element_by_tag(tbody, "tr", -1)
    ]
    return table_headers, table_rows
def multiframe_find_element_by_class(browser, identifier, position_in_list = -1, max_retries = 20):
    """Search every top-level frame for elements with the given class."""
    browser.switch_to.default_content()
    frames = get_frames(browser)
    element_list = list()
    for idx, frame in enumerate(frames):
        browser.switch_to.frame(frame)
        # NOTE(review): extend() with a tuple appends the lookup result and
        # the frame index as two separate items; append((result, idx)) may
        # have been intended — confirm against callers.
        element_list.extend((find_element_by_class(browser, identifier, position_in_list, max_retries),idx))
        browser.switch_to.default_content()
    return element_list
if __name__== "__main__":
pass |
from django.contrib import admin
# Register your models here.
from django.contrib import admin
from .models import Person, Leave
# Expose Person and Leave in the Django admin with the default ModelAdmin.
admin.site.register(Person)
admin.site.register(Leave)
|
# Copyright 2016 Red Hat, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import mock
from oslotest import base as test_base
import six
from oslo_log import rate_limit
class LogRateLimitTestCase(test_base.BaseTestCase):
    """Tests for oslo_log.rate_limit's root-logger rate-limiting filter."""

    def tearDown(self):
        super(LogRateLimitTestCase, self).tearDown()
        # Always remove the filter so one test cannot leak into the next.
        rate_limit.uninstall_filter()

    def install_filter(self, *args):
        """Install the rate-limit filter and capture root-logger output.

        Returns (logger, stream); *stream* receives every log record emitted
        through the root logger during the test.
        """
        rate_limit.install_filter(*args)
        logger = logging.getLogger()
        # remove handlers to not pollute stdout
        def restore_handlers(logger, handlers):
            for handler in handlers:
                logger.addHandler(handler)
        self.addCleanup(restore_handlers, logger, list(logger.handlers))
        for handler in list(logger.handlers):
            logger.removeHandler(handler)
        # install our handler writing logs into a StringIO
        stream = six.StringIO()
        handler = logging.StreamHandler(stream)
        logger.addHandler(handler)
        return (logger, stream)

    @mock.patch('oslo_log.rate_limit.monotonic_clock')
    def test_rate_limit(self, mock_clock):
        # Limit: 2 records per 1-second window; the third is dropped.
        mock_clock.return_value = 1
        logger, stream = self.install_filter(2, 1)
        # first burst
        logger.error("message 1")
        logger.error("message 2")
        logger.error("message 3")
        self.assertEqual(stream.getvalue(),
                         'message 1\n'
                         'message 2\n'
                         'Logging rate limit: drop after 2 records/1 sec\n')
        # second burst (clock changed)
        stream.seek(0)
        stream.truncate()
        mock_clock.return_value = 2
        logger.error("message 4")
        logger.error("message 5")
        logger.error("message 6")
        self.assertEqual(stream.getvalue(),
                         'message 4\n'
                         'message 5\n'
                         'Logging rate limit: drop after 2 records/1 sec\n')

    @mock.patch('oslo_log.rate_limit.monotonic_clock')
    def test_rate_limit_except_level(self, mock_clock):
        # CRITICAL records are exempt from the 1-per-second limit.
        mock_clock.return_value = 1
        logger, stream = self.install_filter(1, 1, 'CRITICAL')
        # first burst
        logger.error("error 1")
        logger.error("error 2")
        logger.critical("critical 3")
        logger.critical("critical 4")
        self.assertEqual(stream.getvalue(),
                         'error 1\n'
                         'Logging rate limit: drop after 1 records/1 sec\n'
                         'critical 3\n'
                         'critical 4\n')

    def test_install_twice(self):
        # A second install without an uninstall must be rejected.
        rate_limit.install_filter(100, 1)
        self.assertRaises(RuntimeError, rate_limit.install_filter, 100, 1)

    @mock.patch('oslo_log.rate_limit.monotonic_clock')
    def test_uninstall(self, mock_clock):
        # After uninstalling, no record is dropped.
        mock_clock.return_value = 1
        logger, stream = self.install_filter(1, 1)
        rate_limit.uninstall_filter()
        # not limited
        logger.error("message 1")
        logger.error("message 2")
        logger.error("message 3")
        self.assertEqual(stream.getvalue(),
                         'message 1\n'
                         'message 2\n'
                         'message 3\n')
|
import sys
import os
import requests
import re
from collections import defaultdict
from bs4 import BeautifulSoup
# echo "PATH=\$PATH:~/.local/bin" >> ~/.bashrc
# easy_install --user pip
# pip install --user requests
# or install pip with
# wget https://raw.github.com/pypa/pip/master/contrib/get-pip.py && python get-pip.py --user
# comment for example
# Input: a dump of process names (one per line) named on the command line;
# descriptions are scraped from file.net and neuber.com per process.
in_file_path = "dumps/%s" % os.path.basename(sys.argv[1])
in_file = open(in_file_path, 'r')
out_file_path = "data/%s" % os.path.basename(sys.argv[1])
out_file = open(out_file_path, 'w')
procs = {}        # process name -> scraped description
procs_count = {}  # process name -> occurrences in the dump
proc_errs = {}    # process name -> HTTP status when both lookups failed
sec_rating = {}   # process name -> "NN%" technical security rating
for line in in_file:
    line = line.rstrip().lower()
    desc = ""
    # Normalise names so every entry ends in ".exe".
    if ".exe" not in line:
        line += ".exe"
    if line == "wininit.exe":
        continue
    # if the line is already in procs, dont request again
    if line not in procs:
        url = "https://www.file.net/process/%s.html" % line
        page = requests.get(url)
        # only try and parse the page if successful request
        if page.status_code == 200:
            soup = BeautifulSoup(page.text, "html.parser")
            # get description above the picture
            for para in soup.find(id="GreyBox").find_all("p"):
                # don't get ad
                if not para.find(text=re.compile("Click to Run a Free")):
                    # don't get exe wanting
                    if not para.find(text=re.compile("exe extension on a")):
                        if desc:
                            desc += "\n\n"
                        desc += str(para.text)
            additional_desc = soup.find(itemprop="description").parent.text
            additional_desc = additional_desc.replace("\n", "\n\n")
            if desc:
                desc += "\n\n"
            # Skip the extra description when it duplicates the first one.
            if additional_desc[0:25] != desc[0:25]:
                desc += additional_desc
            # Pull the "NN% dangerous" security rating out of the text.
            rating = re.findall(r'\d+% dangerous', desc)
            if rating:
                rating = re.findall(r'\d+%', rating[0])[0]
                sec_rating[line] = rating
        # Second source for the description.
        url2 = "https://www.neuber.com/taskmanager/process/%s.html" % line
        page2 = requests.get(url2)
        if page2.status_code == 200:
            soup = BeautifulSoup(page2.text, "html.parser")
            content = ""
            try:
                content = soup.find(id="content").find_all("br")[3].next_sibling.next_sibling.text
            except:
                pass
            if content:
                if desc:
                    desc += "\n\n"
                desc += content
        if page.status_code != 200 and page2.status_code != 200:
            proc_errs[line] = page.status_code
        if desc:
            procs[line] = desc
    # Count every occurrence, including repeats of already-scraped names.
    if line in procs_count:
        procs_count[line] += 1
    else:
        procs_count[line] = 1
# file header
out_file.write("ANALYSIS OF: %s\n--------------------------------\n" % os.path.basename(in_file_path))
# attributes section
out_file.write("ATTRIBUTES:\n\n")
out_file.write("Processes: %s\n" % len(procs))
out_file.write("Retrieval Errors: %s\n" % len(proc_errs))
# high_ratings = {k:v for k:v in sec_rating.iteritems() if v >}
# out_file.write("Technical Security Ratings above 50%: %s\n", high_ratings)
out_file.write("\n--------------------------------\n")
# error section
out_file.write("RETRIEVAL ERRORS:\n\n")
for proc, error_code in proc_errs.items():
    out_file.write("%s: %s\n" % (proc, error_code))
out_file.write("\n--------------------------------\n")
# plain service section
out_file.write("SERVICE LIST:\n\n")
for proc, description in procs.items():
    # Append the security rating in parentheses when one was found.
    rating = ""
    if sec_rating.get(proc):
        rating = "(%s)" % sec_rating.get(proc)
    out_file.write("%s %s\n" % (proc, rating))
out_file.write("\n--------------------------------\n")
# process descriptions
out_file.write("PROCESS DESCRIPTIONS\n\n")
for proc, description in procs.items():
    out_file.write("\nProcess: %s\n" % proc)
    out_file.write("Count: %s\n" % procs_count[proc])
    if sec_rating.get(proc):
        out_file.write("Technical Security Rating: %s\n\n" % sec_rating[proc])
    else:
        out_file.write("\n")
    out_file.write(description)
    out_file.write("\n\n----------------\n")
in_file.close()
out_file.close()
|
from setuptools import setup, find_packages
# Long-form description shown on PyPI comes from Long.md.
with open("Long.md", "r") as fh:
    long_description = fh.read()

# Runtime dependencies: one requirement per whitespace-separated token.
with open("requirements.txt", "r") as freq:
    required = freq.read().split()

setup(
    name="sonosco",
    version="0.1.0",
    author="Roboy",
    description="Framework for training deep automatic speech recognition models.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/Roboy/sonosco/tree/demo",
    packages=find_packages(),
    include_package_data=True,
    dependency_links=[],
    classifiers=[
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "License :: OSI Approved :: BSD License",
    ],
    python_requires='>=3.6',
    install_requires=required
)
|
# Teaching script: demonstrates common str methods (count, isdigit, isalpha,
# find, replace, split) with interactive prompts.
# s.count(char) returns the number of occurrences of char in string:
college = "Wake Tech"
num_e = college.count("e")
print("The number of e's in college:", num_e)
# checking every character in a string using methods isalpha, isdigit,
# isupper, islower
# card_num = input("Please enter your 16-digit card number: ")
# while not card_num.isdigit():
#     print("Invalid card number.")
#     card_num = input("Please enter your 16-digit card number:")
# print("Card number entered: ", card_num)
print()
# Re-prompt until the zip code is all digits.
postal_code = input("Please enter a 5 digit zip code: ")
while not postal_code.isdigit():
    print("Invalid zip code")
    postal_code = input("Please enter a 5 digit zip code: ")
print("Zip code entered: ", postal_code)
print()
city = input("Please enter a city: ")
if city.isalpha():
    print("City name: ", city)
else:
    print("City name can only contain letters.")
print()
# the find method returns the starting index of a target substring. If no
# occurrence is found a -1 is returned
state = "Mississippi"
print(state)
first_s = state.find("s")
print("Index of first 's': ", first_s)
first_k = state.find("k")
print("Index of first 'k': ", first_k)
first_iss = state.find("iss")
print("Index of first 'iss': ", first_iss)
print()
# the replace method produces a new string with every occurrence of a given
# substring replaced
word = "common"
print("original word: ", word)
word2 = word.replace("m", "t")
print("All 'm' replaced with 't': ", word2)
print()
# the split method separated the original string into substrings and stores
# them as separate elements in the list
my_string = "one two three four five"
word_list = my_string.split()
print(word_list)
print()
date_string = "04/22/1970"
date_list = date_string.split("/")
print("Month: ", date_list[0])
print("Day: ", date_list[1])
print("Year: ", date_list[2])
|
#!/usr/bin/env python
# coding: utf-8
# ## Session 2
#
# ## Assignment 1 Question
#
# ## Problem Statement
# 1. Write a program which accepts a sequence of comma-separated numbers from console and
# generate a list.
#
# 2. Create the below pattern using nested for loop in Python.
#
# *
# * *
# * * *
# * * * *
# * * * * *
# * * * *
# * * *
# * *
# *
#
# 3. Write a Python program to reverse a word after accepting the input from the user.
# Sample Output:
# Input word: AcadGild
# Output: dilGdacA
#
# 4. Write a Python Program to print the given string in the format specified in the sample
# output.
# WE, THE PEOPLE OF INDIA, having solemnly resolved to constitute India into a SOVEREIGN,
# SOCIALIST, SECULAR, DEMOCRATIC REPUBLIC and to secure to all its citizens
# Sample Output:
# WE, THE PEOPLE OF INDIA,
# having solemnly resolved to constitute India into a SOVEREIGN, !
# SOCIALIST, SECULAR, DEMOCRATIC REPUBLIC
# and to secure to all its citizens
# In[23]:
## 1. Write a program which accepts a sequence of comma-separated numbers from console and generate a list
#----------------------------------------------------------------------------------------------------------
# FIX: the original bound the name `list` (shadowing the builtin) and its
# initial empty-list assignment was dead code.
user_input = input("Please enter the number:")
l = user_input.split(',')
print("Here is the list you have entered:")
print(l)
# In[24]:
##2. Create the below pattern using nested for loop in Python.
#*
#* *
#* * *
#* * * *
#* * * * *
#* * * *
#* * *
#* *
#*
#----------------------------------------------------------------------------------------------------------
# Growing half (0..5 stars), then shrinking half (4..1 stars).
for i in range(6):
    print('* '*i)
for j in range(4, 0, -1):
    print('* '*j)
# In[26]:
#3. Write a Python program to reverse a word after accepting the input from the user.
#Sample Output:
#Input word: AcadGild
#Output: dliGdacA
#----------------------------------------------------------------------------------------------------------
# Slice with a negative step reverses the string.
word= input("Enter a word:")
print("output", word[::-1])
# In[26]:
#4. Write a Python Program to print the given string in the format specified in the sample output.
#WE, THE PEOPLE OF INDIA, having solemnly resolved to constitute India into a SOVEREIGN,
#SOCIALIST, SECULAR, DEMOCRATIC REPUBLIC and to secure to all its citizens
#Sample Output:
#WE, THE PEOPLE OF INDIA,
#having solemnly resolved to constitute India into a SOVEREIGN, !
#SOCIALIST, SECULAR, DEMOCRATIC REPUBLIC
#and to secure to all its citizens
#-----------------------------------------------------------------------------------------------------------------
print('''WE, THE PEOPLE OF INDIA,\nhaving solemnly resolved to constitute India into a SOVEREIGN, !\nSOCIALIST, SECULAR, DEMOCRATIC REPUBLIC \nand to secure to all its citizens''')
# In[ ]:
|
"""
LeetCode - Easy
"""
"""
Given a stream of integers and a window size, calculate the moving average of all integers in the sliding window.
Example:
MovingAverage m = new MovingAverage(3);
m.next(1) = 1
m.next(10) = (1 + 10) / 2
m.next(3) = (1 + 10 + 3) / 3
m.next(5) = (10 + 3 + 5) / 3
"""
from collections import deque
class MovingAverage:
    """Running average over the last *size* values (sliding window)."""

    def __init__(self, size: int):
        """Create an empty window holding at most *size* values."""
        self.queue = deque(maxlen=size)
        self.elements = 0
        self.sum = 0  # holds the current average between calls

    def next(self, val: int) -> float:
        """Add *val* to the window and return the current average."""
        window_full = self.elements == self.queue.maxlen
        if window_full:
            # Swap the oldest value's contribution for the new one.
            self.sum = self.sum * self.elements - self.queue[0] + val
        else:
            self.sum = self.sum * self.elements + val
            self.elements += 1
        self.sum = self.sum / self.elements
        # deque with maxlen evicts the oldest entry automatically.
        self.queue.append(val)
        return self.sum
# Your MovingAverage object will be instantiated and called as such:
if __name__ == '__main__':
    # Quick manual check: feed 1..4 through a window of 3 and show the
    # final average, i.e. (2 + 3 + 4) / 3.
    averager = MovingAverage(3)
    for value in (1, 2, 3, 4):
        result = averager.next(value)
    print(result)
#_*_coding:utf-8_*_
# Author:Topaz
import tornado.ioloop
import tornado.web
from tornado import gen
from tornado.concurrent import Future
from MyTornado import uimethods as mt
from MyTornado import uimodules as md
class BaseHandler(tornado.web.RequestHandler):
    """Base handler that treats the signed "login_user" cookie as the current user."""
    def get_current_user(self):
        # Returns the secure-cookie value, or None when the cookie is absent/invalid.
        return self.get_secure_cookie("login_user")
class LoginHandler(tornado.web.RequestHandler):
    '''GET http://127.0.0.1:8888/login ==> renders son.html; the POST echoes the
    content of the <input name="username"> field (and, per the original note,
    pops up "666").'''
    @gen.coroutine
    def get(self):
        # Render the login page, then suspend the coroutine on a Future whose
        # done-callback is this handler's post().
        # NOTE(review): nothing ever resolves this Future here, so the
        # coroutine appears to wait indefinitely — confirm intent.
        self.render('son.html')
        future = Future()
        future.add_done_callback(self.post)
        yield future
    def post(self, *args, **kwargs):
        print('nihao')
        # Fetch the uploaded files; "fff" is the name of the upload <input>.
        # NOTE(review): duplicated `file_metas = file_metas =` kept as-is.
        file_metas = file_metas = self.request.files["fff"]
        print(file_metas)
        for meta in file_metas:
            file_name = meta['filename']  # 'filename' is the client-side file name
            with open(file_name, 'wb') as f:
                f.write(meta['body'])  # write the uploaded body to disk
        self.redirect('/')
        # NOTE(review): redirect() does not return, so the login check below
        # still executes on every POST — confirm this is intended.
        user = self.get_argument('username')
        if user == "topaz":
            self.set_secure_cookie('login_user', 'Topaz')
            self.redirect('/')
        else:
            self.render('son.html', **{'status': '用户名或密码错误'})
class MainHandler(tornado.web.RequestHandler):
    def get(self):
        """Show the logged-in user's name, or bounce anonymous visitors to /login."""
        user = self.get_secure_cookie('login_user', None)
        if not user:
            self.redirect('/login')
        else:
            self.write(user)
# Tornado application settings.
settings = {
    'template_path': 'tpl',  # directory containing the HTML templates
    'ui_methods': mt,  # register UI methods
    'ui_modules': md,  # register UI modules
    'static_path': 'static',  # location of static files
    'static_url_prefix': '/static/',  # URL prefix for static assets, e.g. <img src="/static/a.png?v=...">
    'cookie_secret': 't114uXSkw1SZ6xlWOCASQWYKkIuW7Wl2bTnbsLzsJyoOI7EqnnaT8HDzFNbB9Ryw',
    "xsrf_cookies": True,  # enable cross-site request forgery protection
}
# Tornado routing table: maps URL patterns to their handlers.
application = tornado.web.Application([
    (r"/login", LoginHandler),  # http://127.0.0.1:8887/login is handled by LoginHandler
    (r"/", MainHandler),
], **settings)
if __name__ == "__main__":
    application.listen(8887)  # bind the HTTP server to port 8887
    # Fix: removed the bogus `tornado.web.type.get_content_version()` call
    # that followed start() — tornado.web has no `type` attribute, so it
    # raised AttributeError as soon as the IOLoop stopped.
    tornado.ioloop.IOLoop.instance().start()  # run the event loop until stopped
from setuptools import setup
# Minimal packaging definition for the `trainer` package.
setup(
    name='trainer',  # distribution name
    version='0.0.0',
    packages=['trainer'],  # the single top-level package to ship
    include_package_data=True,  # also bundle non-code files from MANIFEST.in
    install_requires=['tensorflow'],  # runtime dependency
)
|
# https://www.hackerrank.com/contests/saggezza-coding-test/challenges/the-birthday-bar
def birthday(s, d, m):
    """Count contiguous segments of exactly `m` squares of `s` that sum to `d`.

    (HackerRank "Birthday Chocolate".)

    :param s: list of non-negative ints, the values on the chocolate squares
    :param d: int, the required segment sum (Ron's birth day)
    :param m: int >= 1, the required segment length (Ron's birth month)
    :return: number of length-m windows whose sum equals d (0 for empty s
             or m > len(s))

    Fix: the original early-exited a window as soon as the running sum hit
    `d`, so a window whose prefix already summed to `d` followed by
    0-valued squares was never counted — e.g. birthday([3, 0], 3, 2)
    returned 0 instead of 1. Summing each full fixed-size window avoids that.
    """
    count = 0
    # Slide a fixed window of length m across s; range is empty when m > len(s).
    for start in range(len(s) - m + 1):
        if sum(s[start:start + m]) == d:
            count += 1
    return count
# Sanity checks: each line prints True when birthday() matches the expected count.
cases = [
    (([1, 2, 1, 3, 2], 3, 2), 2),
    (([1, 1, 1, 1, 1, 1], 3, 2), 0),
    (([1, 1, 1, 1, 1, 1], 3, 3), 4),
    (([4], 4, 1), 1),
]
for args, expected in cases:
    print(birthday(*args) == expected)
|
# coding: utf-8
"""
NiFi Rest API
The Rest API provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.
OpenAPI spec version: 1.19.0
Contact: dev@nifi.apache.org
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class FlowApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def activate_controller_services(self, id, body, **kwargs):
"""
Enable or disable Controller Services in the specified Process Group.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.activate_controller_services(id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The process group id. (required)
:param ActivateControllerServicesEntity body: The request to schedule or unschedule. If the comopnents in the request are not specified, all authorized components will be considered. (required)
:return: ActivateControllerServicesEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.activate_controller_services_with_http_info(id, body, **kwargs)
else:
(data) = self.activate_controller_services_with_http_info(id, body, **kwargs)
return data
    def activate_controller_services_with_http_info(self, id, body, **kwargs):
        """
        Enable or disable Controller Services in the specified Process Group.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.activate_controller_services_with_http_info(id, body, callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str id: The process group id. (required)
        :param ActivateControllerServicesEntity body: The request to schedule or unschedule. If the components in the request are not specified, all authorized components will be considered. (required)
        :return: ActivateControllerServicesEntity
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['id', 'body']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        # Reject any keyword argument the generated API does not recognise.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method activate_controller_services" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'id' is set
        if ('id' not in params) or (params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `activate_controller_services`")
        # verify the required parameter 'body' is set
        if ('body' not in params) or (params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `activate_controller_services`")
        collection_formats = {}
        path_params = {}
        if 'id' in params:
            path_params['id'] = params['id']
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = ['tokenAuth']
        # Delegate the actual HTTP exchange to the shared ApiClient.
        return self.api_client.call_api('/flow/process-groups/{id}/controller-services', 'PUT',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='ActivateControllerServicesEntity',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def generate_client_id(self, **kwargs):
"""
Generates a client id.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.generate_client_id(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.generate_client_id_with_http_info(**kwargs)
else:
(data) = self.generate_client_id_with_http_info(**kwargs)
return data
    def generate_client_id_with_http_info(self, **kwargs):
        """
        Generates a client id.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.generate_client_id_with_http_info(callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :return: str
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = []
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        # Reject any keyword argument the generated API does not recognise.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method generate_client_id" % key
                )
            params[key] = val
        del params['kwargs']
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['text/plain'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['*/*'])
        # Authentication setting
        auth_settings = ['tokenAuth']
        # Delegate the actual HTTP exchange to the shared ApiClient.
        return self.api_client.call_api('/flow/client-id', 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='str',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def get_about_info(self, **kwargs):
"""
Retrieves details about this NiFi to put in the About dialog
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_about_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: AboutEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_about_info_with_http_info(**kwargs)
else:
(data) = self.get_about_info_with_http_info(**kwargs)
return data
    def get_about_info_with_http_info(self, **kwargs):
        """
        Retrieves details about this NiFi to put in the About dialog.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.get_about_info_with_http_info(callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :return: AboutEntity
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = []
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        # Reject any keyword argument the generated API does not recognise.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_about_info" % key
                )
            params[key] = val
        del params['kwargs']
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['*/*'])
        # Authentication setting
        auth_settings = ['tokenAuth']
        # Delegate the actual HTTP exchange to the shared ApiClient.
        return self.api_client.call_api('/flow/about', 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='AboutEntity',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def get_action(self, id, **kwargs):
"""
Gets an action
Note: This endpoint is subject to change as NiFi and it's REST API evolve.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_action(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The action id. (required)
:return: ActionEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_action_with_http_info(id, **kwargs)
else:
(data) = self.get_action_with_http_info(id, **kwargs)
return data
    def get_action_with_http_info(self, id, **kwargs):
        """
        Gets an action.
        Note: This endpoint is subject to change as NiFi and its REST API evolve.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.get_action_with_http_info(id, callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str id: The action id. (required)
        :return: ActionEntity
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['id']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        # Reject any keyword argument the generated API does not recognise.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_action" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'id' is set
        if ('id' not in params) or (params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `get_action`")
        collection_formats = {}
        path_params = {}
        if 'id' in params:
            path_params['id'] = params['id']
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['*/*'])
        # Authentication setting
        auth_settings = ['tokenAuth']
        # Delegate the actual HTTP exchange to the shared ApiClient.
        return self.api_client.call_api('/flow/history/{id}', 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='ActionEntity',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def get_banners(self, **kwargs):
"""
Retrieves the banners for this NiFi
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_banners(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: BannerEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_banners_with_http_info(**kwargs)
else:
(data) = self.get_banners_with_http_info(**kwargs)
return data
    def get_banners_with_http_info(self, **kwargs):
        """
        Retrieves the banners for this NiFi.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.get_banners_with_http_info(callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :return: BannerEntity
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = []
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        # Reject any keyword argument the generated API does not recognise.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_banners" % key
                )
            params[key] = val
        del params['kwargs']
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['*/*'])
        # Authentication setting
        auth_settings = ['tokenAuth']
        # Delegate the actual HTTP exchange to the shared ApiClient.
        return self.api_client.call_api('/flow/banners', 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='BannerEntity',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def get_buckets(self, id, **kwargs):
"""
Gets the buckets from the specified registry for the current user
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_buckets(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The registry id. (required)
:return: FlowRegistryBucketsEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_buckets_with_http_info(id, **kwargs)
else:
(data) = self.get_buckets_with_http_info(id, **kwargs)
return data
    def get_buckets_with_http_info(self, id, **kwargs):
        """
        Gets the buckets from the specified registry for the current user.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.get_buckets_with_http_info(id, callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str id: The registry id. (required)
        :return: FlowRegistryBucketsEntity
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['id']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        # Reject any keyword argument the generated API does not recognise.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_buckets" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'id' is set
        if ('id' not in params) or (params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `get_buckets`")
        collection_formats = {}
        path_params = {}
        if 'id' in params:
            path_params['id'] = params['id']
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['*/*'])
        # Authentication setting
        auth_settings = ['tokenAuth']
        # Delegate the actual HTTP exchange to the shared ApiClient.
        return self.api_client.call_api('/flow/registries/{id}/buckets', 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='FlowRegistryBucketsEntity',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def get_bulletin_board(self, **kwargs):
"""
Gets current bulletins
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_bulletin_board(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str after: Includes bulletins with an id after this value.
:param str source_name: Includes bulletins originating from this sources whose name match this regular expression.
:param str message: Includes bulletins whose message that match this regular expression.
:param str source_id: Includes bulletins originating from this sources whose id match this regular expression.
:param str group_id: Includes bulletins originating from this sources whose group id match this regular expression.
:param str limit: The number of bulletins to limit the response to.
:return: BulletinBoardEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_bulletin_board_with_http_info(**kwargs)
else:
(data) = self.get_bulletin_board_with_http_info(**kwargs)
return data
    def get_bulletin_board_with_http_info(self, **kwargs):
        """
        Gets current bulletins.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.get_bulletin_board_with_http_info(callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str after: Includes bulletins with an id after this value.
        :param str source_name: Includes bulletins originating from this sources whose name match this regular expression.
        :param str message: Includes bulletins whose message that match this regular expression.
        :param str source_id: Includes bulletins originating from this sources whose id match this regular expression.
        :param str group_id: Includes bulletins originating from this sources whose group id match this regular expression.
        :param str limit: The number of bulletins to limit the response to.
        :return: BulletinBoardEntity
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['after', 'source_name', 'message', 'source_id', 'group_id', 'limit']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        # Reject any keyword argument the generated API does not recognise.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_bulletin_board" % key
                )
            params[key] = val
        del params['kwargs']
        collection_formats = {}
        path_params = {}
        query_params = []
        # Map snake_case keyword arguments to the camelCase query parameters
        # the NiFi REST API expects.
        if 'after' in params:
            query_params.append(('after', params['after']))
        if 'source_name' in params:
            query_params.append(('sourceName', params['source_name']))
        if 'message' in params:
            query_params.append(('message', params['message']))
        if 'source_id' in params:
            query_params.append(('sourceId', params['source_id']))
        if 'group_id' in params:
            query_params.append(('groupId', params['group_id']))
        if 'limit' in params:
            query_params.append(('limit', params['limit']))
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['*/*'])
        # Authentication setting
        auth_settings = ['tokenAuth']
        # Delegate the actual HTTP exchange to the shared ApiClient.
        return self.api_client.call_api('/flow/bulletin-board', 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='BulletinBoardEntity',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def get_bulletins(self, **kwargs):
"""
Retrieves Controller level bulletins
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_bulletins(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: ControllerBulletinsEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_bulletins_with_http_info(**kwargs)
else:
(data) = self.get_bulletins_with_http_info(**kwargs)
return data
    def get_bulletins_with_http_info(self, **kwargs):
        """
        Retrieves Controller level bulletins.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.get_bulletins_with_http_info(callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :return: ControllerBulletinsEntity
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = []
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        # Reject any keyword argument the generated API does not recognise.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_bulletins" % key
                )
            params[key] = val
        del params['kwargs']
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['*/*'])
        # Authentication setting
        auth_settings = ['tokenAuth']
        # Delegate the actual HTTP exchange to the shared ApiClient.
        return self.api_client.call_api('/flow/controller/bulletins', 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='ControllerBulletinsEntity',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def get_cluster_summary(self, **kwargs):
"""
The cluster summary for this NiFi
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_cluster_summary(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: ClusteSummaryEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_cluster_summary_with_http_info(**kwargs)
else:
(data) = self.get_cluster_summary_with_http_info(**kwargs)
return data
    def get_cluster_summary_with_http_info(self, **kwargs):
        """
        The cluster summary for this NiFi.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.get_cluster_summary_with_http_info(callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :return: ClusteSummaryEntity
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = []
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        # Reject any keyword argument the generated API does not recognise.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_cluster_summary" % key
                )
            params[key] = val
        del params['kwargs']
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['*/*'])
        # Authentication setting
        auth_settings = ['tokenAuth']
        # NOTE: 'ClusteSummaryEntity' (sic) is the generated model name — keep as-is.
        # Delegate the actual HTTP exchange to the shared ApiClient.
        return self.api_client.call_api('/flow/cluster/summary', 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='ClusteSummaryEntity',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def get_component_history(self, component_id, **kwargs):
    """
    Gets configuration history for a component.

    Runs synchronously by default; pass a `callback` callable to have the
    request execute on a background thread instead (the callback receives
    the response).

    :param str component_id: The component id. (required)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: ComponentHistoryEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Ask the transport layer for the payload alone (no status/headers).
    kwargs['_return_http_data_only'] = True
    # Sync calls yield the deserialized entity, async calls yield the
    # request thread — either way the low-level result is what the
    # caller should receive, so hand it straight through.
    outcome = self.get_component_history_with_http_info(component_id, **kwargs)
    return outcome
def get_component_history_with_http_info(self, component_id, **kwargs):
    """
    Gets configuration history for a component
    Note: This endpoint is subject to change as NiFi and it's REST API evolve.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.get_component_history_with_http_info(component_id, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str component_id: The component id. (required)
    :return: ComponentHistoryEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Endpoint-specific kwargs plus the shared framework-level options.
    all_params = ['component_id']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # locals() snapshot holds `self`, the named params and `kwargs`;
    # validated kwargs are merged in and read back via params.get below.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_component_history" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'component_id' is set
    if ('component_id' not in params) or (params['component_id'] is None):
        raise ValueError("Missing the required parameter `component_id` when calling `get_component_history`")
    collection_formats = {}
    path_params = {}
    if 'component_id' in params:
        # Substituted into the {componentId} template segment of the URL.
        path_params['componentId'] = params['component_id']
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['tokenAuth']
    return self.api_client.call_api('/flow/history/components/{componentId}', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='ComponentHistoryEntity',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_connection_statistics(self, id, **kwargs):
    """
    Gets statistics for a connection.

    Runs synchronously by default; pass a `callback` callable to have the
    request execute on a background thread instead (the callback receives
    the response).

    :param str id: The connection id. (required)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param bool nodewise: Whether or not to include the breakdown per node. Optional, defaults to false
    :param str cluster_node_id: The id of the node where to get the statistics.
    :return: ConnectionStatisticsEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Ask the transport layer for the payload alone (no status/headers).
    kwargs['_return_http_data_only'] = True
    # Sync calls yield the entity, async calls yield the request thread;
    # both are exactly what the caller wants, so pass the result through.
    outcome = self.get_connection_statistics_with_http_info(id, **kwargs)
    return outcome
def get_connection_statistics_with_http_info(self, id, **kwargs):
    """
    Gets statistics for a connection
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.get_connection_statistics_with_http_info(id, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: The connection id. (required)
    :param bool nodewise: Whether or not to include the breakdown per node. Optional, defaults to false
    :param str cluster_node_id: The id of the node where to get the statistics.
    :return: ConnectionStatisticsEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Endpoint-specific kwargs plus the shared framework-level options.
    all_params = ['id', 'nodewise', 'cluster_node_id']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # locals() snapshot holds `self`, the named params and `kwargs`;
    # validated kwargs are merged in and read back via params.get below.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_connection_statistics" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params) or (params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `get_connection_statistics`")
    collection_formats = {}
    path_params = {}
    if 'id' in params:
        # Substituted into the {id} template segment of the URL.
        path_params['id'] = params['id']
    query_params = []
    # Optional query parameters are only sent when explicitly supplied.
    if 'nodewise' in params:
        query_params.append(('nodewise', params['nodewise']))
    if 'cluster_node_id' in params:
        query_params.append(('clusterNodeId', params['cluster_node_id']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['tokenAuth']
    return self.api_client.call_api('/flow/connections/{id}/statistics', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='ConnectionStatisticsEntity',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_connection_status(self, id, **kwargs):
    """
    Gets status for a connection.

    Runs synchronously by default; pass a `callback` callable to have the
    request execute on a background thread instead (the callback receives
    the response).

    :param str id: The connection id. (required)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param bool nodewise: Whether or not to include the breakdown per node. Optional, defaults to false
    :param str cluster_node_id: The id of the node where to get the status.
    :return: ConnectionStatusEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Ask the transport layer for the payload alone (no status/headers).
    kwargs['_return_http_data_only'] = True
    # Sync calls yield the entity, async calls yield the request thread;
    # both are exactly what the caller wants, so pass the result through.
    outcome = self.get_connection_status_with_http_info(id, **kwargs)
    return outcome
def get_connection_status_with_http_info(self, id, **kwargs):
    """
    Gets status for a connection
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.get_connection_status_with_http_info(id, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: The connection id. (required)
    :param bool nodewise: Whether or not to include the breakdown per node. Optional, defaults to false
    :param str cluster_node_id: The id of the node where to get the status.
    :return: ConnectionStatusEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Endpoint-specific kwargs plus the shared framework-level options.
    all_params = ['id', 'nodewise', 'cluster_node_id']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # locals() snapshot holds `self`, the named params and `kwargs`;
    # validated kwargs are merged in and read back via params.get below.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_connection_status" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params) or (params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `get_connection_status`")
    collection_formats = {}
    path_params = {}
    if 'id' in params:
        # Substituted into the {id} template segment of the URL.
        path_params['id'] = params['id']
    query_params = []
    # Optional query parameters are only sent when explicitly supplied.
    if 'nodewise' in params:
        query_params.append(('nodewise', params['nodewise']))
    if 'cluster_node_id' in params:
        query_params.append(('clusterNodeId', params['cluster_node_id']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['tokenAuth']
    return self.api_client.call_api('/flow/connections/{id}/status', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='ConnectionStatusEntity',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_connection_status_history(self, id, **kwargs):
    """
    Gets the status history for a connection.

    Runs synchronously by default; pass a `callback` callable to have the
    request execute on a background thread instead (the callback receives
    the response).

    :param str id: The connection id. (required)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: StatusHistoryEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Ask the transport layer for the payload alone (no status/headers).
    kwargs['_return_http_data_only'] = True
    # Sync calls yield the entity, async calls yield the request thread;
    # both are exactly what the caller wants, so pass the result through.
    outcome = self.get_connection_status_history_with_http_info(id, **kwargs)
    return outcome
def get_connection_status_history_with_http_info(self, id, **kwargs):
    """
    Gets the status history for a connection
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.get_connection_status_history_with_http_info(id, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: The connection id. (required)
    :return: StatusHistoryEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Endpoint-specific kwargs plus the shared framework-level options.
    all_params = ['id']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # locals() snapshot holds `self`, the named params and `kwargs`;
    # validated kwargs are merged in and read back via params.get below.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_connection_status_history" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params) or (params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `get_connection_status_history`")
    collection_formats = {}
    path_params = {}
    if 'id' in params:
        # Substituted into the {id} template segment of the URL.
        path_params['id'] = params['id']
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['tokenAuth']
    return self.api_client.call_api('/flow/connections/{id}/status/history', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='StatusHistoryEntity',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_controller_service_types(self, **kwargs):
    """
    Retrieves the types of controller services that this NiFi supports.

    Note: This endpoint is subject to change as NiFi and its REST API
    evolve. Runs synchronously by default; pass a `callback` callable to
    have the request execute on a background thread instead.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str service_type: If specified, will only return controller services that are compatible with this type of service.
    :param str service_bundle_group: If serviceType specified, is the bundle group of the serviceType.
    :param str service_bundle_artifact: If serviceType specified, is the bundle artifact of the serviceType.
    :param str service_bundle_version: If serviceType specified, is the bundle version of the serviceType.
    :param str bundle_group_filter: If specified, will only return types that are a member of this bundle group.
    :param str bundle_artifact_filter: If specified, will only return types that are a member of this bundle artifact.
    :param str type_filter: If specified, will only return types whose fully qualified classname matches.
    :return: ControllerServiceTypesEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Ask the transport layer for the payload alone (no status/headers).
    kwargs['_return_http_data_only'] = True
    # Sync calls yield the entity, async calls yield the request thread;
    # both are exactly what the caller wants, so pass the result through.
    outcome = self.get_controller_service_types_with_http_info(**kwargs)
    return outcome
def get_controller_service_types_with_http_info(self, **kwargs):
    """
    Retrieves the types of controller services that this NiFi supports
    Note: This endpoint is subject to change as NiFi and it's REST API evolve.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.get_controller_service_types_with_http_info(callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str service_type: If specified, will only return controller services that are compatible with this type of service.
    :param str service_bundle_group: If serviceType specified, is the bundle group of the serviceType.
    :param str service_bundle_artifact: If serviceType specified, is the bundle artifact of the serviceType.
    :param str service_bundle_version: If serviceType specified, is the bundle version of the serviceType.
    :param str bundle_group_filter: If specified, will only return types that are a member of this bundle group.
    :param str bundle_artifact_filter: If specified, will only return types that are a member of this bundle artifact.
    :param str type_filter: If specified, will only return types whose fully qualified classname matches.
    :return: ControllerServiceTypesEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Endpoint-specific kwargs plus the shared framework-level options.
    all_params = ['service_type', 'service_bundle_group', 'service_bundle_artifact', 'service_bundle_version', 'bundle_group_filter', 'bundle_artifact_filter', 'type_filter']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # locals() snapshot holds `self` and `kwargs`; validated kwargs are
    # merged in and read back via params.get below.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_controller_service_types" % key
            )
        params[key] = val
    del params['kwargs']
    collection_formats = {}
    path_params = {}
    query_params = []
    # Optional query parameters are only sent when explicitly supplied;
    # snake_case kwargs map to the API's camelCase query names.
    if 'service_type' in params:
        query_params.append(('serviceType', params['service_type']))
    if 'service_bundle_group' in params:
        query_params.append(('serviceBundleGroup', params['service_bundle_group']))
    if 'service_bundle_artifact' in params:
        query_params.append(('serviceBundleArtifact', params['service_bundle_artifact']))
    if 'service_bundle_version' in params:
        query_params.append(('serviceBundleVersion', params['service_bundle_version']))
    if 'bundle_group_filter' in params:
        query_params.append(('bundleGroupFilter', params['bundle_group_filter']))
    if 'bundle_artifact_filter' in params:
        query_params.append(('bundleArtifactFilter', params['bundle_artifact_filter']))
    if 'type_filter' in params:
        query_params.append(('typeFilter', params['type_filter']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['tokenAuth']
    return self.api_client.call_api('/flow/controller-service-types', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='ControllerServiceTypesEntity',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_controller_services_from_controller(self, **kwargs):
    """
    Gets controller services for reporting tasks.

    If the uiOnly query parameter is provided with a value of true, the
    returned entity may only contain fields needed to render the NiFi UI;
    those fields may change without warning, so non-UI clients should not
    send it. Runs synchronously by default; pass a `callback` callable to
    have the request execute on a background thread instead.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param bool ui_only:
    :return: ControllerServicesEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Ask the transport layer for the payload alone (no status/headers).
    kwargs['_return_http_data_only'] = True
    # Sync calls yield the entity, async calls yield the request thread;
    # both are exactly what the caller wants, so pass the result through.
    outcome = self.get_controller_services_from_controller_with_http_info(**kwargs)
    return outcome
def get_controller_services_from_controller_with_http_info(self, **kwargs):
    """
    Gets controller services for reporting tasks
    If the uiOnly query parameter is provided with a value of true, the returned entity may only contain fields that are necessary for rendering the NiFi User Interface. As such, the selected fields may change at any time, even during incremental releases, without warning. As a result, this parameter should not be provided by any client other than the UI.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.get_controller_services_from_controller_with_http_info(callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param bool ui_only:
    :return: ControllerServicesEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Endpoint-specific kwargs plus the shared framework-level options.
    all_params = ['ui_only']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # locals() snapshot holds `self` and `kwargs`; validated kwargs are
    # merged in and read back via params.get below.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_controller_services_from_controller" % key
            )
        params[key] = val
    del params['kwargs']
    collection_formats = {}
    path_params = {}
    query_params = []
    # Optional query parameter; only sent when explicitly supplied.
    if 'ui_only' in params:
        query_params.append(('uiOnly', params['ui_only']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['tokenAuth']
    return self.api_client.call_api('/flow/controller/controller-services', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='ControllerServicesEntity',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_controller_services_from_group(self, id, **kwargs):
    """
    Gets all controller services for a process group.

    If the uiOnly query parameter is provided with a value of true, the
    returned entity may only contain fields needed to render the NiFi UI;
    those fields may change without warning, so non-UI clients should not
    send it. Runs synchronously by default; pass a `callback` callable to
    have the request execute on a background thread instead.

    :param str id: The process group id. (required)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param bool include_ancestor_groups: Whether or not to include parent/ancestory process groups
    :param bool include_descendant_groups: Whether or not to include descendant process groups
    :param bool ui_only:
    :return: ControllerServicesEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Ask the transport layer for the payload alone (no status/headers).
    kwargs['_return_http_data_only'] = True
    # Sync calls yield the entity, async calls yield the request thread;
    # both are exactly what the caller wants, so pass the result through.
    outcome = self.get_controller_services_from_group_with_http_info(id, **kwargs)
    return outcome
def get_controller_services_from_group_with_http_info(self, id, **kwargs):
    """
    Gets all controller services
    If the uiOnly query parameter is provided with a value of true, the returned entity may only contain fields that are necessary for rendering the NiFi User Interface. As such, the selected fields may change at any time, even during incremental releases, without warning. As a result, this parameter should not be provided by any client other than the UI.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.get_controller_services_from_group_with_http_info(id, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: The process group id. (required)
    :param bool include_ancestor_groups: Whether or not to include parent/ancestory process groups
    :param bool include_descendant_groups: Whether or not to include descendant process groups
    :param bool ui_only:
    :return: ControllerServicesEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Endpoint-specific kwargs plus the shared framework-level options.
    all_params = ['id', 'include_ancestor_groups', 'include_descendant_groups', 'ui_only']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # locals() snapshot holds `self`, the named params and `kwargs`;
    # validated kwargs are merged in and read back via params.get below.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_controller_services_from_group" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params) or (params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `get_controller_services_from_group`")
    collection_formats = {}
    path_params = {}
    if 'id' in params:
        # Substituted into the {id} template segment of the URL.
        path_params['id'] = params['id']
    query_params = []
    # Optional query parameters are only sent when explicitly supplied;
    # snake_case kwargs map to the API's camelCase query names.
    if 'include_ancestor_groups' in params:
        query_params.append(('includeAncestorGroups', params['include_ancestor_groups']))
    if 'include_descendant_groups' in params:
        query_params.append(('includeDescendantGroups', params['include_descendant_groups']))
    if 'ui_only' in params:
        query_params.append(('uiOnly', params['ui_only']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['tokenAuth']
    return self.api_client.call_api('/flow/process-groups/{id}/controller-services', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='ControllerServicesEntity',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_controller_status(self, **kwargs):
    """
    Gets the current status of this NiFi.

    Runs synchronously by default; pass a `callback` callable to have the
    request execute on a background thread instead (the callback receives
    the response).

    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: ControllerStatusEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Ask the transport layer for the payload alone (no status/headers).
    kwargs['_return_http_data_only'] = True
    # Sync calls yield the entity, async calls yield the request thread;
    # both are exactly what the caller wants, so pass the result through.
    outcome = self.get_controller_status_with_http_info(**kwargs)
    return outcome
def get_controller_status_with_http_info(self, **kwargs):
    """
    Gets the current status of this NiFi
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.get_controller_status_with_http_info(callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: ControllerStatusEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # No endpoint-specific kwargs; only the shared framework-level options.
    all_params = []
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # locals() snapshot holds `self` and `kwargs`; validated kwargs are
    # merged in and read back via params.get below.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_controller_status" % key
            )
        params[key] = val
    del params['kwargs']
    collection_formats = {}
    path_params = {}
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['tokenAuth']
    return self.api_client.call_api('/flow/status', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='ControllerStatusEntity',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_current_user(self, **kwargs):
    """
    Retrieves the user identity of the user making the request.

    Runs synchronously by default; pass a `callback` callable to have the
    request execute on a background thread instead (the callback receives
    the response).

    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: CurrentUserEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Ask the transport layer for the payload alone (no status/headers).
    kwargs['_return_http_data_only'] = True
    # Sync calls yield the entity, async calls yield the request thread;
    # both are exactly what the caller wants, so pass the result through.
    outcome = self.get_current_user_with_http_info(**kwargs)
    return outcome
def get_current_user_with_http_info(self, **kwargs):
    """
    Retrieves the user identity of the user making the request
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.get_current_user_with_http_info(callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: CurrentUserEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # No endpoint-specific kwargs; only the shared framework-level options.
    all_params = []
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # locals() snapshot holds `self` and `kwargs`; validated kwargs are
    # merged in and read back via params.get below.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_current_user" % key
            )
        params[key] = val
    del params['kwargs']
    collection_formats = {}
    path_params = {}
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['tokenAuth']
    return self.api_client.call_api('/flow/current-user', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='CurrentUserEntity',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_details(self, registry_id, bucket_id, flow_id, **kwargs):
    """
    Gets the details of a flow from the specified registry and bucket for the specified flow for the current user
    Synchronous by default; supply a `callback` keyword function to run
    the request asynchronously, in which case the request thread is
    returned instead of the entity.
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str registry_id: The registry client id. (required)
    :param str bucket_id: The bucket id. (required)
    :param str flow_id: The flow id. (required)
    :return: VersionedFlowEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Convenience wrapper: callers want just the payload, never the
    # (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # With a callback the helper hands back the request thread; without
    # one it returns the deserialized entity. Pass it through either way.
    return self.get_details_with_http_info(registry_id, bucket_id, flow_id, **kwargs)
def get_details_with_http_info(self, registry_id, bucket_id, flow_id, **kwargs):
    """
    Gets the details of a flow from the specified registry and bucket for the specified flow for the current user
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>> pprint(response)
    >>>
    >>> thread = api.get_details_with_http_info(registry_id, bucket_id, flow_id, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str registry_id: The registry client id. (required)
    :param str bucket_id: The bucket id. (required)
    :param str flow_id: The flow id. (required)
    :return: VersionedFlowEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # API parameters plus the client's transport-control keywords.
    all_params = ['registry_id', 'bucket_id', 'flow_id']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot the local namespace so named args and **kwargs entries can
    # be validated and read uniformly through one dict.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            # Fail fast on unknown keyword arguments.
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_details" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'registry_id' is set
    if ('registry_id' not in params) or (params['registry_id'] is None):
        raise ValueError("Missing the required parameter `registry_id` when calling `get_details`")
    # verify the required parameter 'bucket_id' is set
    if ('bucket_id' not in params) or (params['bucket_id'] is None):
        raise ValueError("Missing the required parameter `bucket_id` when calling `get_details`")
    # verify the required parameter 'flow_id' is set
    if ('flow_id' not in params) or (params['flow_id'] is None):
        raise ValueError("Missing the required parameter `flow_id` when calling `get_details`")
    collection_formats = {}
    # Bind the URL template placeholders {registry-id}/{bucket-id}/{flow-id}.
    path_params = {}
    if 'registry_id' in params:
        path_params['registry-id'] = params['registry_id']
    if 'bucket_id' in params:
        path_params['bucket-id'] = params['bucket_id']
    if 'flow_id' in params:
        path_params['flow-id'] = params['flow_id']
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['tokenAuth']
    # Delegate the HTTP exchange (and optional async dispatch) to the
    # shared ApiClient.
    return self.api_client.call_api('/flow/registries/{registry-id}/buckets/{bucket-id}/flows/{flow-id}/details', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='VersionedFlowEntity',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_flow(self, id, **kwargs):
    """
    Gets a process group
    If the uiOnly query parameter is provided with a value of true, the returned entity may only contain fields that are necessary for rendering the NiFi User Interface. As such, the selected fields may change at any time, even during incremental releases, without warning. As a result, this parameter should not be provided by any client other than the UI.
    Synchronous by default; supply a `callback` keyword function to run
    the request asynchronously, in which case the request thread is
    returned instead of the entity.
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: The process group id. (required)
    :param bool ui_only:
    :return: ProcessGroupFlowEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Convenience wrapper: callers want just the payload, never the
    # (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # With a callback the helper hands back the request thread; without
    # one it returns the deserialized entity. Pass it through either way.
    return self.get_flow_with_http_info(id, **kwargs)
def get_flow_with_http_info(self, id, **kwargs):
    """
    Gets a process group
    If the uiOnly query parameter is provided with a value of true, the returned entity may only contain fields that are necessary for rendering the NiFi User Interface. As such, the selected fields may change at any time, even during incremental releases, without warning. As a result, this parameter should not be provided by any client other than the UI.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>> pprint(response)
    >>>
    >>> thread = api.get_flow_with_http_info(id, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: The process group id. (required)
    :param bool ui_only:
    :return: ProcessGroupFlowEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # API parameters plus the client's transport-control keywords.
    all_params = ['id', 'ui_only']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot the local namespace so named args and **kwargs entries can
    # be validated and read uniformly through one dict.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            # Fail fast on unknown keyword arguments.
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_flow" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params) or (params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `get_flow`")
    collection_formats = {}
    # Bind the URL template placeholder {id}.
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']
    query_params = []
    if 'ui_only' in params:
        query_params.append(('uiOnly', params['ui_only']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['tokenAuth']
    # Delegate the HTTP exchange (and optional async dispatch) to the
    # shared ApiClient.
    return self.api_client.call_api('/flow/process-groups/{id}', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='ProcessGroupFlowEntity',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_flow_config(self, **kwargs):
    """
    Retrieves the configuration for this NiFi flow
    Synchronous by default; supply a `callback` keyword function to run
    the request asynchronously, in which case the request thread is
    returned instead of the entity.
    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: FlowConfigurationEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Convenience wrapper: callers want just the payload, never the
    # (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # With a callback the helper hands back the request thread; without
    # one it returns the deserialized entity. Pass it through either way.
    return self.get_flow_config_with_http_info(**kwargs)
def get_flow_config_with_http_info(self, **kwargs):
    """
    Retrieves the configuration for this NiFi flow
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>> pprint(response)
    >>>
    >>> thread = api.get_flow_config_with_http_info(callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: FlowConfigurationEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # This endpoint takes no API parameters; only the generated client's
    # transport-control keywords are accepted.
    all_params = []
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot the local namespace so named args and **kwargs entries can
    # be validated and read uniformly through one dict.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            # Fail fast on unknown keyword arguments.
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_flow_config" % key
            )
        params[key] = val
    del params['kwargs']
    collection_formats = {}
    path_params = {}
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['tokenAuth']
    # Delegate the HTTP exchange (and optional async dispatch) to the
    # shared ApiClient.
    return self.api_client.call_api('/flow/config', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='FlowConfigurationEntity',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_flow_metrics(self, producer, **kwargs):
    """
    Gets all metrics for the flow from a particular node
    Synchronous by default; supply a `callback` keyword function to run
    the request asynchronously, in which case the request thread is
    returned instead of the entity.
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str producer: The producer for flow file metrics. Each producer may have its own output format. (required)
    :param list[str] included_registries: Set of included metrics registries
    :param str sample_name: Regular Expression Pattern to be applied against the sample name field
    :param str sample_label_value: Regular Expression Pattern to be applied against the sample label value field
    :param str root_field_name: Name of the first field of JSON object. Applicable for JSON producer only.
    :return: StreamingOutput
        If the method is called asynchronously,
        returns the request thread.
    """
    # Convenience wrapper: callers want just the payload, never the
    # (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # With a callback the helper hands back the request thread; without
    # one it returns the deserialized entity. Pass it through either way.
    return self.get_flow_metrics_with_http_info(producer, **kwargs)
def get_flow_metrics_with_http_info(self, producer, **kwargs):
    """
    Gets all metrics for the flow from a particular node
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>> pprint(response)
    >>>
    >>> thread = api.get_flow_metrics_with_http_info(producer, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str producer: The producer for flow file metrics. Each producer may have its own output format. (required)
    :param list[str] included_registries: Set of included metrics registries
    :param str sample_name: Regular Expression Pattern to be applied against the sample name field
    :param str sample_label_value: Regular Expression Pattern to be applied against the sample label value field
    :param str root_field_name: Name of the first field of JSON object. Applicable for JSON producer only.
    :return: StreamingOutput
        If the method is called asynchronously,
        returns the request thread.
    """
    # API parameters plus the client's transport-control keywords.
    all_params = ['producer', 'included_registries', 'sample_name', 'sample_label_value', 'root_field_name']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot the local namespace so named args and **kwargs entries can
    # be validated and read uniformly through one dict.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            # Fail fast on unknown keyword arguments.
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_flow_metrics" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'producer' is set
    if ('producer' not in params) or (params['producer'] is None):
        raise ValueError("Missing the required parameter `producer` when calling `get_flow_metrics`")
    collection_formats = {}
    # Bind the URL template placeholder {producer}.
    path_params = {}
    if 'producer' in params:
        path_params['producer'] = params['producer']
    query_params = []
    if 'included_registries' in params:
        query_params.append(('includedRegistries', params['included_registries']))
        # 'multi' repeats the query key once per list element.
        collection_formats['includedRegistries'] = 'multi'
    if 'sample_name' in params:
        query_params.append(('sampleName', params['sample_name']))
    if 'sample_label_value' in params:
        query_params.append(('sampleLabelValue', params['sample_label_value']))
    if 'root_field_name' in params:
        query_params.append(('rootFieldName', params['root_field_name']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept` (any type: the producer controls the output format)
    header_params['Accept'] = self.api_client.\
        select_header_accept(['*/*'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['tokenAuth']
    # Delegate the HTTP exchange (and optional async dispatch) to the
    # shared ApiClient.
    return self.api_client.call_api('/flow/metrics/{producer}', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='StreamingOutput',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_flows(self, registry_id, bucket_id, **kwargs):
    """
    Gets the flows from the specified registry and bucket for the current user
    Synchronous by default; supply a `callback` keyword function to run
    the request asynchronously, in which case the request thread is
    returned instead of the entity.
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str registry_id: The registry client id. (required)
    :param str bucket_id: The bucket id. (required)
    :return: VersionedFlowsEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Convenience wrapper: callers want just the payload, never the
    # (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # With a callback the helper hands back the request thread; without
    # one it returns the deserialized entity. Pass it through either way.
    return self.get_flows_with_http_info(registry_id, bucket_id, **kwargs)
def get_flows_with_http_info(self, registry_id, bucket_id, **kwargs):
    """
    Gets the flows from the specified registry and bucket for the current user
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>> pprint(response)
    >>>
    >>> thread = api.get_flows_with_http_info(registry_id, bucket_id, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str registry_id: The registry client id. (required)
    :param str bucket_id: The bucket id. (required)
    :return: VersionedFlowsEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # API parameters plus the client's transport-control keywords.
    all_params = ['registry_id', 'bucket_id']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot the local namespace so named args and **kwargs entries can
    # be validated and read uniformly through one dict.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            # Fail fast on unknown keyword arguments.
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_flows" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'registry_id' is set
    if ('registry_id' not in params) or (params['registry_id'] is None):
        raise ValueError("Missing the required parameter `registry_id` when calling `get_flows`")
    # verify the required parameter 'bucket_id' is set
    if ('bucket_id' not in params) or (params['bucket_id'] is None):
        raise ValueError("Missing the required parameter `bucket_id` when calling `get_flows`")
    collection_formats = {}
    # Bind the URL template placeholders {registry-id}/{bucket-id}.
    path_params = {}
    if 'registry_id' in params:
        path_params['registry-id'] = params['registry_id']
    if 'bucket_id' in params:
        path_params['bucket-id'] = params['bucket_id']
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['tokenAuth']
    # Delegate the HTTP exchange (and optional async dispatch) to the
    # shared ApiClient.
    return self.api_client.call_api('/flow/registries/{registry-id}/buckets/{bucket-id}/flows', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='VersionedFlowsEntity',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_input_port_status(self, id, **kwargs):
    """
    Gets status for an input port
    Synchronous by default; supply a `callback` keyword function to run
    the request asynchronously, in which case the request thread is
    returned instead of the entity.
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: The input port id. (required)
    :param bool nodewise: Whether or not to include the breakdown per node. Optional, defaults to false
    :param str cluster_node_id: The id of the node where to get the status.
    :return: PortStatusEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Convenience wrapper: callers want just the payload, never the
    # (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # With a callback the helper hands back the request thread; without
    # one it returns the deserialized entity. Pass it through either way.
    return self.get_input_port_status_with_http_info(id, **kwargs)
def get_input_port_status_with_http_info(self, id, **kwargs):
    """
    Gets status for an input port
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>> pprint(response)
    >>>
    >>> thread = api.get_input_port_status_with_http_info(id, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: The input port id. (required)
    :param bool nodewise: Whether or not to include the breakdown per node. Optional, defaults to false
    :param str cluster_node_id: The id of the node where to get the status.
    :return: PortStatusEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # API parameters plus the client's transport-control keywords.
    all_params = ['id', 'nodewise', 'cluster_node_id']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot the local namespace so named args and **kwargs entries can
    # be validated and read uniformly through one dict.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            # Fail fast on unknown keyword arguments.
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_input_port_status" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params) or (params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `get_input_port_status`")
    collection_formats = {}
    # Bind the URL template placeholder {id}.
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']
    query_params = []
    if 'nodewise' in params:
        query_params.append(('nodewise', params['nodewise']))
    if 'cluster_node_id' in params:
        query_params.append(('clusterNodeId', params['cluster_node_id']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['tokenAuth']
    # Delegate the HTTP exchange (and optional async dispatch) to the
    # shared ApiClient.
    return self.api_client.call_api('/flow/input-ports/{id}/status', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='PortStatusEntity',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_output_port_status(self, id, **kwargs):
    """
    Gets status for an output port
    Synchronous by default; supply a `callback` keyword function to run
    the request asynchronously, in which case the request thread is
    returned instead of the entity.
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: The output port id. (required)
    :param bool nodewise: Whether or not to include the breakdown per node. Optional, defaults to false
    :param str cluster_node_id: The id of the node where to get the status.
    :return: PortStatusEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Convenience wrapper: callers want just the payload, never the
    # (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # With a callback the helper hands back the request thread; without
    # one it returns the deserialized entity. Pass it through either way.
    return self.get_output_port_status_with_http_info(id, **kwargs)
def get_output_port_status_with_http_info(self, id, **kwargs):
    """
    Gets status for an output port
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>> pprint(response)
    >>>
    >>> thread = api.get_output_port_status_with_http_info(id, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: The output port id. (required)
    :param bool nodewise: Whether or not to include the breakdown per node. Optional, defaults to false
    :param str cluster_node_id: The id of the node where to get the status.
    :return: PortStatusEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # API parameters plus the client's transport-control keywords.
    all_params = ['id', 'nodewise', 'cluster_node_id']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot the local namespace so named args and **kwargs entries can
    # be validated and read uniformly through one dict.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            # Fail fast on unknown keyword arguments.
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_output_port_status" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params) or (params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `get_output_port_status`")
    collection_formats = {}
    # Bind the URL template placeholder {id}.
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']
    query_params = []
    if 'nodewise' in params:
        query_params.append(('nodewise', params['nodewise']))
    if 'cluster_node_id' in params:
        query_params.append(('clusterNodeId', params['cluster_node_id']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['tokenAuth']
    # Delegate the HTTP exchange (and optional async dispatch) to the
    # shared ApiClient.
    return self.api_client.call_api('/flow/output-ports/{id}/status', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='PortStatusEntity',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_parameter_contexts(self, **kwargs):
    """
    Gets all Parameter Contexts
    Synchronous by default; supply a `callback` keyword function to run
    the request asynchronously, in which case the request thread is
    returned instead of the entity.
    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: ParameterContextsEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Convenience wrapper: callers want just the payload, never the
    # (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # With a callback the helper hands back the request thread; without
    # one it returns the deserialized entity. Pass it through either way.
    return self.get_parameter_contexts_with_http_info(**kwargs)
def get_parameter_contexts_with_http_info(self, **kwargs):
    """
    Gets all Parameter Contexts
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>> pprint(response)
    >>>
    >>> thread = api.get_parameter_contexts_with_http_info(callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: ParameterContextsEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # This endpoint takes no API parameters; only the generated client's
    # transport-control keywords are accepted.
    all_params = []
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot the local namespace so named args and **kwargs entries can
    # be validated and read uniformly through one dict.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            # Fail fast on unknown keyword arguments.
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_parameter_contexts" % key
            )
        params[key] = val
    del params['kwargs']
    collection_formats = {}
    path_params = {}
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['tokenAuth']
    # Delegate the HTTP exchange (and optional async dispatch) to the
    # shared ApiClient.
    return self.api_client.call_api('/flow/parameter-contexts', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='ParameterContextsEntity',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_parameter_provider_types(self, **kwargs):
    """
    Retrieves the types of parameter providers that this NiFi supports
    Note: This endpoint is subject to change as NiFi and it's REST API evolve.
    Synchronous by default; supply a `callback` keyword function to run
    the request asynchronously, in which case the request thread is
    returned instead of the entity.
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str bundle_group_filter: If specified, will only return types that are a member of this bundle group.
    :param str bundle_artifact_filter: If specified, will only return types that are a member of this bundle artifact.
    :param str type: If specified, will only return types whose fully qualified classname matches.
    :return: ParameterProviderTypesEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Convenience wrapper: callers want just the payload, never the
    # (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # With a callback the helper hands back the request thread; without
    # one it returns the deserialized entity. Pass it through either way.
    return self.get_parameter_provider_types_with_http_info(**kwargs)
def get_parameter_provider_types_with_http_info(self, **kwargs):
    """
    Retrieves the types of parameter providers that this NiFi supports
    Note: This endpoint is subject to change as NiFi and it's REST API evolve.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>> pprint(response)
    >>>
    >>> thread = api.get_parameter_provider_types_with_http_info(callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str bundle_group_filter: If specified, will only return types that are a member of this bundle group.
    :param str bundle_artifact_filter: If specified, will only return types that are a member of this bundle artifact.
    :param str type: If specified, will only return types whose fully qualified classname matches.
    :return: ParameterProviderTypesEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # API parameters plus the client's transport-control keywords.
    all_params = ['bundle_group_filter', 'bundle_artifact_filter', 'type']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot the local namespace so named args and **kwargs entries can
    # be validated and read uniformly through one dict.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            # Fail fast on unknown keyword arguments.
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_parameter_provider_types" % key
            )
        params[key] = val
    del params['kwargs']
    collection_formats = {}
    path_params = {}
    # All three filters are optional query parameters.
    query_params = []
    if 'bundle_group_filter' in params:
        query_params.append(('bundleGroupFilter', params['bundle_group_filter']))
    if 'bundle_artifact_filter' in params:
        query_params.append(('bundleArtifactFilter', params['bundle_artifact_filter']))
    if 'type' in params:
        query_params.append(('type', params['type']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['tokenAuth']
    # Delegate the HTTP exchange (and optional async dispatch) to the
    # shared ApiClient.
    return self.api_client.call_api('/flow/parameter-provider-types', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='ParameterProviderTypesEntity',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_parameter_providers(self, **kwargs):
    """
    Gets all parameter providers

    Synchronous by default; supply a `callback` function to run the
    request asynchronously, in which case the request thread is handed
    back instead of the response data.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: ParameterProvidersEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Callers of this convenience wrapper only ever want the deserialized
    # body, never the (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    if not kwargs.get('callback'):
        data = self.get_parameter_providers_with_http_info(**kwargs)
        return data
    # Asynchronous mode: the low-level call returns the request thread.
    return self.get_parameter_providers_with_http_info(**kwargs)
def get_parameter_providers_with_http_info(self, **kwargs):
    """
    Gets all parameter providers
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.get_parameter_providers_with_http_info(callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: ParameterProvidersEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Accepted keyword arguments: this endpoint takes no API parameters,
    # only the framework options shared by every generated method.
    all_params = []
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # NOTE: locals() is captured at exactly this point -- it snapshots
    # 'self', 'kwargs' and 'all_params'; introducing any local above this
    # line would change the contents of `params`.
    params = locals()
    # Reject unknown keyword arguments, then promote the known ones to
    # top-level entries of `params`.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_parameter_providers" % key
            )
        params[key] = val
    del params['kwargs']
    collection_formats = {}
    path_params = {}
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['tokenAuth']
    # Delegate the actual HTTP exchange to the shared ApiClient.
    return self.api_client.call_api('/flow/parameter-providers', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='ParameterProvidersEntity',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_prioritizers(self, **kwargs):
    """
    Retrieves the types of prioritizers that this NiFi supports

    Note: This endpoint is subject to change as NiFi and it's REST API
    evolve. Synchronous by default; supply a `callback` function to run
    the request asynchronously and receive the request thread instead.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: PrioritizerTypesEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Only the deserialized body is wanted, never the full HTTP tuple.
    kwargs['_return_http_data_only'] = True
    if not kwargs.get('callback'):
        data = self.get_prioritizers_with_http_info(**kwargs)
        return data
    # Asynchronous mode: the low-level call returns the request thread.
    return self.get_prioritizers_with_http_info(**kwargs)
def get_prioritizers_with_http_info(self, **kwargs):
    """
    Retrieves the types of prioritizers that this NiFi supports
    Note: This endpoint is subject to change as NiFi and it's REST API evolve.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.get_prioritizers_with_http_info(callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: PrioritizerTypesEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Accepted keyword arguments: no API parameters, only the framework
    # options shared by every generated method.
    all_params = []
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # NOTE: locals() is captured at exactly this point -- adding a local
    # above this line would change the contents of `params`.
    params = locals()
    # Reject unknown keyword arguments, then promote the known ones.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_prioritizers" % key
            )
        params[key] = val
    del params['kwargs']
    collection_formats = {}
    path_params = {}
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['tokenAuth']
    # Delegate the actual HTTP exchange to the shared ApiClient.
    return self.api_client.call_api('/flow/prioritizers', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='PrioritizerTypesEntity',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_process_group_status(self, id, **kwargs):
    """
    Gets the status for a process group

    The status for a process group includes status for all descendent
    components. When invoked on the root group with recursive set to
    true, it will return the current status of every component in the
    flow. Synchronous by default; supply a `callback` function to run
    the request asynchronously and receive the request thread instead.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: The process group id. (required)
    :param bool recursive: Whether all descendant groups and the status of their content will be included. Optional, defaults to false
    :param bool nodewise: Whether or not to include the breakdown per node. Optional, defaults to false
    :param str cluster_node_id: The id of the node where to get the status.
    :return: ProcessGroupStatusEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Only the deserialized body is wanted, never the full HTTP tuple.
    kwargs['_return_http_data_only'] = True
    if not kwargs.get('callback'):
        data = self.get_process_group_status_with_http_info(id, **kwargs)
        return data
    # Asynchronous mode: the low-level call returns the request thread.
    return self.get_process_group_status_with_http_info(id, **kwargs)
def get_process_group_status_with_http_info(self, id, **kwargs):
    """
    Gets the status for a process group
    The status for a process group includes status for all descendent components. When invoked on the root group with recursive set to true, it will return the current status of every component in the flow.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.get_process_group_status_with_http_info(id, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: The process group id. (required)
    :param bool recursive: Whether all descendant groups and the status of their content will be included. Optional, defaults to false
    :param bool nodewise: Whether or not to include the breakdown per node. Optional, defaults to false
    :param str cluster_node_id: The id of the node where to get the status.
    :return: ProcessGroupStatusEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Accepted keyword arguments: API parameters plus the framework
    # options shared by every generated method.
    all_params = ['id', 'recursive', 'nodewise', 'cluster_node_id']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # NOTE: locals() is captured at exactly this point -- it snapshots
    # 'self', 'id', 'kwargs' and 'all_params'; adding a local above this
    # line would change the contents of `params`.
    params = locals()
    # Reject unknown keyword arguments, then promote the known ones.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_process_group_status" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params) or (params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `get_process_group_status`")
    collection_formats = {}
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']
    # Map python_style names onto the camelCase query parameters the
    # REST API expects.
    query_params = []
    if 'recursive' in params:
        query_params.append(('recursive', params['recursive']))
    if 'nodewise' in params:
        query_params.append(('nodewise', params['nodewise']))
    if 'cluster_node_id' in params:
        query_params.append(('clusterNodeId', params['cluster_node_id']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['tokenAuth']
    # Delegate the actual HTTP exchange to the shared ApiClient.
    return self.api_client.call_api('/flow/process-groups/{id}/status', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='ProcessGroupStatusEntity',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_process_group_status_history(self, id, **kwargs):
    """
    Gets status history for a process group

    Synchronous by default; supply a `callback` function to run the
    request asynchronously and receive the request thread instead.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: The process group id. (required)
    :return: StatusHistoryEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Only the deserialized body is wanted, never the full HTTP tuple.
    kwargs['_return_http_data_only'] = True
    if not kwargs.get('callback'):
        data = self.get_process_group_status_history_with_http_info(id, **kwargs)
        return data
    # Asynchronous mode: the low-level call returns the request thread.
    return self.get_process_group_status_history_with_http_info(id, **kwargs)
def get_process_group_status_history_with_http_info(self, id, **kwargs):
    """
    Gets status history for a process group
    (The generated summary said "remote process group", but the endpoint
    below is the plain process-group status history.)
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.get_process_group_status_history_with_http_info(id, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: The process group id. (required)
    :return: StatusHistoryEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Accepted keyword arguments: API parameters plus the framework
    # options shared by every generated method.
    all_params = ['id']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # NOTE: locals() is captured at exactly this point -- adding a local
    # above this line would change the contents of `params`.
    params = locals()
    # Reject unknown keyword arguments, then promote the known ones.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_process_group_status_history" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params) or (params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `get_process_group_status_history`")
    collection_formats = {}
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['tokenAuth']
    # Delegate the actual HTTP exchange to the shared ApiClient.
    return self.api_client.call_api('/flow/process-groups/{id}/status/history', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='StatusHistoryEntity',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_processor_status(self, id, **kwargs):
    """
    Gets status for a processor

    Synchronous by default; supply a `callback` function to run the
    request asynchronously and receive the request thread instead.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: The processor id. (required)
    :param bool nodewise: Whether or not to include the breakdown per node. Optional, defaults to false
    :param str cluster_node_id: The id of the node where to get the status.
    :return: ProcessorStatusEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Only the deserialized body is wanted, never the full HTTP tuple.
    kwargs['_return_http_data_only'] = True
    if not kwargs.get('callback'):
        data = self.get_processor_status_with_http_info(id, **kwargs)
        return data
    # Asynchronous mode: the low-level call returns the request thread.
    return self.get_processor_status_with_http_info(id, **kwargs)
def get_processor_status_with_http_info(self, id, **kwargs):
    """
    Gets status for a processor
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.get_processor_status_with_http_info(id, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: The processor id. (required)
    :param bool nodewise: Whether or not to include the breakdown per node. Optional, defaults to false
    :param str cluster_node_id: The id of the node where to get the status.
    :return: ProcessorStatusEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Accepted keyword arguments: API parameters plus the framework
    # options shared by every generated method.
    all_params = ['id', 'nodewise', 'cluster_node_id']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # NOTE: locals() is captured at exactly this point -- adding a local
    # above this line would change the contents of `params`.
    params = locals()
    # Reject unknown keyword arguments, then promote the known ones.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_processor_status" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params) or (params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `get_processor_status`")
    collection_formats = {}
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']
    # Map python_style names onto the camelCase query parameters the
    # REST API expects.
    query_params = []
    if 'nodewise' in params:
        query_params.append(('nodewise', params['nodewise']))
    if 'cluster_node_id' in params:
        query_params.append(('clusterNodeId', params['cluster_node_id']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['tokenAuth']
    # Delegate the actual HTTP exchange to the shared ApiClient.
    return self.api_client.call_api('/flow/processors/{id}/status', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='ProcessorStatusEntity',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_processor_status_history(self, id, **kwargs):
    """
    Gets status history for a processor

    Synchronous by default; supply a `callback` function to run the
    request asynchronously and receive the request thread instead.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: The processor id. (required)
    :return: StatusHistoryEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Only the deserialized body is wanted, never the full HTTP tuple.
    kwargs['_return_http_data_only'] = True
    if not kwargs.get('callback'):
        data = self.get_processor_status_history_with_http_info(id, **kwargs)
        return data
    # Asynchronous mode: the low-level call returns the request thread.
    return self.get_processor_status_history_with_http_info(id, **kwargs)
def get_processor_status_history_with_http_info(self, id, **kwargs):
    """
    Gets status history for a processor
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.get_processor_status_history_with_http_info(id, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: The processor id. (required)
    :return: StatusHistoryEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Accepted keyword arguments: API parameters plus the framework
    # options shared by every generated method.
    all_params = ['id']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # NOTE: locals() is captured at exactly this point -- adding a local
    # above this line would change the contents of `params`.
    params = locals()
    # Reject unknown keyword arguments, then promote the known ones.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_processor_status_history" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params) or (params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `get_processor_status_history`")
    collection_formats = {}
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['tokenAuth']
    # Delegate the actual HTTP exchange to the shared ApiClient.
    return self.api_client.call_api('/flow/processors/{id}/status/history', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='StatusHistoryEntity',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_processor_types(self, **kwargs):
    """
    Retrieves the types of processors that this NiFi supports

    Note: This endpoint is subject to change as NiFi and it's REST API
    evolve. Synchronous by default; supply a `callback` function to run
    the request asynchronously and receive the request thread instead.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str bundle_group_filter: If specified, will only return types that are a member of this bundle group.
    :param str bundle_artifact_filter: If specified, will only return types that are a member of this bundle artifact.
    :param str type: If specified, will only return types whose fully qualified classname matches.
    :return: ProcessorTypesEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Only the deserialized body is wanted, never the full HTTP tuple.
    kwargs['_return_http_data_only'] = True
    if not kwargs.get('callback'):
        data = self.get_processor_types_with_http_info(**kwargs)
        return data
    # Asynchronous mode: the low-level call returns the request thread.
    return self.get_processor_types_with_http_info(**kwargs)
def get_processor_types_with_http_info(self, **kwargs):
    """
    Retrieves the types of processors that this NiFi supports
    Note: This endpoint is subject to change as NiFi and it's REST API evolve.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.get_processor_types_with_http_info(callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str bundle_group_filter: If specified, will only return types that are a member of this bundle group.
    :param str bundle_artifact_filter: If specified, will only return types that are a member of this bundle artifact.
    :param str type: If specified, will only return types whose fully qualified classname matches.
    :return: ProcessorTypesEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Accepted keyword arguments: API parameters plus the framework
    # options shared by every generated method.
    all_params = ['bundle_group_filter', 'bundle_artifact_filter', 'type']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # NOTE: locals() is captured at exactly this point -- adding a local
    # above this line would change the contents of `params`.
    params = locals()
    # Reject unknown keyword arguments, then promote the known ones.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_processor_types" % key
            )
        params[key] = val
    del params['kwargs']
    collection_formats = {}
    path_params = {}
    # Map python_style names onto the camelCase query parameters the
    # REST API expects.
    query_params = []
    if 'bundle_group_filter' in params:
        query_params.append(('bundleGroupFilter', params['bundle_group_filter']))
    if 'bundle_artifact_filter' in params:
        query_params.append(('bundleArtifactFilter', params['bundle_artifact_filter']))
    if 'type' in params:
        query_params.append(('type', params['type']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['tokenAuth']
    # Delegate the actual HTTP exchange to the shared ApiClient.
    return self.api_client.call_api('/flow/processor-types', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='ProcessorTypesEntity',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_registry_clients(self, **kwargs):
    """
    Gets the listing of available flow registry clients

    Synchronous by default; supply a `callback` function to run the
    request asynchronously and receive the request thread instead.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: FlowRegistryClientsEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Only the deserialized body is wanted, never the full HTTP tuple.
    kwargs['_return_http_data_only'] = True
    if not kwargs.get('callback'):
        data = self.get_registry_clients_with_http_info(**kwargs)
        return data
    # Asynchronous mode: the low-level call returns the request thread.
    return self.get_registry_clients_with_http_info(**kwargs)
def get_registry_clients_with_http_info(self, **kwargs):
    """
    Gets the listing of available flow registry clients
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.get_registry_clients_with_http_info(callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: FlowRegistryClientsEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Accepted keyword arguments: no API parameters, only the framework
    # options shared by every generated method.
    all_params = []
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # NOTE: locals() is captured at exactly this point -- adding a local
    # above this line would change the contents of `params`.
    params = locals()
    # Reject unknown keyword arguments, then promote the known ones.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_registry_clients" % key
            )
        params[key] = val
    del params['kwargs']
    collection_formats = {}
    path_params = {}
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['tokenAuth']
    # Delegate the actual HTTP exchange to the shared ApiClient.
    return self.api_client.call_api('/flow/registries', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='FlowRegistryClientsEntity',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_remote_process_group_status(self, id, **kwargs):
    """
    Gets status for a remote process group

    Synchronous by default; supply a `callback` function to run the
    request asynchronously and receive the request thread instead.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: The remote process group id. (required)
    :param bool nodewise: Whether or not to include the breakdown per node. Optional, defaults to false
    :param str cluster_node_id: The id of the node where to get the status.
    :return: RemoteProcessGroupStatusEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Only the deserialized body is wanted, never the full HTTP tuple.
    kwargs['_return_http_data_only'] = True
    if not kwargs.get('callback'):
        data = self.get_remote_process_group_status_with_http_info(id, **kwargs)
        return data
    # Asynchronous mode: the low-level call returns the request thread.
    return self.get_remote_process_group_status_with_http_info(id, **kwargs)
def get_remote_process_group_status_with_http_info(self, id, **kwargs):
    """
    Gets status for a remote process group
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.get_remote_process_group_status_with_http_info(id, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: The remote process group id. (required)
    :param bool nodewise: Whether or not to include the breakdown per node. Optional, defaults to false
    :param str cluster_node_id: The id of the node where to get the status.
    :return: RemoteProcessGroupStatusEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Accepted keyword arguments: API parameters plus the framework
    # options shared by every generated method.
    all_params = ['id', 'nodewise', 'cluster_node_id']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # NOTE: locals() is captured at exactly this point -- adding a local
    # above this line would change the contents of `params`.
    params = locals()
    # Reject unknown keyword arguments, then promote the known ones.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_remote_process_group_status" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params) or (params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `get_remote_process_group_status`")
    collection_formats = {}
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']
    # Map python_style names onto the camelCase query parameters the
    # REST API expects.
    query_params = []
    if 'nodewise' in params:
        query_params.append(('nodewise', params['nodewise']))
    if 'cluster_node_id' in params:
        query_params.append(('clusterNodeId', params['cluster_node_id']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['tokenAuth']
    # Delegate the actual HTTP exchange to the shared ApiClient.
    return self.api_client.call_api('/flow/remote-process-groups/{id}/status', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='RemoteProcessGroupStatusEntity',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_remote_process_group_status_history(self, id, **kwargs):
    """
    Gets the status history

    Synchronous by default; supply a `callback` function to run the
    request asynchronously and receive the request thread instead.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: The remote process group id. (required)
    :return: StatusHistoryEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Only the deserialized body is wanted, never the full HTTP tuple.
    kwargs['_return_http_data_only'] = True
    if not kwargs.get('callback'):
        data = self.get_remote_process_group_status_history_with_http_info(id, **kwargs)
        return data
    # Asynchronous mode: the low-level call returns the request thread.
    return self.get_remote_process_group_status_history_with_http_info(id, **kwargs)
def get_remote_process_group_status_history_with_http_info(self, id, **kwargs):
    """
    Gets the status history for a remote process group.

    Synchronous by default; pass a `callback` function (invoked with
    the response) to run the request asynchronously, in which case the
    request thread is returned.

    :param str id: The remote process group id. (required)
    :param callback: Function invoked with the response. (optional)
    :return: StatusHistoryEntity
        If the method is called asynchronously,
        returns the request thread.
    :raises TypeError: if an unsupported keyword argument is supplied.
    :raises ValueError: if `id` is None.
    """
    # Endpoint parameters plus the generic transport options accepted by
    # every generated API method.
    all_params = ['id', 'callback', '_return_http_data_only',
                  '_preload_content', '_request_timeout']
    params = {'id': id}
    for key, val in kwargs.items():  # dict.items() replaces legacy six.iteritems
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_remote_process_group_status_history" % key
            )
        params[key] = val
    # verify the required parameter 'id' is set
    if id is None:
        raise ValueError("Missing the required parameter `id` when calling `get_remote_process_group_status_history`")

    collection_formats = {}
    path_params = {'id': id}
    query_params = []
    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(['application/json']),
        # HTTP header `Content-Type`
        'Content-Type': self.api_client.select_header_content_type(['*/*']),
    }
    form_params = []
    local_var_files = {}
    body_params = None
    # Authentication setting
    auth_settings = ['tokenAuth']
    return self.api_client.call_api('/flow/remote-process-groups/{id}/status/history', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='StatusHistoryEntity',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_reporting_task_types(self, **kwargs):
    """
    Retrieves the types of reporting tasks that this NiFi supports.

    Note: this endpoint is subject to change as NiFi and its REST API
    evolve. Synchronous by default; pass a `callback` function to run
    asynchronously, in which case the request thread is returned.

    >>> def callback_function(response):
    >>>     pprint(response)
    >>> thread = api.get_reporting_task_types(callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str bundle_group_filter: If specified, will only return types that are a member of this bundle group.
    :param str bundle_artifact_filter: If specified, will only return types that are a member of this bundle artifact.
    :param str type: If specified, will only return types whose fully qualified classname matches.
    :return: ReportingTaskTypesEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async modes alike return the delegate's result unchanged.
    return self.get_reporting_task_types_with_http_info(**kwargs)
def get_reporting_task_types_with_http_info(self, **kwargs):
    """
    Retrieves the types of reporting tasks that this NiFi supports.

    Note: this endpoint is subject to change as NiFi and its REST API
    evolve. Synchronous by default; pass a `callback` function to run
    asynchronously, in which case the request thread is returned.

    :param str bundle_group_filter: If specified, will only return types
        that are a member of this bundle group.
    :param str bundle_artifact_filter: If specified, will only return
        types that are a member of this bundle artifact.
    :param str type: If specified, will only return types whose fully
        qualified classname matches.
    :param callback: Function invoked with the response. (optional)
    :return: ReportingTaskTypesEntity
        If the method is called asynchronously,
        returns the request thread.
    :raises TypeError: if an unsupported keyword argument is supplied.
    """
    all_params = ['bundle_group_filter', 'bundle_artifact_filter', 'type',
                  'callback', '_return_http_data_only',
                  '_preload_content', '_request_timeout']
    params = {}
    for key, val in kwargs.items():  # dict.items() replaces legacy six.iteritems
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_reporting_task_types" % key
            )
        params[key] = val

    collection_formats = {}
    path_params = {}
    # Optional filters, appended in the generator's order, mapped from
    # python_name to the wire (camelCase) name.
    query_params = []
    for wire_name, py_name in (('bundleGroupFilter', 'bundle_group_filter'),
                               ('bundleArtifactFilter', 'bundle_artifact_filter'),
                               ('type', 'type')):
        if py_name in params:
            query_params.append((wire_name, params[py_name]))
    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(['application/json']),
        # HTTP header `Content-Type`
        'Content-Type': self.api_client.select_header_content_type(['*/*']),
    }
    form_params = []
    local_var_files = {}
    body_params = None
    # Authentication setting
    auth_settings = ['tokenAuth']
    return self.api_client.call_api('/flow/reporting-task-types', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='ReportingTaskTypesEntity',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_reporting_tasks(self, **kwargs):
    """
    Gets all reporting tasks.

    Synchronous by default; pass a `callback` function (invoked with
    the response) to run the request asynchronously, in which case the
    request thread is returned.

    >>> def callback_function(response):
    >>>     pprint(response)
    >>> thread = api.get_reporting_tasks(callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: ReportingTasksEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async modes alike return the delegate's result unchanged.
    return self.get_reporting_tasks_with_http_info(**kwargs)
def get_reporting_tasks_with_http_info(self, **kwargs):
    """
    Gets all reporting tasks.

    Synchronous by default; pass a `callback` function (invoked with
    the response) to run the request asynchronously, in which case the
    request thread is returned.

    :param callback: Function invoked with the response. (optional)
    :return: ReportingTasksEntity
        If the method is called asynchronously,
        returns the request thread.
    :raises TypeError: if an unsupported keyword argument is supplied.
    """
    # Only the generic transport options are accepted; this endpoint
    # takes no parameters of its own.
    all_params = ['callback', '_return_http_data_only',
                  '_preload_content', '_request_timeout']
    params = {}
    for key, val in kwargs.items():  # dict.items() replaces legacy six.iteritems
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_reporting_tasks" % key
            )
        params[key] = val

    collection_formats = {}
    path_params = {}
    query_params = []
    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(['application/json']),
        # HTTP header `Content-Type`
        'Content-Type': self.api_client.select_header_content_type(['*/*']),
    }
    form_params = []
    local_var_files = {}
    body_params = None
    # Authentication setting
    auth_settings = ['tokenAuth']
    return self.api_client.call_api('/flow/reporting-tasks', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='ReportingTasksEntity',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_runtime_manifest(self, **kwargs):
    """
    Retrieves the runtime manifest for this NiFi instance.

    Note: this endpoint is subject to change as NiFi and its REST API
    evolve. Synchronous by default; pass a `callback` function to run
    asynchronously, in which case the request thread is returned.

    >>> def callback_function(response):
    >>>     pprint(response)
    >>> thread = api.get_runtime_manifest(callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: RuntimeManifestEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async modes alike return the delegate's result unchanged.
    return self.get_runtime_manifest_with_http_info(**kwargs)
def get_runtime_manifest_with_http_info(self, **kwargs):
    """
    Retrieves the runtime manifest for this NiFi instance.

    Note: this endpoint is subject to change as NiFi and its REST API
    evolve. Synchronous by default; pass a `callback` function to run
    asynchronously, in which case the request thread is returned.

    :param callback: Function invoked with the response. (optional)
    :return: RuntimeManifestEntity
        If the method is called asynchronously,
        returns the request thread.
    :raises TypeError: if an unsupported keyword argument is supplied.
    """
    # Only the generic transport options are accepted; this endpoint
    # takes no parameters of its own.
    all_params = ['callback', '_return_http_data_only',
                  '_preload_content', '_request_timeout']
    params = {}
    for key, val in kwargs.items():  # dict.items() replaces legacy six.iteritems
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_runtime_manifest" % key
            )
        params[key] = val

    collection_formats = {}
    path_params = {}
    query_params = []
    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(['application/json']),
        # HTTP header `Content-Type`
        'Content-Type': self.api_client.select_header_content_type(['*/*']),
    }
    form_params = []
    local_var_files = {}
    body_params = None
    # Authentication setting
    auth_settings = ['tokenAuth']
    return self.api_client.call_api('/flow/runtime-manifest', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='RuntimeManifestEntity',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_templates(self, **kwargs):
    """
    Gets all templates.

    Synchronous by default; pass a `callback` function (invoked with
    the response) to run the request asynchronously, in which case the
    request thread is returned.

    >>> def callback_function(response):
    >>>     pprint(response)
    >>> thread = api.get_templates(callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: TemplatesEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async modes alike return the delegate's result unchanged.
    return self.get_templates_with_http_info(**kwargs)
def get_templates_with_http_info(self, **kwargs):
    """
    Gets all templates.

    Synchronous by default; pass a `callback` function (invoked with
    the response) to run the request asynchronously, in which case the
    request thread is returned.

    :param callback: Function invoked with the response. (optional)
    :return: TemplatesEntity
        If the method is called asynchronously,
        returns the request thread.
    :raises TypeError: if an unsupported keyword argument is supplied.
    """
    # Only the generic transport options are accepted; this endpoint
    # takes no parameters of its own.
    all_params = ['callback', '_return_http_data_only',
                  '_preload_content', '_request_timeout']
    params = {}
    for key, val in kwargs.items():  # dict.items() replaces legacy six.iteritems
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_templates" % key
            )
        params[key] = val

    collection_formats = {}
    path_params = {}
    query_params = []
    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(['application/json']),
        # HTTP header `Content-Type`
        'Content-Type': self.api_client.select_header_content_type(['*/*']),
    }
    form_params = []
    local_var_files = {}
    body_params = None
    # Authentication setting
    auth_settings = ['tokenAuth']
    return self.api_client.call_api('/flow/templates', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='TemplatesEntity',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_versions(self, registry_id, bucket_id, flow_id, **kwargs):
    """
    Gets the flow versions from the specified registry and bucket for
    the specified flow for the current user.

    Synchronous by default; pass a `callback` function (invoked with
    the response) to run the request asynchronously, in which case the
    request thread is returned.

    >>> def callback_function(response):
    >>>     pprint(response)
    >>> thread = api.get_versions(registry_id, bucket_id, flow_id, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str registry_id: The registry client id. (required)
    :param str bucket_id: The bucket id. (required)
    :param str flow_id: The flow id. (required)
    :return: VersionedFlowSnapshotMetadataSetEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async modes alike return the delegate's result unchanged.
    return self.get_versions_with_http_info(registry_id, bucket_id, flow_id, **kwargs)
def get_versions_with_http_info(self, registry_id, bucket_id, flow_id, **kwargs):
    """
    Gets the flow versions from the specified registry and bucket for
    the specified flow for the current user.

    Synchronous by default; pass a `callback` function (invoked with
    the response) to run the request asynchronously, in which case the
    request thread is returned.

    :param str registry_id: The registry client id. (required)
    :param str bucket_id: The bucket id. (required)
    :param str flow_id: The flow id. (required)
    :param callback: Function invoked with the response. (optional)
    :return: VersionedFlowSnapshotMetadataSetEntity
        If the method is called asynchronously,
        returns the request thread.
    :raises TypeError: if an unsupported keyword argument is supplied.
    :raises ValueError: if any required parameter is None.
    """
    all_params = ['registry_id', 'bucket_id', 'flow_id',
                  'callback', '_return_http_data_only',
                  '_preload_content', '_request_timeout']
    params = {'registry_id': registry_id, 'bucket_id': bucket_id,
              'flow_id': flow_id}
    for key, val in kwargs.items():  # dict.items() replaces legacy six.iteritems
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_versions" % key
            )
        params[key] = val
    # verify all required parameters are set
    if registry_id is None:
        raise ValueError("Missing the required parameter `registry_id` when calling `get_versions`")
    if bucket_id is None:
        raise ValueError("Missing the required parameter `bucket_id` when calling `get_versions`")
    if flow_id is None:
        raise ValueError("Missing the required parameter `flow_id` when calling `get_versions`")

    collection_formats = {}
    # Path placeholders use hyphenated names on the wire.
    path_params = {'registry-id': registry_id,
                   'bucket-id': bucket_id,
                   'flow-id': flow_id}
    query_params = []
    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(['application/json']),
        # HTTP header `Content-Type`
        'Content-Type': self.api_client.select_header_content_type(['*/*']),
    }
    form_params = []
    local_var_files = {}
    body_params = None
    # Authentication setting
    auth_settings = ['tokenAuth']
    return self.api_client.call_api('/flow/registries/{registry-id}/buckets/{bucket-id}/flows/{flow-id}/versions', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='VersionedFlowSnapshotMetadataSetEntity',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def query_history(self, offset, count, **kwargs):
    """
    Gets configuration history.

    Note: this endpoint is subject to change as NiFi and its REST API
    evolve. Synchronous by default; pass a `callback` function to run
    asynchronously, in which case the request thread is returned.

    >>> def callback_function(response):
    >>>     pprint(response)
    >>> thread = api.query_history(offset, count, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str offset: The offset into the result set. (required)
    :param str count: The number of actions to return. (required)
    :param str sort_column: The field to sort on.
    :param str sort_order: The direction to sort.
    :param str start_date: Include actions after this date.
    :param str end_date: Include actions before this date.
    :param str user_identity: Include actions performed by this user.
    :param str source_id: Include actions on this component.
    :return: HistoryEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async modes alike return the delegate's result unchanged.
    return self.query_history_with_http_info(offset, count, **kwargs)
def query_history_with_http_info(self, offset, count, **kwargs):
    """
    Gets configuration history.

    Note: this endpoint is subject to change as NiFi and its REST API
    evolve. Synchronous by default; pass a `callback` function to run
    asynchronously, in which case the request thread is returned.

    :param str offset: The offset into the result set. (required)
    :param str count: The number of actions to return. (required)
    :param str sort_column: The field to sort on.
    :param str sort_order: The direction to sort.
    :param str start_date: Include actions after this date.
    :param str end_date: Include actions before this date.
    :param str user_identity: Include actions performed by this user.
    :param str source_id: Include actions on this component.
    :param callback: Function invoked with the response. (optional)
    :return: HistoryEntity
        If the method is called asynchronously,
        returns the request thread.
    :raises TypeError: if an unsupported keyword argument is supplied.
    :raises ValueError: if `offset` or `count` is None.
    """
    all_params = ['offset', 'count', 'sort_column', 'sort_order',
                  'start_date', 'end_date', 'user_identity', 'source_id',
                  'callback', '_return_http_data_only',
                  '_preload_content', '_request_timeout']
    params = {'offset': offset, 'count': count}
    for key, val in kwargs.items():  # dict.items() replaces legacy six.iteritems
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method query_history" % key
            )
        params[key] = val
    # verify the required parameters are set
    if offset is None:
        raise ValueError("Missing the required parameter `offset` when calling `query_history`")
    if count is None:
        raise ValueError("Missing the required parameter `count` when calling `query_history`")

    collection_formats = {}
    path_params = {}
    # Required params first, then optional filters in the generator's
    # order, mapped from python_name to the wire (camelCase) name.
    query_params = [('offset', offset), ('count', count)]
    for wire_name, py_name in (('sortColumn', 'sort_column'),
                               ('sortOrder', 'sort_order'),
                               ('startDate', 'start_date'),
                               ('endDate', 'end_date'),
                               ('userIdentity', 'user_identity'),
                               ('sourceId', 'source_id')):
        if py_name in params:
            query_params.append((wire_name, params[py_name]))
    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(['application/json']),
        # HTTP header `Content-Type`
        'Content-Type': self.api_client.select_header_content_type(['*/*']),
    }
    form_params = []
    local_var_files = {}
    body_params = None
    # Authentication setting
    auth_settings = ['tokenAuth']
    return self.api_client.call_api('/flow/history', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='HistoryEntity',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def schedule_components(self, id, body, **kwargs):
    """
    Schedule or unschedule components in the specified Process Group.

    Synchronous by default; pass a `callback` function (invoked with
    the response) to run the request asynchronously, in which case the
    request thread is returned.

    >>> def callback_function(response):
    >>>     pprint(response)
    >>> thread = api.schedule_components(id, body, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: The process group id. (required)
    :param ScheduleComponentsEntity body: The request to schedule or
        unschedule. If the components in the request are not specified,
        all authorized components will be considered. (required)
    :return: ScheduleComponentsEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async modes alike return the delegate's result unchanged.
    return self.schedule_components_with_http_info(id, body, **kwargs)
def schedule_components_with_http_info(self, id, body, **kwargs):
    """
    Schedule or unschedule components in the specified Process Group.

    Synchronous by default; pass a `callback` function (invoked with
    the response) to run the request asynchronously, in which case the
    request thread is returned.

    :param str id: The process group id. (required)
    :param ScheduleComponentsEntity body: The request to schedule or
        unschedule. If the components in the request are not specified,
        all authorized components will be considered. (required)
    :param callback: Function invoked with the response. (optional)
    :return: ScheduleComponentsEntity
        If the method is called asynchronously,
        returns the request thread.
    :raises TypeError: if an unsupported keyword argument is supplied.
    :raises ValueError: if `id` or `body` is None.
    """
    all_params = ['id', 'body', 'callback', '_return_http_data_only',
                  '_preload_content', '_request_timeout']
    params = {'id': id, 'body': body}
    for key, val in kwargs.items():  # dict.items() replaces legacy six.iteritems
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method schedule_components" % key
            )
        params[key] = val
    # verify the required parameters are set
    if id is None:
        raise ValueError("Missing the required parameter `id` when calling `schedule_components`")
    if body is None:
        raise ValueError("Missing the required parameter `body` when calling `schedule_components`")

    collection_formats = {}
    path_params = {'id': id}
    query_params = []
    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(['application/json']),
        # HTTP header `Content-Type` — this endpoint takes a JSON body.
        'Content-Type': self.api_client.select_header_content_type(['application/json']),
    }
    form_params = []
    local_var_files = {}
    body_params = body
    # Authentication setting
    auth_settings = ['tokenAuth']
    return self.api_client.call_api('/flow/process-groups/{id}', 'PUT',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='ScheduleComponentsEntity',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def search_cluster(self, q, **kwargs):
    """
    Searches the cluster for a node with the specified address.

    Note: this endpoint is subject to change as NiFi and its REST API
    evolve. Synchronous by default; pass a `callback` function to run
    asynchronously, in which case the request thread is returned.

    >>> def callback_function(response):
    >>>     pprint(response)
    >>> thread = api.search_cluster(q, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str q: Node address to search for. (required)
    :return: ClusterSearchResultsEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async modes alike return the delegate's result unchanged.
    return self.search_cluster_with_http_info(q, **kwargs)
def search_cluster_with_http_info(self, q, **kwargs):
    """
    Searches the cluster for a node with the specified address.

    Note: this endpoint is subject to change as NiFi and its REST API
    evolve. Synchronous by default; pass a `callback` function to run
    asynchronously, in which case the request thread is returned.

    :param str q: Node address to search for. (required)
    :param callback: Function invoked with the response. (optional)
    :return: ClusterSearchResultsEntity
        If the method is called asynchronously,
        returns the request thread.
    :raises TypeError: if an unsupported keyword argument is supplied.
    :raises ValueError: if `q` is None.
    """
    all_params = ['q', 'callback', '_return_http_data_only',
                  '_preload_content', '_request_timeout']
    params = {'q': q}
    for key, val in kwargs.items():  # dict.items() replaces legacy six.iteritems
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method search_cluster" % key
            )
        params[key] = val
    # verify the required parameter 'q' is set
    if q is None:
        raise ValueError("Missing the required parameter `q` when calling `search_cluster`")

    collection_formats = {}
    path_params = {}
    query_params = [('q', q)]
    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(['application/json']),
        # HTTP header `Content-Type`
        'Content-Type': self.api_client.select_header_content_type(['*/*']),
    }
    form_params = []
    local_var_files = {}
    body_params = None
    # Authentication setting
    auth_settings = ['tokenAuth']
    return self.api_client.call_api('/flow/cluster/search-results', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='ClusterSearchResultsEntity',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def search_flow(self, **kwargs):
    """
    Performs a search against this NiFi using the specified search term.

    Only search results from authorized components will be returned.
    Synchronous by default; pass a `callback` function (invoked with
    the response) to run the request asynchronously, in which case the
    request thread is returned.

    >>> def callback_function(response):
    >>>     pprint(response)
    >>> thread = api.search_flow(callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str q:
    :param str a:
    :return: SearchResultsEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async modes alike return the delegate's result unchanged.
    return self.search_flow_with_http_info(**kwargs)
def search_flow_with_http_info(self, **kwargs):
    """
    Performs a search against this NiFi using the specified search term
    Only search results from authorized components will be returned.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.search_flow_with_http_info(callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str q:
    :param str a:
    :return: SearchResultsEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Whitelist of keyword arguments this endpoint accepts.
    all_params = ['q', 'a']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Swagger-codegen idiom: capture locals() as a dict, then fold the
    # validated kwargs into it so params[...] lookups work uniformly.
    params = locals()
    # NOTE(review): `iteritems` is presumably `from six import iteritems`
    # at the top of this module -- TODO confirm.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method search_flow" % key
            )
        params[key] = val
    del params['kwargs']
    collection_formats = {}
    path_params = {}
    query_params = []
    if 'q' in params:
        query_params.append(('q', params['q']))
    if 'a' in params:
        query_params.append(('a', params['a']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['tokenAuth']
    return self.api_client.call_api('/flow/search-results', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='SearchResultsEntity',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
|
from data.COMMON import * #essentials

# Register this plugin with the UMC image handler:
# version 0.001, format id 'COMMONIMG', file extension 'png', no extra options.
imgHeader(0.001,
    ( 'COMMONIMG', ['png'] ),
    [])
#the image handling system is still very much in the design process,
#and is currently not used. (only verified)
#functions written in here may not be implemented in UMC yet.
def ImportImage(FT):
    """Walk a PNG file chunk by chunk using the UMC read DSL (bh_/bu32/...)."""
    #TODO: only reads (doesn't verify)
    #the updated logger will tell if there's an error
    SIG = bh_(8,'',' -- signature')  # 8-byte PNG file signature
    while 1:
        # Each PNG chunk: 4-byte length, 4-byte type, data, 4-byte CRC.
        length=bu32(label=' -- data-length')
        Type = string(4,label=' -- type')
        #character case of each chunk-type letter (PNG spec semantics):
        #0 - CRITICAL | not-critical
        #1 - PUBLIC | private
        #2 - RESERVED | not-recognized
        #3 - CHECK | safe
        #(CHECK means to look for modifications that have
        # not touched any critical chunks)
        switch(Type)
        if case('IHDR'): #always first
            bu32(label=' -- width')
            bu32(label=' -- height')
            bu8(label=' -- bit-depth')
            bu8(label=' -- colour-type')
            bu8(label=' -- compression-type')
            bu8(label=' -- filter-type')
            bu8(label=' -- interlace-type')
        if case('PLTE'):
            # NOTE(review): length/3 is a float under Python 3 -- confirm the
            # DSL accepts it, otherwise this should be length//3.
            StructArr(['u8','u8','u8'],length/3)
        if case('IDAT'): bu_(length, ' -- image data')
        if case('bkGD'): bu_(length, ' -- background color')
        if case('cHRM'): bu_(length, ' -- chromanence')
        if case('gAMA'): bu_(length, ' -- gamma correction')
        if case('hIST'): bu_(length, ' -- histogram')
        if case('iCCP'): bu_(length, ' -- ')
        if case('iTXt'): bu_(length, ' -- ')
        if case('pHYs'): bu_(length, ' -- ')
        if case('sPLT'): bu_(length, ' -- ')
        if case('sRGB'): bu_(length, ' -- ')
        if case('sTER'): bu_(length, ' -- sterioscopic data')
        if case('tEXt'): bu_(length, ' -- text data')
        if case('tIME'): bu_(length, ' -- ')
        if case('tRNS'): bu_(length, ' -- transparency data')
        if case('zTXt'): bu_(length, ' -- zlib compressed text data')
        CRC = bh32(label=' -- CRC')
        from zlib import crc32
        # NOTE(review): C_data, C_type, checksum, verify, ChunkError and the
        # returned `data` are not defined anywhere in this function's visible
        # scope -- this verification path cannot run as written. TODO confirm
        # whether the DSL injects these names, or finish the implementation.
        VER = struct.pack('!I', crc32(C_data,crc32(C_type)) & (2 ** 32 - 1) )
        if CRC != VER:
            # print repr(checksum)
            (a,) = struct.unpack('!I', checksum)
            (b,) = struct.unpack('!I', verify)
            raise ChunkError(
                "Checksum error in %s chunk: 0x%08X != 0x%08X." %
                (type, a, b))
        if Type=='IEND': break #always last
    return data
def ExportImage(FT):
    """Begin writing a PNG file: emit the 8-byte signature (\\x89PNG\\r\\n\\x1a\\n)."""
    bh_(8,'89504E470D0A1A0A',' -- signature')
|
from flask import render_template, flash, redirect, session, url_for
from app import app
from .forms import PostForm, NameForm
from .models import Post
# `flask.ext.*` was removed in Flask 1.0; extensions are imported directly.
from flask_bootstrap import Bootstrap
from . import db

bootstrap = Bootstrap(app)
@app.route('/')
@app.route('/index', methods=['GET', 'POST'])
def index():
    """Home page: ask for the visitor's name and greet them.

    On a valid POST, store the name in the session (flashing a notice when
    it changed) and redirect back so a refresh does not resubmit the form.
    """
    # (removed an unused local `name = None`; the template reads the
    # session directly)
    form = NameForm()
    if form.validate_on_submit():
        if 'name' in session:
            old_name = session['name']
            if old_name and old_name != form.name.data:
                flash('Looks like you have changed your name!')
        session['name'] = form.name.data
        # Post/Redirect/Get pattern: avoid duplicate submissions.
        return redirect(url_for('index'))
    return render_template('index.html', form=form, name=session.get('name'))
@app.route('/posts', methods=['GET', 'POST'])
def posts():
    """List all posts (newest first) and accept a new post submission."""
    form = PostForm()
    if form.validate_on_submit():
        # Attribute the post to the session's name when one is stored.
        name = 'Anonymous'
        if 'name' in session:
            name = session['name']
        post = Post(body=form.body.data, author=name)
        db.session.add(post)
        db.session.commit()
    posts = Post.query.order_by(Post.timestamp.desc()).all()
    return render_template('posts.html', form=form, posts=posts)
# Static question pages: each route simply renders its template.
@app.route('/preguntas')
def preguntas():
    return render_template('preguntas.html')

@app.route('/preguntas2')
def preguntas2():
    return render_template('preguntas2.html')

@app.route('/preguntas3')
def preguntas3():
    return render_template('preguntas3.html')

@app.route('/preguntas4')
def preguntas4():
    return render_template('preguntas4.html')
@app.errorhandler(404)
def page_not_found(e):
    """Render the custom 404 page."""
    return render_template('404.html'), 404

@app.errorhandler(500)
def internal_server_error(e):
    """Render the custom 500 page."""
    return render_template('500.html'), 500
|
# módulo destinado a las clases y funcionalidades de los enemigos.
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import QLabel
from PyQt5.QtCore import QThread, pyqtSignal
from PyQt5.Qt import QTest
from parameters import VEL_MOVIMIENTO, LAMBDA_HOSTIL, A_NO_HOSTIL, \
B_NO_HOSTIL, N, RANGO_VISION, VIDAS_ENEMIGOS, HOSTILES_INICIALES, \
COMUNES_INICIALES, AUMENTO_DIFICULTAD, NIVEL_DIFICULTAD
from random import choice, uniform, expovariate
from eventos import MoveEnemyEvent, RangoVisionEvent, SideMoveEvent, \
RecibirEnemigoEvent
from compare import colision
class Enemigo(QThread):
    """Base enemy: a QThread that wanders the board in a random direction,
    changing course roughly every 2 seconds, and reports every move to the
    parent window through Qt signals."""
    add_points = pyqtSignal(int)
    id_ = 0  # class-level counter used to hand out a unique id per instance
    move_enemy = pyqtSignal(MoveEnemyEvent)
    die_signal = pyqtSignal(int)

    def __init__(self, parent, x, y, pixmap="assets/Illumi_Zoldyck.png"):
        # (x, y) is the sprite's center; _pos stores the top-left corner.
        super().__init__(parent)
        self.sizex = 20
        self.sizey = 30
        self.label = QLabel(parent)
        self.label.setGeometry(x - self.sizex/2, y - self.sizey/2, 20, 30)
        self.pixmap = QPixmap(pixmap)
        self.label.setPixmap(self.pixmap)
        self.label.setScaledContents(True)
        self.label.show()
        self.label.setVisible(True)
        self._pos = (x - self.sizex/2, y - self.sizey/2)
        self.vida = VIDAS_ENEMIGOS
        self.side = choice(["up", "down", "left", "right"])
        self.move_enemy.connect(parent.move_enemy)
        self.die_signal.connect(parent.muere_enemigo)
        self.add_points.connect(parent.add_enemy_points)
        self.ready = True  # False while the game is paused
        self.nivel_dificultad = NIVEL_DIFICULTAD
        self._velocidad = VEL_MOVIMIENTO
        self.side_timer = None
        self.last_side = None
        self.time_wait = 100  # ms between movement ticks
        self.id_ = Enemigo.id_
        Enemigo.id_ += 1
        # Trade large per-tick steps for shorter ticks so movement looks
        # smooth; stop once the tick period reaches 10 ms.
        while self._velocidad > 10:
            self._velocidad = int(self._velocidad / 2)
            self.time_wait = int(self.time_wait / 2)
            if self.time_wait <= 10:
                break
        parent.trigger_aumento_dificultad.connect(self.aumentar_dificultad)
        parent.trigger_recibir_explosion.connect(self.recibir_dano)
        parent.trigger_pausar.connect(self.pausar)
        parent.trigger_enemy_label_move.connect(self.move_label)
        parent.trigger_enemy_back.connect(self.move_back)
        self.start()

    @property
    def velocidad(self):
        # Effective speed grows by 1.5 px per difficulty level.
        ret = self._velocidad
        for _ in range(self.nivel_dificultad):
            ret += 1.5
        return ret

    @property
    def x(self):
        return self._pos[0]

    @property
    def y(self):
        return self._pos[1]

    @property
    def pos(self):
        return self._pos

    @pos.setter
    def pos(self, value):
        # Silently reject moves that would leave the N x N board.
        if value[0] > N or value[1] > N:
            return
        if value[0] < 0 or value[1] < 0:
            return
        self._pos = value

    @property
    def mid_pos(self):
        # Center of the sprite (pos is the top-left corner).
        x, y = self._pos
        return x + self.sizex/2, y + self.sizey/2

    def set_pos(self, event):
        # Only react to events addressed to this enemy's id.
        if event.id_ == self.id_:
            self._pos = (event.x, event.y)

    def aumentar_dificultad(self):
        self.nivel_dificultad += 1

    def change_direction(self):
        # Pick a new direction different from the current one.
        options = {"up", "down", "left", "right"}
        options.discard(self.side)
        self.side = choice(list(options))

    def die(self):
        # Notify the parent, drop the sprite, and stop this thread.
        self.die_signal.emit(self.id_)
        self.label.deleteLater()
        self.quit()
        # self.deleteLater()

    def pausar(self):
        # Toggle the pause flag.
        if self.ready:
            self.ready = False
        else:
            self.ready = True

    def recibir_dano(self, e):
        # Lose one life when caught in an explosion; credit the player.
        if colision(self, e.espacio):
            self.vida -= 1
            self.add_points.emit(e.player)

    def move_label(self, e):
        # Parent tells us the label's final on-screen position.
        if e.id_ == self.id_:
            self.label.move(e.x, e.y)
            self.label.hide()
            self.label.show()

    def move_back(self, e):
        # Resync the logical position with the label after a rejected move.
        if e.id_ == self.id_:
            x = self.label.x()
            y = self.label.y()
            self.pos = (x, y)

    def run(self):
        # Movement loop: one step per tick in the current direction,
        # changing direction roughly every 2 seconds of accumulated time.
        self.side_timer = 2
        while self.vida > 0:
            if self.ready:
                x, y = self.pos
                if self.side == "up":
                    self.pos = (x, y - self.velocidad)
                elif self.side == "down":
                    self.pos = (x, y + self.velocidad)
                elif self.side == "right":
                    self.pos = (x + self.velocidad, y)
                elif self.side == "left":
                    self.pos = (x - self.velocidad, y)
                self.move_enemy.emit(MoveEnemyEvent(self.x, self.y,
                                                    self.id_))
                self.side_timer -= self.time_wait/1000
                if self.side_timer <= 0:
                    self.change_direction()
                    self.side_timer = 2
            QTest.qWait(self.time_wait)
        self.die()
class EnemigoHostil(Enemigo):
    """Enemy with a hostile mode: when the parent detects a player inside
    its vision range it steers this enemy toward that player instead of
    the base class's random wandering."""
    hostil_movement = pyqtSignal(MoveEnemyEvent)
    rango_vision_signal = pyqtSignal(RangoVisionEvent)

    def __init__(self, parent, x, y):
        super().__init__(parent, x, y, "assets/Hisoka.png")
        self.pixmap_hostil = QPixmap("assets/Hisoka_hostil.png")
        self.modo_hostil = False
        self.jugador_perseguido = None  # player currently being chased
        self.rango_vision = RANGO_VISION
        self.hostil_movement.connect(parent.hostil_movement)
        self.rango_vision_signal.connect(parent.revisar_rango_hostil)
        parent.trigger_activar_hostilidad.connect(self.cambiar_hostilidad)
        parent.trigger_enemy_way.connect(self.hostil_move)

    def cambiar_hostilidad(self, e):
        # Enter/leave hostile mode (swapping the sprite) when addressed
        # to this enemy; e.player is False to deactivate.
        if e.id_ == self.id_:
            if e.player is False:
                self.label.setPixmap(self.pixmap)
                self.modo_hostil = False
                self.jugador_perseguido = None
            else:
                self.label.setPixmap(self.pixmap_hostil)
                self.modo_hostil = True
                self.jugador_perseguido = e.player

    def hostil_move(self, e):
        # Perform one chase step in the direction chosen by the parent.
        if e.id_ == self.id_:
            x, y = self.pos
            if e.side == "up":
                self.pos = (x, y - self.velocidad)
            elif e.side == "down":
                self.pos = (x, y + self.velocidad)
            elif e.side == "right":
                self.pos = (x + self.velocidad, y)
            elif e.side == "left":
                self.pos = (x - self.velocidad, y)
            x_, y_ = self.pos
            # Track the last attempted direction so the parent can avoid
            # re-proposing a move that did not change the position.
            if (x_ != x or y_ != y) and e.side == self.last_side:
                self.last_side = None
            else:
                self.last_side = e.side
            self.move_enemy.emit(MoveEnemyEvent(self.x, self.y,
                                                self.id_))
            self.rango_vision_signal.emit(
                RangoVisionEvent(x_, y_, self.id_, self.rango_vision))

    def run(self):
        QTest.qWait(200)  # give the parent time to finish wiring signals
        self.side_timer = 2
        while self.vida > 0:
            if self.ready:
                x, y = self.pos
                if self.modo_hostil:
                    # Hostile mode: ask the parent for the next chase step
                    # (it answers through trigger_enemy_way -> hostil_move).
                    self.hostil_movement.emit(
                        MoveEnemyEvent(self.x + self.sizex/2,
                                       self.y + self.sizey/2, self.id_,
                                       self.jugador_perseguido,
                                       self.last_side))
                else:
                    # Normal mode: wander like the base class, but also
                    # broadcast the vision range so players can be spotted.
                    if self.side == "up":
                        self.pos = (x, y - self.velocidad)
                    elif self.side == "down":
                        self.pos = (x, y + self.velocidad)
                    elif self.side == "right":
                        self.pos = (x + self.velocidad, y)
                    elif self.side == "left":
                        self.pos = (x - self.velocidad, y)
                    self.move_enemy.emit(MoveEnemyEvent(self.x, self.y,
                                                        self.id_))
                    x_, y_ = self.mid_pos
                    self.rango_vision_signal.emit(
                        RangoVisionEvent(x_, y_, self.id_, self.rango_vision))
                    self.side_timer -= self.time_wait/1000
                    if self.side_timer <= 0:
                        self.change_direction()
                        self.side_timer = 2
            QTest.qWait(self.time_wait)
        self.die()
class GeneradorDeEnemigos(QThread):
    """Spawner thread: emits hostile enemies on an exponential schedule and
    common enemies on a uniform schedule, accelerating as the combined
    player score crosses difficulty thresholds."""
    enviar_enemigos = pyqtSignal(RecibirEnemigoEvent)
    pedir_lugar_signal = pyqtSignal()
    aumentar_dificultad_signal = pyqtSignal()

    def __init__(self, parent):
        super().__init__(parent)
        self.enviar_enemigos.connect(parent.recibir_enemigos)
        self.pedir_lugar_signal.connect(parent.lugar_despliegue)
        self.aumentar_dificultad_signal.connect(
            parent.aumentar_dificultad_enemigos)
        parent.trigger_pausar.connect(self.pausar)
        self.ready = False
        self.aumentos_dificultad = 0
        # Countdown (in seconds) until the next spawn of each kind.
        self.contador_hostil = self.siguiente_hostil()
        self.contador_comun = self.siguiente_comun()
        # Next deployment coordinates, filled in by cambiar_lugar().
        self.next_x = None
        self.next_y = None
        self.parent = parent
        self.times = 0
        self.score_1 = 0
        self.score_2 = 0

    def actualizar_puntaje(self, e):
        # Track both players' scores; every AUMENTO_DIFICULTAD combined
        # points beyond the already-counted increases raises difficulty.
        if e.player == 1:
            self.score_1 = e.score
        else:
            self.score_2 = e.score
        total = self.score_1 + self.score_2 \
            - (self.aumentos_dificultad * AUMENTO_DIFICULTAD)
        if total > AUMENTO_DIFICULTAD:
            self.aumentos_dificultad += 1
            self.aumentar_dificultad_signal.emit()

    def pausar(self):
        # Toggle the pause flag.
        if self.ready:
            self.ready = False
        else:
            self.ready = True

    def siguiente_hostil(self):
        # Exponential inter-arrival time for hostile enemies.
        # NOTE(review): halving the rate parameter per difficulty step
        # *lengthens* the expected wait (mean = 1/lambda) -- confirm this
        # is the intended direction.
        parametro = LAMBDA_HOSTIL
        for _ in range(self.aumentos_dificultad):
            parametro /= 2
        ret = expovariate(parametro)
        return ret

    def siguiente_comun(self):
        # Uniform inter-arrival time; the window shrinks with difficulty.
        parametro1 = A_NO_HOSTIL
        parametro2 = B_NO_HOSTIL
        for _ in range(self.aumentos_dificultad):
            parametro1 /= 2
            parametro2 /= 2
        ret = uniform(parametro1, parametro2)
        return ret

    def enemigos_iniciales(self):
        # Initial wave; the qWait gives the parent time to answer the
        # position request before the spawn event is emitted.
        for _ in range(HOSTILES_INICIALES):
            self.pedir_lugar_despliegue()
            QTest.qWait(20)
            self.crear_hostil()
        for _ in range(COMUNES_INICIALES):
            self.pedir_lugar_despliegue()
            QTest.qWait(20)
            self.crear_comun()

    def crear_comun(self):
        # No deployment spot available yet: retry in ~1 second.
        if not self.next_x or not self.next_y:
            self.contador_comun = 1
            return
        QTest.qWait(20)
        self.enviar_enemigos.emit(RecibirEnemigoEvent(
            "comun", self.next_x, self.next_y))

    def pedir_lugar_despliegue(self):
        self.pedir_lugar_signal.emit()

    def cambiar_lugar(self, e):
        # Parent's reply carrying the next deployment coordinates.
        self.next_x = e.x
        self.next_y = e.y

    def crear_hostil(self):
        # No deployment spot available yet: retry in ~1 second.
        if not self.next_x or not self.next_y:
            self.contador_hostil = 1
            return
        QTest.qWait(20)
        self.enviar_enemigos.emit(RecibirEnemigoEvent(
            "hostil", self.next_x, self.next_y))

    def die(self):
        self.quit()

    def run(self):
        self.ready = True
        self.enemigos_iniciales()
        # Tick roughly every 200 ms, decrementing both countdowns by 0.2 s;
        # `times` compensates the wait for time spent spawning.
        while True:
            self.times = 0
            if self.ready:
                self.contador_hostil -= 0.2
                self.contador_comun -= 0.2
                if self.contador_hostil <= 0:
                    self.contador_hostil = self.siguiente_hostil()
                    self.pedir_lugar_despliegue()
                    self.crear_hostil()
                    self.times += 20
                if self.contador_comun <= 0:
                    self.contador_comun = self.siguiente_comun()
                    self.pedir_lugar_despliegue()
                    self.crear_comun()
                    self.times += 20
            QTest.qWait(200 - self.times)
|
# protection, remember to seal after you are done
# Copies experiment assets (*.csv, fixation.png, stimuli/) into the
# PsychoPy web export's html/resources folder.
if 1:
    import os
    import shutil
    import glob

    src = '.'
    dest = os.path.join('html', 'resources')
    # ideally we want to clean the resources folder before copying new files over, but
    # because I am having trouble deleting it on pc https://stackoverflow.com/questions/2656322/shutil-rmtree-fails-on-windows-with-access-is-denied
    # let's skip this step for now. psychopy makes resources read-only
    # it was fine when I created exp2 though, https://github.com/Xinzhu-Fang/exp7/issues/3#issuecomment-513632811
    # print(os.path.isdir(dest))
    # if os.path.isdir(dest):
    #     shutil.rmtree(dest)
    # os.mkdir(dest)
    ##
    # NOTE(review): the glob pattern is relative to the cwd, not `src` --
    # fine while src == '.', but they would diverge if src ever changes.
    src_files = glob.glob(os.path.join('*.csv'))
    src_files.append('fixation.png')
    for file_name in src_files:
        full_file_name = os.path.join(src, file_name)
        print(full_file_name)
        if os.path.isfile(full_file_name):
            shutil.copy(full_file_name, dest)
    # NOTE(review): copytree raises FileExistsError if dest/stimuli already
    # exists -- this script assumes a fresh export.
    shutil.copytree('stimuli', os.path.join(dest, 'stimuli'))
|
from django.contrib import admin
from .models import BookInfo, HeroInfo
# Register your models here.
class HeroInfoInline(admin.TabularInline):
    """Edit heroes inline on the book admin page, with 3 blank extra rows."""
    model = HeroInfo
    extra = 3
class BookInfoAdmin(admin.ModelAdmin):
    """Admin options for BookInfo: list columns, title filter/search,
    10-per-page pagination, grouped edit form, and inline hero editing."""
    list_display = ['id', 'btitle', 'bpub_date']
    list_filter = ['btitle']
    search_fields = ['btitle']
    list_per_page = 10
    fieldsets = [
        ('base', {'fields': ['btitle']}),
        ('super', {'fields': ['bpub_date']})
    ]
    inlines = [HeroInfoInline]

# HeroInfo uses the default ModelAdmin.
admin.site.register(BookInfo, BookInfoAdmin)
admin.site.register(HeroInfo)
|
"""Test the localization model.
"""
from sklearn.pipeline import make_pipeline
import numpy as np
import dask.array as da
from pymks.fmks.bases.primitive import discretize, redundancy
from pymks.fmks.localization import fit
from pymks.fmks.bases.primitive import PrimitiveTransformer
from pymks.fmks.localization import LocalizationRegressor
from pymks.fmks.data.delta import generate_delta
from pymks.fmks.data import solve_fe
def _get_x():
    """Return a fixed 1x2x2 dask array with values evenly spaced over [0, 1]."""
    values = np.linspace(0, 1, 4).reshape((1, 2, 2))
    return da.from_array(values, chunks=(1, 2, 2))
def test():
    """Very simple example."""
    # Fit influence coefficients mapping X onto its transpose and compare
    # against the expected closed-form values.
    assert np.allclose(
        fit(
            _get_x(),
            _get_x().swapaxes(1, 2),
            discretize(n_state=2),
            redundancy_func=redundancy,
        ),
        [[[0.5, 0.5], [-2, 0]], [[-0.5, 0], [-1, 0]]],
    )
def test_setting_kernel():
    """Test resetting the coeffs after coeff resize."""
    # Build a delta microstructure and its FE strain response as training data.
    x_data = generate_delta(n_phases=2, shape=(21, 21)).persist()
    y_data = solve_fe(
        x_data, elastic_modulus=(100, 130), poissons_ratio=(0.3, 0.3), macro_strain=0.01
    )["strain"][..., 0].persist()
    model = make_pipeline(PrimitiveTransformer(n_state=2), LocalizationRegressor())
    shape = (30, 30)
    # Keep the originally fitted coefficients so they can be restored later.
    fcoeff = model.fit(x_data, y_data).steps[1][1].coeff
    assert np.allclose(model.steps[1][1].coeff_resize(shape).coeff.shape[:-1], shape)
    model.steps[1][1].coeff = fcoeff
    # The restored coefficients must still reproduce the training output.
    assert np.allclose(model.predict(x_data), y_data, atol=1e-4)
|
import random
def part(nums, left, right):
    """Partition nums[left..right] around nums[left].

    Elements <= pivot end up left of the returned index, elements > pivot
    to its right; returns the pivot's final position.
    """
    pivot_pos = left
    pivot = nums[pivot_pos]
    while left < right:
        # Advance past elements already on the correct side of the pivot.
        while left < len(nums) and pivot >= nums[left]:
            left += 1
        while nums[right] > pivot:
            right -= 1
        if left < right:
            nums[left], nums[right] = nums[right], nums[left]
    # Drop the pivot into its final slot.
    nums[right], nums[pivot_pos] = nums[pivot_pos], nums[right]
    return right

def q_sort(nums, left, right):
    """Sort nums[left..right] in place with recursive quicksort."""
    if left >= right:
        return
    split = part(nums, left, right)
    q_sort(nums, left, split - 1)
    q_sort(nums, split + 1, right)
def lomuto(nums, left, right):
    """Sort nums[left..right] in place; quicksort with a Lomuto-style
    partition that uses the last element as the pivot."""
    if left >= right:
        return
    boundary = scan = left
    pivot_index = right
    pivot = nums[pivot_index]
    while scan <= pivot_index:
        # Skip over the prefix already known to be <= pivot.
        while boundary < pivot_index and nums[boundary] <= pivot:
            boundary += 1
        scan = boundary
        # Find the next element that belongs on the other side.
        while nums[scan] > pivot:
            scan += 1
        nums[boundary], nums[scan] = nums[scan], nums[boundary]
        if scan == pivot_index:
            # The pivot itself was just swapped into `boundary`: done.
            break
    lomuto(nums, left, boundary - 1)
    lomuto(nums, boundary + 1, right)
if __name__ == '__main__':
    # Smoke test: sort 30 random ints in [0, 40] and print the result.
    nums=[random.randint(0,40) for i in range(30)]
    lomuto(nums,0,len(nums)-1)
    print(nums)
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import urllib.request
import urllib.parse
import http.cookiejar
import json
import random
import math
import html5lib
import os
from bs4 import BeautifulSoup
import re
import ConnectUtils
# Per-school endpoint tables, keyed by school id ('1' = GDUF).
school = {'1' : 'http://jwxt.gduf.edu.cn/jsxsd/xk/LoginTOXk' }  # login page
school_grade = {'1' : 'http://jwxt.gduf.edu.cn/jsxsd/ksxj/cjxc_list' }  # course grades
school_rankgrade = {'1' : 'http://jwxt.gduf.edu.cn/jsxsd/kscj/djkscj_list' }  # level exams
def login(schoolid, userid, userpwd):
    """Log in to the school's educational-administration site.

    Saves the session cookies to '<schoolid>#<userid>.txt' on success.
    Returns a dict whose 'statu' is '100' (success, plus username/userid/
    schoolid), '101' (bad credentials) or '102' (network/parse error).
    """
    global school
    filename = schoolid+'#'+userid+'.txt'
    url = school.get(schoolid)
    cj = http.cookiejar.MozillaCookieJar(filename)
    opener = ConnectUtils.getUrlOpener(cj, schoolid, 'login')
    # urllib in Python 3 requires POST data as bytes, not str.
    postData = urllib.parse.urlencode({'encoded': ConnectUtils.encodedInp(userid)+'%%%'+ConnectUtils.encodedInp(userpwd) }).encode('utf-8')
    # `ur'...'` literals are Python 2 only; plain raw strings are correct here.
    pattern = re.compile(r'<div id="Top1_divLoginName" class="Nsb_top_menu_nc" style="color: #000000;"(.+?)/div>')
    try:
        op = opener.open(url, postData)
        result = op.read().decode('utf-8')
        user = re.findall(pattern, result)
        if len(user) == 0:
            # The logged-in banner is absent: credentials were rejected.
            statu = '101'
            return {'statu':statu }
        else:
            statu = '100'
            cj.save(ignore_discard=True, ignore_expires=True)
            # The banner looks like ">NAME(ID)"; pull out both parts.
            username = re.findall(r'\>(.+?)\(', user[0])[0]
            userid = re.findall(r'\((.+?)\)', user[0])[0]
            return {'statu':statu,
                    'username':username,
                    'userid':userid,
                    'schoolid':schoolid }
    except Exception as e:  # `except Exception, e` is Python-2-only syntax
        print(e)
        return {'statu':'102'}
def geaGrade(schoolid, userid, classTime, classNature, className, classShow):
    """Fetch course grades filtered by term/nature/name/display mode.

    Requires a prior login() (saved cookie file). Returns a dict whose
    'statu' is '100' (success, with 'info' rows), '101' (no records),
    '102' (network/parse error) or '103' (not logged in).
    """
    global school_grade
    filename = schoolid+'#'+userid+'.txt'
    if not os.path.exists(filename):
        return {'statu':'103'}
    url = school_grade.get(schoolid)
    cj = http.cookiejar.MozillaCookieJar()
    cj.load(filename, ignore_discard=True, ignore_expires=True)
    opener = ConnectUtils.getUrlOpener(cj, schoolid, 'grade')
    postData = {'kksj':classTime,
                'kcxz':classNature,
                'kcmc':className,
                'xsfs':classShow }
    # urllib in Python 3 requires POST data as bytes, not str.
    postData = urllib.parse.urlencode(postData).encode('utf-8')
    try:
        op = opener.open(url, postData)
        soup = BeautifulSoup(op.read().decode('utf-8'), 'html5lib', from_encoding='utf-8')
        tableDiv = soup.find_all('div', class_='Nsb_pw')
        if tableDiv:
            items = tableDiv[2].find_all('tr')
            if items:
                info = []
                # A lone merged cell in the second row means "no records".
                if len(items)==2 and len(items[1].find_all('td'))==1:
                    statu = '101'
                    print('empty')
                    return {'statu': statu}
                else:
                    # Skip the header row; collect one dict per grade row.
                    for i in range(1, len(items)):
                        tds = items[i].find_all('td')
                        kcbh = tds[2].text   # course number
                        kcmc = tds[3].text   # course name
                        kccj = tds[4].find('a').text  # grade
                        kcxf = tds[5].text   # credits
                        kcjd = tds[7].text   # grade points
                        kcsx = tds[9].text   # course attribute
                        info.append({'kcbh':kcbh,
                                     'kcmc':kcmc,
                                     'kccj':kccj,
                                     'kcxf':kcxf,
                                     'kcjd':kcjd,
                                     'kcsx':kcsx})
                    statu = '100'
                    return {'statu':statu, 'info':info}
            else:
                return {'statu':'102'}
        else:
            return {'statu':'102'}
    except Exception as e:  # `except Exception, e` is Python-2-only syntax
        print(e)
        return {'statu':'102'}
def getRankGrade(schoolid, userid):
    """Fetch level/rank exam grades (e.g. CET) for a logged-in user.

    Returns a dict whose 'statu' is '100' (success, with 'info' rows),
    '101' (no records), '102' (network/parse error) or '103' (not logged in).
    """
    global school_rankgrade
    filename = schoolid+'#'+userid+'.txt'
    if not os.path.exists(filename):
        return {'statu':'103'}
    url = school_rankgrade.get(schoolid)
    cj = http.cookiejar.MozillaCookieJar()
    cj.load(filename, ignore_discard=True, ignore_expires=True)
    opener = ConnectUtils.getUrlOpener(cj, schoolid, 'rankgrade')
    try:
        op = opener.open(url)
        # BUG FIX: bytes has no `.decoded()`; the method is `.decode()`.
        soup = BeautifulSoup(op.read().decode('utf-8'), 'html5lib', from_encoding='utf-8')
        table = soup.find("table",attrs={"id":"dataList"})
        if table:
            li = table.find_all("tr")
            if len(li)>2:
                info = []
                # Rows 0-1 are headers; each later row is one exam record.
                for i in range(2,len(li)):
                    tds = li[i].find_all("td")
                    zkzh = tds[1].text  # admission-ticket number
                    kjkc = tds[2].text  # exam/course name
                    # Percentile-style scores (written / listening / total).
                    bscj1 = tds[3].text
                    jscj1 = tds[4].text
                    zcj1 = tds[5].text
                    fslcj = {'bscj':bscj1, 'jscj':jscj1, 'zcj':zcj1}
                    # Level-style scores (written / listening / total).
                    bscj2 = tds[6].text
                    jscj2 = tds[7].text
                    zcj2 = tds[8].text
                    djlcj = {'bscj':bscj2, 'jscj':jscj2, 'zcj':zcj2}
                    kjsj = tds[9].text  # exam date
                    item = {'zkzh':zkzh, 'kjkc':kjkc, 'fslcj':fslcj, 'djlcj':djlcj, 'kjsj':kjsj}
                    info.append(item)
                return {'statu':'100','info':info}
            elif len(li)==2:
                return {'statu':'101'}
            else:
                return {'statu':'102'}
        else:
            return {'statu':'102'}
    except Exception as e:  # `except Exception, e` is Python-2-only syntax
        print(e)
        return {'statu':'102'}
|
import os
import json
from collections import defaultdict,Counter
import re
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.corpus import wordnet as wn
corpus = {}
# Load the preprocessed corpus. Note: the name `corpus` is rebound from the
# file handle to the parsed dict inside the `with` block.
with open('corpus_data/preprocessed_corpus.json') as corpus:
    corpus = json.loads(corpus.read().encode('utf-8'))

# Collapse each artist's song list into a single long lyrics string.
corpus_2 = defaultdict(str)
for artist,songlist in corpus.items():
    for song in songlist:
        lyrics = song['lyrics'].strip('\\')
        corpus_2[artist] += lyrics
def get_lemma(word):
    """Return the WordNet base form of *word*, or *word* itself if unknown."""
    lemma = wn.morphy(word)
    return word if lemma is None else lemma
STOPWORDS = stopwords.words('english')
# High-frequency rap vocabulary that could optionally be filtered out
# (the filter below is currently commented out).
profanity = set(['fuck','bitch','nigga','shit','money','right','never','fuckin','fucking','never','motherfucker'])

def clean_text(text):
    """Lowercase, tokenize, drop short/stopword/non-alphabetic tokens,
    and lemmatize the remainder."""
    tokenized_text = word_tokenize(text.lower())
    # Keep only tokens longer than 4 characters.
    tokenized_text = [token for token in tokenized_text if len(token) > 4]
    cleaned_text = [t for t in tokenized_text if t not in STOPWORDS and re.match('[a-zA-Z\-][a-zA-Z\-]{2,}', t)]
    cleaned_text = [get_lemma(token) for token in cleaned_text]
    #cleaned_text = [token for token in cleaned_text if token not in profanity]
    return cleaned_text
# Count word frequencies across all artists and write the top 100 words
# (one per line) to a custom stopword file.
common = Counter()
for artist,lyrics in corpus_2.items():
    common += Counter(clean_text(lyrics))
print(common.most_common(100))
with open('corpus_data/rapsw.txt','w') as ofile:
    for key in common.most_common(100):
        ofile.write(key[0] + " \n")
import sys

# Demonstrate CPython list over-allocation: the reported byte size jumps
# in steps while the length grows one element at a time.
data = []
for k in range(30):
    a = len(data)
    b = sys.getsizeof(data)
    print("length: {}, size in bytes: {}".format(a, b))
    data.append(None)
from django.shortcuts import render
from django.http.response import HttpResponse
import os
import time
# Create your views here.
from django.template import loader
from Myapp_covert2musicscore.utils.music21tools import *
def use_music21(music_str):
    """Render a music string to a score image and a wav file.

    Strings containing any uppercase letter use the letter-based parser,
    otherwise the numeric parser. Returns (png_filename, wav_filename).
    """
    print('准备预测')
    # Idiomatic membership test replaces the original manual flag loop
    # (`if each.isupper() == True: pan = True; break`).
    has_upper = any(ch.isupper() for ch in music_str)
    if not has_upper:
        # Pure-digit notation.
        print('纯数字')
        s = musicstr_to_stream(music_str)
    else:
        # Letter-based notation.
        print('大写字母')
        s = musicstr_char_to_stream(music_str)
    png_filname = write_xml_and_get_png(s)
    wav_filname = write_midi_and_get_wav(s)
    return png_filname+'-1.png',wav_filname+'.wav'
'''
Summary:
    将音乐字符串转换成图片
Return:
    返回转换后的结果
'''
# View: accept a music string via GET (?music_str=...) or POST
# ('str-container' form field), render it with music21, and show the
# preview page with the generated image and wav.
def convert_musicstr_2_pic(request):
    if request.method == 'GET':
        print(request.GET)
        music_str = request.GET.get('music_str')
        print(music_str)
    elif request.method == 'POST':
        music_str = request.POST.get('str-container')
        print(request.POST)
        print(music_str)
    # NOTE(review): for any other HTTP method `music_str` is unbound and the
    # next line raises UnboundLocalError -- confirm only GET/POST are routed.
    musicname,wavname = use_music21(music_str)
    return render(request, 'preview.html', {'musicname':musicname,'wavname':wavname})
    # keep the string in the context for later use
    # return render(request, 'test_show_musicscore_pic.html', {'musicname':musicname+'-1.png'})
'''
Summary:
    展示字符串上传的首页
Return:
    首页的html
'''
# Landing page with the music-string upload form.
def show_uploadstr_index(request):
    return render(request, 'str2music.html', {})
'''
Summary:
    展示图片(测试用)
Return:
    含有图片的html(测试用)
'''
# Test-only view: render the score-image template with an empty context.
def test_show_pic(request):
    print('到这里')
    template = loader.get_template('test_show_musicscore_pic.html')
    context = {}
    return HttpResponse(template.render(context, request))
|
import cv2
import torch
from tqdm import tqdm
from population import Population
# Genetic-algorithm image approximation settings.
folder = "image.jpg"             # source image path
pic_size = 64                    # working resolution used for fitness
drawing_pic_size = 512           # resolution of the rendered output
population_size = 30
number_of_polygons = 50
pop_cut = 0.1                    # fraction of the population kept as fittest
iterations = 4000
output_pic_name = "output_image.png"

def pic_show(img, img_name=output_pic_name):
    """Save *img* to *img_name* and display it until a key is pressed."""
    cv2.imshow(img_name, img)
    cv2.imwrite(img_name, img=img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def prepare_image(img_size):
    """Load `folder`, center-crop it to a square, and resize to img_size."""
    img = cv2.imread(folder)
    height, width, _ = img.shape
    # Crop the longer dimension symmetrically to make the image square
    # (differences of <= 2 px are left to the resize).
    if abs(height - width) > 2:
        cut = (max(height, width) - min(height, width)) // 2
        if height > width:
            img = img[cut: -cut]
        elif width > height:
            img = img[:, cut: -cut]
    img = cv2.resize(img, (img_size, img_size))
    return img
def main():
    """Evolve a set of polygons to approximate the target image on the GPU,
    saving a snapshot every 10 iterations and showing the final result."""
    original = prepare_image(pic_size)
    tensor_img = torch.from_numpy(original).float().cuda()
    # Higher-resolution copy used only for rendering the output picture.
    full_tensor = torch.from_numpy(prepare_image(drawing_pic_size)).float().cuda()
    res_img = torch.zeros_like(tensor_img).float().cuda()
    population = Population(population_size, pop_cut, pic_size, number_of_polygons, tensor_img)
    for i in tqdm(range(iterations)):
        population.mutative_crossover()
        best = population.get_fittest()
        res_img = best.polygons_to_canvas(full_tensor, drawing_pic_size)
        if i % 10 == 0:
            # Periodic snapshot of the current best rendering.
            cv2.imwrite("s_" + output_pic_name, img=torch.round(res_img).byte().cpu().numpy())
    pic_show(torch.round(res_img).byte().cpu().numpy())

main()
|
import argparse
import os.path
import torch
import numpy as np
from torchvision import datasets, transforms
class Disjoint(object):
    """Builds 'disjoint task' splits for continual learning: partitions a
    10-class dataset into n_tasks consecutive class ranges and saves the
    per-task train/test tensors to disk."""
    def __init__(self, args):
        super(Disjoint, self).__init__()
        self.upperbound = args.upperbound  # if set, each task spans classes 0..c2
        self.n_tasks = args.n_tasks
        self.i = args.i                    # input directory for raw tensors
        self.train_file = args.train_file
        self.test_file = args.test_file
        self.dataset = args.dataset
        if self.upperbound:
            self.o_train = os.path.join(args.o, 'upperbound_disjoint_' + str(self.n_tasks) + '_train.pt')
            self.o_test = os.path.join(args.o, 'upperbound_disjoint_' + str(self.n_tasks) + '_test.pt')
        else:
            self.o_train = os.path.join(args.o, 'disjoint_' + str(self.n_tasks) + '_train.pt')
            self.o_test = os.path.join(args.o, 'disjoint_' + str(self.n_tasks) + '_test.pt')

    def load_cifar10(self):
        """Download CIFAR-10 and return (train_x, train_y, test_x, test_y)."""
        transform_train = transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
        ])
        dataset_train = datasets.CIFAR10(root='./Datasets', train=True, download=True, transform=transform_train)
        # Materialize the dataset into dense tensors for slicing by label.
        tensor_data = torch.Tensor(len(dataset_train),3,32,32)
        tensor_label = torch.LongTensor(len(dataset_train))
        for i in range(len(dataset_train)):
            tensor_data[i] = dataset_train[i][0]
            tensor_label[i] = dataset_train[i][1]
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
        ])
        # NOTE(review): test set uses root='./data' while the train set uses
        # './Datasets' -- likely unintended (downloads CIFAR-10 twice).
        dataset_test = datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
        tensor_test = torch.Tensor(len(dataset_test),3,32,32)
        tensor_label_test = torch.LongTensor(len(dataset_test))
        for i in range(len(dataset_test)):
            tensor_test[i] = dataset_test[i][0]
            tensor_label_test[i] = dataset_test[i][1]
        #testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=2)
        return tensor_data, tensor_label, tensor_test, tensor_label_test

    def formating_data(self):
        """Slice the dataset into per-task class ranges and save both splits."""
        tasks_tr = []
        tasks_te = []
        if self.dataset == 'cifar10':
            x_tr, y_tr, x_te, y_te = self.load_cifar10()
            x_tr = x_tr.float().view(x_tr.size(0), -1)
            x_te = x_te.float().view(x_te.size(0), -1)
        else:
            assert os.path.isfile(os.path.join(self.i, self.train_file))
            assert os.path.isfile(os.path.join(self.i, self.test_file))
            x_tr, y_tr = torch.load(os.path.join(self.i, self.train_file))
            x_te, y_te = torch.load(os.path.join(self.i, self.test_file))
            # Raw image tensors are stored as 0-255 values; scale to [0, 1].
            x_tr = x_tr.float().view(x_tr.size(0), -1) / 255.0
            x_te = x_te.float().view(x_te.size(0), -1) / 255.0
        y_tr = y_tr.view(-1).long()
        y_te = y_te.view(-1).long()
        # Assumes exactly 10 classes, split evenly across tasks.
        cpt = int(10 / self.n_tasks)
        for t in range(self.n_tasks):
            if self.upperbound:
                c1 = 0  # upperbound variant: every task starts at class 0
            else:
                c1 = t * cpt
            c2 = (t + 1) * cpt
            i_tr = ((y_tr >= c1) & (y_tr < c2)).nonzero().view(-1)
            i_te = ((y_te >= c1) & (y_te < c2)).nonzero().view(-1)
            tasks_tr.append([(c1, c2), x_tr[i_tr].clone(), y_tr[i_tr].clone()])
            tasks_te.append([(c1, c2), x_te[i_te].clone(), y_te[i_te].clone()])
        torch.save(tasks_tr, self.o_train)
        torch.save(tasks_te, self.o_test)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--i', default='raw/cifar100.pt', help='input directory')
    parser.add_argument('--o', default='cifar100.pt', help='output file')
    parser.add_argument('--n_tasks', default=10, type=int, help='number of tasks')
    parser.add_argument('--seed', default=0, type=int, help='random seed')
    # Arguments read by Disjoint.__init__ that were missing from the parser
    # (the old code would fail with AttributeError before reaching the
    # incorrect `Disjoint()` / `formating_data(args)` calls).
    parser.add_argument('--upperbound', action='store_true',
                        help='each task includes all classes from 0 upward')
    parser.add_argument('--train_file', default='train.pt',
                        help='training tensor file inside --i')
    parser.add_argument('--test_file', default='test.pt',
                        help='test tensor file inside --i')
    parser.add_argument('--dataset', default='mnist',
                        help="dataset name ('cifar10' downloads via torchvision)")
    args = parser.parse_args()
    torch.manual_seed(args.seed)
    # Disjoint takes the parsed args at construction, and
    # formating_data() takes no arguments.
    DataFormater = Disjoint(args)
    DataFormater.formating_data()
|
from app.api_functions.database import get_db
from fastapi import APIRouter
from typing import List
from fastapi import status
from fastapi.params import Depends
from sqlalchemy.orm import Session
from app.models import schemas
from app.logic.oauth2 import get_current_user
from app.logic import user
# All user endpoints live under /user.
router = APIRouter(prefix="/user", tags=['User'])

@router.post('/',status_code=status.HTTP_201_CREATED, response_model=schemas.UserResponse)
async def create_user(request: schemas.User,db: Session = Depends(get_db)):
    # Open endpoint: registration requires no authentication.
    return user.create_user(request,db)

@router.get('/',tags=['User'],response_model=List[schemas.UserResponse])
async def get_users(db: Session = Depends(get_db),current_user:schemas.User= Depends(get_current_user)):
    # Requires a valid token via the current_user dependency.
    return user.get_users(db)

@router.get('/{id}',status_code=status.HTTP_200_OK, response_model=schemas.UserResponse)
async def get_user(id:int, db: Session = Depends(get_db),current_user:schemas.User= Depends(get_current_user)):
    # Fetch a single user by primary key (authenticated).
    return user.get_user(id,db)

@router.put('/{id}',status_code=status.HTTP_202_ACCEPTED)
async def update_user(id:int,request: schemas.User,db: Session = Depends(get_db),current_user:schemas.User= Depends(get_current_user)):
    # Replace an existing user's data (authenticated).
    return user.update_user(id,request,db)
|
# Dict basics: mixed key types, values()/keys() views, and dict.fromkeys.
a={1:'he','nu':'hehe'}
print(a[1],a['nu'])
print(a.values())
print(a.keys())
a=['key1','key2','key3']
dic=dict.fromkeys(a,10)  # every key maps to the same value, 10
print(dic)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import style
from statistics import mean
import random
# X = np.array([1,2,3,4,5,6],dtype='float64')
# y = np.array([5,4,6,5,6,7],dtype='float64')

def createDataset(samples,variance,step=2,correlation=False):
    """Generate a noisy dataset of `samples` points as float64 arrays.

    correlation "pos"/"neg" makes y trend up/down by `step` per point;
    any other value keeps y centered around the starting value.
    """
    val = 1
    y = []
    for _ in range(samples):
        point = val + random.randrange(-variance,variance)
        y.append(point)
        if correlation == "pos":
            val+=step
        elif correlation == "neg":
            val-=step
    # X is simply the sample index 0..samples-1.
    X = []
    for x in range(len(y)):
        X.append(x)
    X = np.array(X,dtype="float64")
    y = np.array(y,dtype="float64")
    return X,y

X,y = createDataset(50,50,correlation="pos")
def fit(X,y):
    """Ordinary least squares via the means formula.

    Returns (slope rounded to 2 decimals, intercept); the intercept is
    computed from the unrounded slope.
    """
    mean_x, mean_y = mean(X), mean(y)
    slope = (mean_x * mean_y - mean(X * y)) / (mean_x ** 2 - mean(X ** 2))
    intercept = mean_y - slope * mean_x
    return round(slope, 2), intercept
def squarredError(yPred,y):
return sum((yPred-y)**2)
def coefficient_of_determination(y, yPred):
    """R-squared of the regression line, rounded to 2 decimal places.

    Compares the squared error of the predictions against the squared
    error of always predicting the mean of y.
    """
    baseline = [mean(y)] * len(y)
    sse_regression = squarredError(yPred, y)
    sse_baseline = squarredError(baseline, y)
    return round(1 - (sse_regression / sse_baseline), 2)
# Fit the model to the random dataset generated above.
m, b = fit(X,y)
print("slope:",m,"\nintercept:",b)
# Build the regression line: predicted y for every x in the dataset.
# for x in X:
#     regressionLine.append((m*x)+b)
regressionLine = [(m*x)+b for x in X] #this will generate a list of y predicted coordinates
#print(regressionLine)
rSquarred = coefficient_of_determination(y,regressionLine)
print("coefficient_of_determination:",rSquarred)
# Predict the y value corresponding to a previously unseen x.
new_x = 8
predicted_y = (m*new_x)+b
# Plot the raw data, the new prediction (red dot), and the fitted line.
style.use('fivethirtyeight')
plt.scatter(X,y)
plt.scatter(new_x,predicted_y,s=100,color='red')
plt.plot(X,regressionLine)
plt.show()
|
'''
Created on Apr 10, 2016
There are three types of edits that can be performed on strings: insert a character,
remove a character, or replace a character. Given two strings, write a function to check
if they are one edit (or zero edit) away
@author: chunq
'''
def isOneAway(str1, str2):
    """Return True if str1 and str2 are zero or one edit apart.

    An edit is a single character insertion, removal, or replacement.
    """
    len1, len2 = len(str1), len(str2)
    if len1 == len2:
        # Same length: only a replacement is possible.
        return isOneEditAway(str1, str2)
    if len1 - len2 == 1:
        # str1 is one longer: str2 plus one inserted character.
        return isOneInsertAway(str1, str2)
    if len2 - len1 == 1:
        return isOneInsertAway(str2, str1)
    # Lengths differ by two or more: never one edit away.
    return False
def isOneEditAway(str1, str2):
    """True if two equal-length strings differ in at most one position."""
    mismatches = 0
    for a, b in zip(str1, str2):
        if a != b:
            mismatches += 1
            if mismatches > 1:
                return False
    return True
def isOneInsertAway(longString, shortString):
    """True if shortString plus one inserted character equals longString.

    Precondition (guaranteed by isOneAway): len(longString) ==
    len(shortString) + 1.

    Walks both strings with independent cursors.  On the first mismatch
    the long string's cursor skips the presumed inserted character; a
    second mismatch means more than one edit is needed.

    Bug fixed: the previous version advanced the long cursor *before*
    comparing, so longString[0] was never examined and valid one-removal
    pairs such as ('pale', 'ple') or ('pale', 'pal') were rejected.
    """
    shortIndex = 0
    longIndex = 0
    hasDifference = False
    while shortIndex < len(shortString) and longIndex < len(longString):
        if shortString[shortIndex] != longString[longIndex]:
            if hasDifference:
                return False
            hasDifference = True
            # Skip the extra character in the longer string only.
            longIndex += 1
        else:
            shortIndex += 1
            longIndex += 1
    return True
if __name__ == '__main__':
    # Quick manual checks: one replacement, one removal, and a two-edit case.
    for first, second in (('bale', 'pale'), ('apple', 'pple'), ('apple', 'pplde')):
        print(isOneAway(first, second))
from selenium import webdriver
# Smoke test: open Yahoo in Chrome, check the page title, then close.
browser = webdriver.Chrome()
browser.get("http://www.yahoo.com")
# NOTE(review): `assert` statements are stripped under `python -O`; raise an
# explicit exception if this title check matters in production.
assert "Yahoo!" in browser.title
browser.close()
#coding=gbk
from selenium import selenium
def selenium_init(browser, url, para):
    """Start a Selenium RC session against localhost:4444 and open *para*.

    The window is focused and maximized before the session is returned.
    """
    session = selenium('localhost', 4444, browser, url)
    session.start()
    session.open(para)
    session.set_timeout(60000)
    session.window_focus()
    session.window_maximize()
    return session
def selenium_capture_screenshot(sel):
    """Save a screenshot of the visible area to a fixed path on drive D."""
    sel.capture_screenshot("d:\\singlescreen.png")
def selenium_get_value(sel):
    """Print the innerHTML and href of the element with id 'urla'.

    Uses get_eval to run JavaScript in the browser, since Selenium RC has
    no direct attribute accessors.
    """
    innertext=sel.get_eval("this.browserbot.getCurrentWindow().document.getElementById('urla').innerHTML")
    url=sel.get_eval("this.browserbot.getCurrentWindow().document.getElementById('urla').href")
    print("The innerHTML is :"+innertext+"\n")
    print("The url is :"+url+"\n")
def selenium_capture_entire_page_screenshot(sel):
    """Save a full-page screenshot (Firefox only) to a fixed path on drive D."""
    sel.capture_entire_page_screenshot("d:\\entirepage.png", "background=#CCFFDD")
if __name__ =="__main__" :
    # Session 1: open a Baidu MP3 search result page, dump the 'urla'
    # element's text/href and take a viewport screenshot.
    sel1=selenium_init('*firefox3','http://202.108.23.172','/m?word=mp3,http://www.slyizu.com/mymusic/VnV5WXtqXHxiV3ZrWnpnXXdrWHhrW3h9VnRkWXZtXHp1V3loWnlrXXZlMw$$.mp3,,[%B1%A7%BD%F4%C4%E3+%CF%F4%D1%C7%D0%F9]&ct=134217728&tn=baidusg,%B1%A7%BD%F4%C4%E3%20%20&si=%B1%A7%BD%F4%C4%E3;;%CF%F4%D1%C7%D0%F9;;0;;0&lm=16777216&sgid=1')
    selenium_get_value(sel1)
    selenium_capture_screenshot(sel1)
    sel1.stop()
    # Session 2: open the Sina homepage and take a full-page screenshot.
    sel2=selenium_init('*firefox3','http://www.sina.com.cn','/')
    selenium_capture_entire_page_screenshot(sel2)
    sel2.stop()
#inheritance
class Employee:
    """Demo class showing Python's three attribute "visibility" levels."""

    def __init__(self):
        self.__id = 10       # name-mangled to _Employee__id
        self._name = "ABC"   # single underscore: private by convention only
        self.salary = 100    # public
class Student(Employee):
    """Subclass demonstrating which inherited attributes are visible."""

    def Display(self):
        # self.__id would fail here: name mangling turns it into
        # _Student__id, not the base class's _Employee__id.
        print(self._name)
        print(self.salary)
# Instantiate the subclass and let it print the inherited attributes.
ob=Student()
ob.Display()
# A single leading underscore is only a convention: both _name and salary
# are still reachable from outside the class.
print(ob._name,ob.salary)
|
import requests
import json
# Mutable module state shared with search(): the last raw API response and
# the paging offset into the result set.
r = {}
start_index = 0
# Initial search term; search() also uses this as its default query.
query = input("What would you like to search for? ")
def niceprint(dct):
    """Pretty-print a list of single-entry dicts as right-aligned columns."""
    print("\n")
    for book in dct:
        for key, value in book.items():
            print("{: >10} {: >10}".format(key, value))
    print("\n")
def search(start_index, query=query):
    """Query the Google Books volumes API and pretty-print one result page.

    Updates the module-level response cache ``r`` in place so that the
    interactive loop below can show per-book details.  Returns the list of
    {index: "title - authors"} dicts that were printed.
    """
    # NOTE(review): the 'author' URL ignores start_index and is never
    # actually requested -- confirm whether author search is still wanted.
    base_url = {
        'book': f"https://www.googleapis.com/books/v1/volumes?q={query}&startIndex={start_index}",
        'author': f"https://www.googleapis.com/books/v1/volumes?q=inauthor:{query}"}
    print("Searching for: ", base_url['book'])
    r.update(requests.get(base_url['book']).json())
    tidy_output = []
    for index, item in enumerate(r['items']):
        try:
            authors = ', '.join(item['volumeInfo']['authors'])
        except KeyError:
            # Some volumes have no 'authors' field at all.
            authors = "Unknown author"
        tidy_output.append(
            {
                index: f"{item['volumeInfo']['title']} - {authors}",
            }
        )
    niceprint(tidy_output)
    return tidy_output
search(start_index, query)
# Interactive pager: a digit shows that result's description, Z/X page
# backward/forward through results, N starts a fresh search.
while(1):
    moreInfo = input(
        "Enter number of book to read about, Z: Back, X: Forward, N: Search ")
    try:
        if(int(moreInfo) >= 0 and int(moreInfo) < 10):
            moreInfo = int(moreInfo)
            print(f"Further details on: {moreInfo}...\n")
            # Show at most the first 400 characters of the description.
            print(
                f"{r['items'][moreInfo]['volumeInfo']['description'][: 400]}...\n\n")
    except KeyError:
        # The selected volume has no 'description' field.
        print("No description found!")
    except ValueError:
        # Non-numeric input: interpret it as a navigation command.
        if(moreInfo.lower() == "z" and start_index > 0):
            print("Going back a page...")
            start_index -= 10
            search(start_index)
        elif(moreInfo.lower() == "x"):
            print("Going forward a page...")
            start_index += 10
            search(start_index)
        elif(moreInfo.lower() == "n"):
            query = input("What would you like to search for? ")
            search(0, query)
|
from pprint import pprint
import socket
import packetcodec
from binascii import hexlify
UDP_IP = "0.0.0.0"   # listen on all interfaces
UDP_PORT = 56700     # port the packetcodec traffic arrives on
# Plain UDP datagram socket bound for receiving.
sock = socket.socket(socket.AF_INET,
                     socket.SOCK_DGRAM)
sock.bind((UDP_IP, UDP_PORT))
while True:
    # Block until a datagram arrives (up to 1024 bytes).
    data, addr = sock.recvfrom(1024)
    packet = packetcodec.decode_packet(data)
    # Bug fixed: `unicode()` does not exist in Python 3 and raised a
    # NameError on every received packet; str() is the Python 3 equivalent.
    print(addr, str(packet))
    pprint(packet.payload.data)
|
from certificate_verification import RuleMaker
from common import Certificate, SingletonMeta
class CertificateVerifier(metaclass=SingletonMeta):
    """Checks every steel plate of a certificate against its rules."""

    @staticmethod
    def verify(cert: Certificate, rule_maker: RuleMaker) -> bool:
        """Return True only if every plate passes every applicable rule.

        Every rule is evaluated for every plate (no short-circuiting),
        matching the original nested list comprehensions, so any
        side effects inside limit.verify still happen for all plates.
        """
        plate_results = []
        for plate in cert.steel_plates:
            rule_results = [limit.verify(plate) for limit in rule_maker.get_rules(plate)]
            plate_results.append(all(rule_results))
        return all(plate_results)
class BaoSteelCertificateVerifier(CertificateVerifier):
    # Plant-specific verifier; currently identical to the base behaviour.
    pass
class LongTengCertificateVerifier(CertificateVerifier):
    # Plant-specific verifier; currently identical to the base behaviour.
    pass
# class CertificateVerifier:
# @staticmethod
# def verify(cert: Certificate, rule_maker: RuleMaker) -> bool:
# all_valid_flag = True
# for steel_plate_index in range(len(cert.serial_numbers)):
# print(f"Checking Steel Plate No. {cert.steel_plates[steel_plate_index].serial_number}:")
# limit_list = rule_maker.get_rules(cert, steel_plate_index)
# for limit in limit_list:
# certificate_element = limit.get_element(cert, steel_plate_index)
# if isinstance(certificate_element, CertificateElementToVerify):
# if isinstance(certificate_element, ChemicalElementValue):
# certificate_element.valid_flag, certificate_element.message = limit.verify(
# certificate_element.calculated_value)
# else:
# certificate_element.valid_flag, certificate_element.message = limit.verify(
# certificate_element.value)
# all_valid_flag = all_valid_flag and certificate_element.valid_flag
# else:
# pass_flag, _ = limit.verify(certificate_element)
# all_valid_flag = all_valid_flag and pass_flag
# fine_grain_elements_limit_list = rule_maker.get_fine_grain_elements_rules(cert, steel_plate_index)
# if not cert.specification.valid_flag:
# print(cert.specification.message)
# if not cert.thickness.valid_flag:
# print(cert.thickness.message)
# if not cert.steel_plates[steel_plate_index].delivery_condition.valid_flag:
# print(cert.steel_plates[steel_plate_index].delivery_condition.message)
# if len(fine_grain_elements_limit_list) > 0:
# print(
# f"Eligible fine grain element combinations are ["
# f"{', '.join([str(limit) for limit in fine_grain_elements_limit_list])}]."
# )
# fine_grain_elements_valid_flag = False
# for limit in fine_grain_elements_limit_list:
# certificate_element = limit.get_element(cert, steel_plate_index)
# pass_flag, _ = limit.verify(certificate_element)
# if pass_flag:
# fine_grain_elements_valid_flag = True
# break
# if not fine_grain_elements_valid_flag:
# CertificateVerifier.update_fine_grain_elements(cert, steel_plate_index, 'Alt')
# CertificateVerifier.update_fine_grain_elements(cert, steel_plate_index, 'Als')
# CertificateVerifier.update_fine_grain_elements(cert, steel_plate_index, 'Ti')
# CertificateVerifier.update_fine_grain_elements(cert, steel_plate_index, 'Nb')
# all_valid_flag = False
# return all_valid_flag
#
# @staticmethod
# def update_fine_grain_elements(cert: Certificate, steel_plate_index: int, element: str):
# chemical_composition = cert.steel_plates[steel_plate_index].chemical_compositions
# error_message = (
# f"Fine Grain Elements don't meet the requirements of Specification {cert.specification.value} "
# f"and Delivery Condition {cert.steel_plates[steel_plate_index].delivery_condition.value} and "
# f"Thickness {cert.thickness.value}."
# )
# if element in chemical_composition:
# chemical_composition[element].valid_flag = False
# chemical_composition[element].message = error_message
# else:
# chemical_composition[element] = ChemicalElementValue(
# table_index=1,
# x_coordinate=None,
# y_coordinate=None,
# value=None,
# index=steel_plate_index,
# valid_flag=False,
# message=error_message,
# element=element,
# precision=None
# )
# def authenticate() -> bool:
# user_name = input("Please sign in with your user name: ").strip()
# password = input("Please input the password: ").strip()
# url = f"http://127.0.0.1:8000/authenticate/{user_name}?password={password}"
#
# payload = {}
# headers = {
# 'accept': 'application/json'
# }
#
# response = requests.request("GET", url, headers=headers, data=payload)
# parsed_response = json.loads(response.text)
# print(parsed_response['message'])
# if parsed_response['exit_code'] == 0:
# return True
# else:
# return False
# def process():
# register = CertificateFactoryRegister()
# register.register_factory(steel_plant='BAOSHAN IRON & STEEL CO., LTD.',
# certificate_factory=BaoSteelCertificateFactory())
# register.register_factory(steel_plant='CHANGSHU LONGTENG SPECIAL STEEL CO., LTD',
# certificate_factory=LongTengCertificateFactory())
#
# # Print the current working directory
# print(os.getcwd())
# # List the pdf files and subdirectories in the working directory
# certificate_files = []
# subdirectories = []
# with os.scandir() as it:
# for entry in it:
# if entry.is_file():
# if entry.name.lower().endswith('.pdf') or entry.name.lower().endswith(
# '.doc') or entry.name.lower().endswith('.docx'):
# certificate_files.append(entry.name)
# if entry.is_dir():
# subdirectories.append(entry.name)
#
# # create the destination folders if they don't exist
# if 'PASS' not in subdirectories:
# os.mkdir('PASS')
# if 'FAIL' not in subdirectories:
# os.mkdir('FAIL')
# if 'EXCEPTION' not in subdirectories:
# os.mkdir('EXCEPTION')
#
# passed_certificates: List[Certificate] = []
# certificates_with_exception: List[Tuple[str, str]] = []
# # Iterate the pdf files, read each pdf file and verify, and distribute the files to respective destination folders
# # certificates = []
# for file in certificate_files:
# print(f"\n\nProcessing file {file} ...")
# try:
# with CommonUtils.open_file(file) as cert_file:
# factory = register.get_factory(steel_plant=cert_file.steel_plant)
# certificates = factory.read(file=cert_file)
# # print(certificate)
# for certificate in certificates:
# valid_flag = CertificateVerifier.verify(certificate, factory.get_rule_maker())
# # with open(file.replace('.pdf', '.pickle'), 'wb') as f:
# # pickle.dump(certificate, f)
# if valid_flag:
# print(f"Verification Pass!")
# shutil.copy(file, 'PASS')
# # os.remove is used instead of shutil.move because of compatibility problem with pyinstaller
# os.remove(file)
# passed_certificates.append(certificate)
# else:
# print(f"Verification Fail!")
# shutil.copy(file, 'FAIL')
# # os.remove is used instead of shutil.move because of compatibility problem with pyinstaller
# os.remove(file)
# write_single_certificate_to_excel(
# certificate=certificate,
# sheet_name='FAIL',
# output_file=os.path.join('FAIL', file.replace('.pdf', '.xlsx'))
# )
# except Exception as e:
# print(f"Exception occurred during reading the PDF file!")
# print(e)
# shutil.copy(file, 'EXCEPTION')
# # os.remove is used instead of shutil.move because of compatibility problem with pyinstaller
# os.remove(file)
# # with open(os.path.join('EXCEPTION', file.replace('.pdf', '.txt')), 'w') as f:
# # f.write(str(e))
# certificates_with_exception.append((file, str(e)))
# write_multiple_certificates_to_excel(passed_certificates)
# write_certificates_with_exception(certificates_with_exception)
if __name__ == '__main__':
    # Entry point intentionally disabled; the previous authenticate/process
    # workflow is kept in the commented block below for reference.
    pass
# try:
# authentication_failure_count = 0
# while not (authentication_result := authenticate()):
# authentication_failure_count += 1
# if authentication_failure_count >= 3:
# print("Authentication failed three times!")
# sys.exit()
# process()
# except Exception as e:
# print(e)
# input(f"Click enter or close button to finish...")
# Above is the previous version
|
""" Groove Detection:
These functions are for detecting grooves on a record.
The order of the functions in this file is the order of
expected use.
Groove data is produced from image data and
a known center point.
"""
import cv2 as cv
import numpy as np
import pandas as pd
import operator
import Data
class Groove:
    """One groove on the record, stored as a list of angular samples.

    A least-squares line is fitted to the samples at construction time;
    its slope describes how quickly the groove spirals in or out.
    Grooves can be chained via next_groove/last_groove.

    NOTE(review): the accessors treat point[0] as theta and point[1] as
    rho, but skeleton_to_points emits (rho, theta) tuples -- confirm the
    intended ordering upstream.
    """

    def __init__(self, angular_data=None, next_groove=None, last_groove=None):
        # Bug fixed: the old default `angular_data=list()` was a mutable
        # default argument, shared between every instance constructed
        # without data; use None as the sentinel instead.
        self.angular_data = [] if angular_data is None else angular_data
        # Fit rho as a linear function of theta; polyfit returns
        # [slope, intercept] for degree 1.
        best_fit = np.polyfit(self.get_theta_axis(), self.get_rho_axis(), 1)
        self.slope = best_fit[0]
        self.next_groove = next_groove
        self.last_groove = last_groove

    def get_theta_axis(self):
        """First element of every sample."""
        return [point[0] for point in self.angular_data]

    def get_rho_axis(self):
        """Second element of every sample."""
        return [point[1] for point in self.angular_data]
def load_grey_scale(path):
    """Load the image at *path* as a single-channel greyscale array."""
    bw = cv.imread(path, cv.IMREAD_GRAYSCALE)
    return bw
def slice_image(image, center, inner_radius, outer_slice_radius, inner_slice_radius):
    """Keep only an annulus of *image* around *center*; the rest is blacked out.

    The annulus spans radii (inner_radius + inner_slice_radius) to
    (inner_radius + outer_slice_radius).
    """
    black2 = np.zeros(image.shape, np.uint8)
    # Draw the outer disc in white, then punch the inner disc back to black,
    # leaving a white ring to mask with.
    cv.circle(black2, center, (inner_radius + outer_slice_radius), (255, 255, 255), -1)
    cv.circle(black2, center, (inner_radius + inner_slice_radius), (0, 0, 0), -1)
    return cv.bitwise_and(image, black2)
def apply_threshold(image):
    """Binarize with a Gaussian-weighted adaptive threshold (57px window)."""
    image_threshold = cv.adaptiveThreshold(image, 80, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 57, 0)
    return image_threshold
def image_to_skeleton(image):
    """Morphologically skeletonize a binary image.

    Repeatedly erodes the image, ORing into the skeleton the pixels that an
    open operation would not restore, until the image is fully eroded.
    Returns the np.where (rows, cols) index tuple of skeleton pixels, not
    an image.
    """
    element = cv.getStructuringElement(cv.MORPH_ELLIPSE, (3, 3))
    done = False
    size = np.size(image)
    skeleton = np.zeros(image.shape, np.uint8)
    while not done:
        eroded = cv.erode(image, element)
        temp = cv.dilate(eroded, element)
        # Pixels removed by erosion that dilation cannot bring back belong
        # to the skeleton.
        temp = cv.subtract(image, temp)
        skeleton = cv.bitwise_or(skeleton, temp)
        image = eroded.copy()
        zeros = size - cv.countNonZero(image)
        if zeros == size:
            done = True
    indices = np.where(skeleton > [0])
    return indices
""" I want this to return a list of tuples (rho, theta), and I don't care about sorting.
Not doing the sort here really sped things up. (That needs to be implemented here. It's in the
test script)
"""
def skeleton_to_points(indices, center):
    """Convert skeleton pixel indices to polar (rho, theta) points.

    *indices* is the (rows, cols) pair produced by np.where in
    image_to_skeleton; *center* is the (x, y) pixel of the record's
    center.  Theta is normalized to [0, 2*pi).  No sorting is done here
    (deliberately -- it was measured to be faster elsewhere).

    Bugs fixed: the previous version referenced the undefined name
    `centerSmall` instead of the *center* parameter (NameError at
    runtime), and its special-cased handling of index 0 computed that
    point but never appended it, so the first pixel was dropped.  The
    debug prints on those broken values were removed with the dead code.
    """
    points = list()
    for i in range(len(indices[0])):
        # Image rows grow downward, so y is flipped relative to the center.
        x = indices[1][i] - center[0]
        y = center[1] - indices[0][i]
        rho = np.sqrt(x ** 2 + y ** 2)
        theta = np.arctan2(y, x)
        if theta < 0:
            theta = theta + 2 * np.pi
        points.append((rho, theta))
    return points
def points_to_grooves(histogram, bin_edges, inclusion_threshold, points=list()):
    """ The histogram tells us the distribution of points in the data set.
    The more points in a bin, the more likely that there's a groove in the bin.
    Moving through the histogram array backwards, check if the bin meets the
    inclusion threshold. If the bin meets the inclusion threshold, then
    take the points in that bin, and instantiate a Groove object.
    Inclusion threshold will depend on bin sizing. This could support adding points
    from multiple bins. For example, find a bin that meets inclusion and keep including
    bins until the inclusion threshold isn't met anymore.
    h: [30, 291, 546, ...]
    bin_edges: [-3.20, -1.83, -0.45, ..]
    bin_0 contains 30 values and it spans [-3.20, -1.83) (last bin is [])
    I'm assuming that points is a list of tuples (rho, theta) sorted from max to min rho.
    But I'm not going to hack up skeleton_to_points yet.
    This might be improved by tacking the bin that proceed the first valid on. There's a kind of cut-off
    going on right now.
    """
    # NOTE(review): `points=list()` is a mutable default argument; it is only
    # read here, but a None sentinel would be safer.
    grooves = list()
    points_temp = list()
    last_bin_valid = False
    # it might be worth checking for duplicates (not sure it's possible, but a check might be worth it)
    """ This for loop is currently running in the wrong direction.
    This was done on purpose. (I didn't want to think about the
    problem backwards yet)
    """
    for histogram_bin in range(len(histogram)):
        this_bin_valid = False
        if histogram[histogram_bin] > inclusion_threshold:
            this_bin_valid = True
            bin_edge_min = bin_edges[histogram_bin]
            bin_edge_max = bin_edges[histogram_bin + 1]
            """ Handling special case of final bin being [ ] (opposed to [ ) for
            other bins.
            This will go through the entire list. Might be worth finding a better way.
            """
            # NOTE(review): histogram_bin ranges over 0..len(histogram)-1, so
            # this condition is always True and the inclusive-upper-edge
            # branch below is dead; it was probably meant to compare against
            # len(histogram) - 1.  Also, the comparison uses point[1], which
            # is theta in skeleton_to_points' (rho, theta) tuples -- confirm
            # the histogram is binned over the same coordinate.
            if histogram_bin != len(histogram):
                [points_temp.append(point) for point in points if (bin_edge_min <= point[1] < bin_edge_max)]
            else:
                [points_temp.append(point) for point in points if (bin_edge_min <= point[1] <= bin_edge_max)]
        """ If the current bin is invalid, and the last bin was valid, then
        we've collected the points in a groove.
        To do: handle linking. (if necessary, the order in the list is
        probably a sufficient way to deal with linking). Handling this here
        might deal with that "I did it backwards" thing.
        """
        if this_bin_valid is False and last_bin_valid is True:
            # I'm not convinced that rejecting outliers is useful.
            #points_temp = points_reject_rho_outliers(points_temp, m=3)
            #points_temp = points_reject_theta_outliers(points_temp)
            # Sort by radius in order of increasing angle.
            points_temp.sort(key=operator.itemgetter(1))
            grooves.append(Groove(points_temp, None, None))
            points_temp = list()
        last_bin_valid = this_bin_valid
    if len(grooves) == 0:
        raise RuntimeError('no grooves were found, try adjusting the inclusion threshold and number of bins used.')
    return grooves
def points_reject_rho_outliers(data, m=2):
    """Drop points whose rho deviates from the median rho by more than *m*
    times the median absolute deviation (MAD).

    If the MAD is zero (all rho values identical) every point is kept.
    Bugs fixed: the previous fallback assigned the scalar 0 to ``s`` and
    then indexed it (``s[i]``), raising a TypeError; empty input now
    returns an empty list instead of crashing in np.median.
    """
    if len(data) == 0:
        return []
    rhos = np.array([point[0] for point in data])
    d = np.abs(rhos - np.median(rhos))
    median_d = np.median(d)
    s = d / median_d if median_d else np.zeros(len(d))
    return [point for i, point in enumerate(data) if s[i] < m]
def points_reject_theta_outliers(data, m=2):
    """Drop points whose theta deviates from the median theta by more than
    *m* times the median absolute deviation (MAD).

    If the MAD is zero (all theta values identical) every point is kept.
    Bugs fixed: the previous fallback assigned the scalar 0 to ``s`` and
    then indexed it (``s[i]``), raising a TypeError; empty input now
    returns an empty list instead of crashing in np.median.
    """
    if len(data) == 0:
        return []
    thetas = np.array([point[1] for point in data])
    d = np.abs(thetas - np.median(thetas))
    median_d = np.median(d)
    s = d / median_d if median_d else np.zeros(len(d))
    return [point for i, point in enumerate(data) if s[i] < m]
|
# -*- coding:UTF-8 -*-
from rest_framework import serializers
from . import models
from wxpay.views import dwd
class OrderCallbackSerializer(serializers.ModelSerializer):
    """DRF serializer exposing every field of the OrderCallback model."""
    class Meta:
        model = models.OrderCallback
        fields = '__all__'
|
from startup_db_test_env.startup_env_arg_builder import StartupDBDevEnvConfigArgBuilder
from startup_db_test_env.startup_env import StartupDBDevEnv
class ExecStartupEnv:
    """Command-line front end for the DB test environment.

    Translates the root command parsed by the config/arg builder into the
    matching StartupDBDevEnv operation (start or tear down).
    """

    def __init__(self, config_arg_builder: StartupDBDevEnvConfigArgBuilder, logger_stdout=None, logger_stderr=None):
        self.__config_arg_builder = config_arg_builder
        # Build the environment driver from the parsed configuration.
        self.__startup_db_dev_env = StartupDBDevEnv(
            replication_partition_config=config_arg_builder.get_replication_partition_config(),
            user_db_config=config_arg_builder.get_user_db_config(),
            docker_run_cmd_base_config=config_arg_builder.get_docker_cmd_base_config(),
            is_sudo=config_arg_builder.is_sudo(),
            logger_stdout=logger_stdout,
            logger_stderr=logger_stderr)

    def __startup_db_dev_env_call(self):
        # Bring up the containers, then create the database and its user.
        builder = self.__config_arg_builder
        self.__startup_db_dev_env.start_db_environment(
            builder.get_controller_create_db_container(),
            builder.get_controller_create_db(),
            builder.get_controller_create_user())

    def __stop_and_remove_db_instances(self):
        # Tear the environment down completely.
        self.__startup_db_dev_env.stop_and_remove_db_instances()

    def execute_command_fnc(self):
        """Dispatch the parsed root command to the matching action."""
        command = self.__config_arg_builder.get_root_command()
        if command == self.__config_arg_builder.get_func_start_env():
            self.__startup_db_dev_env_call()
        elif command == self.__config_arg_builder.get_func_stop_env():
            self.__stop_and_remove_db_instances()
# class ExecStartupEnv:
# __FNC_START_ENV = "startenv"
# __FNC_STOP_ENV = "stopenv"
#
# def __initialize_configuration(self):
# config_data = self.__config_data
#
# self.__replication_partition_config = ReplicatedPartitionedDBContainerNameBuilder(
# partition_name=config_data["partition_name"],
# total_num_of_partitions=config_data["total_num_of_partitions"],
# replication_name=config_data["replication_name"],
# total_num_of_replications=config_data["total_num_of_replications"],
# )
#
# self.__user_db_config = UserDBConfig(
# user_name=config_data["user_name"],
# password=config_data["password"],
# database_name=config_data["database_name"]
# )
#
# self.__docker_cmd_base_config = DockerCmdBaseConfig(
# image_name=config_data["image_name"],
# external_port_num_begin=config_data["external_port_num_begin"],
# internal_port_num=config_data["internal_port_num"],
# docker_file_path=config_data["docker_file_path"]
# )
#
# def __initialize_controller(self):
# config_data = self.__config_data
# # print(config_data)
# create_db_container_args = config_data["ExecutionController"]["create_db_container"]
# self.__controller_create_db_container = ExecutionController(
# num_of_attempts=create_db_container_args["num_of_attempts"],
# timeout=create_db_container_args["timeout"]
# )
#
# create_db_args = config_data["ExecutionController"]["create_db"]
# self.__controller_create_db = ExecutionController(
# per_attempt_wait_time=create_db_args["per_attempt_wait_time"],
# num_of_attempts=create_db_args["num_of_attempts"]
# )
#
# create_user_args = config_data["ExecutionController"]["create_user"]
# self.__controller_create_user = ExecutionController(
# per_attempt_wait_time=create_user_args["per_attempt_wait_time"],
# num_of_attempts=create_user_args["num_of_attempts"]
# )
#
# def __init__(self, logger_stdout=None, logger_stderr=None, _arg_parse_inst=StartupScriptParser):
# self.__parser_inst = _arg_parse_inst()
# self.__parser_inst.parse_arguments()
# config_path = self.__parser_inst.get_config_path()
#
# with open(config_path) as f:
# self.__config_data = json.load(f)
#
# self.__parser_inst.updated_config_with_args(self.__config_data)
#
# # self.__config_data.update(self.__parser_inst.get_config_data())
#
# is_sudo = self.__config_data["is_sudo"]
# self.__initialize_configuration()
# self.__initialize_controller()
#
# self.__startup_db_dev_env = StartupDBDevEnv(replication_partition_config=self.__replication_partition_config,
# user_db_config=self.__user_db_config,
# docker_run_cmd_base_config=self.__docker_cmd_base_config,
# is_sudo=is_sudo,
# logger_stdout=logger_stdout,
# logger_stderr=logger_stderr)
#
# def __startup_db_dev_env_call(self):
# self.__startup_db_dev_env.start_db_environment(self.__controller_create_db_container,
# self.__controller_create_db, self.__controller_create_user)
#
# def __stop_and_remove_db_instances(self):
# self.__startup_db_dev_env.stop_and_remove_db_instances()
#
# def execute_command_fnc(self):
# pos_arg = self.__parser_inst.get_root_command()
# if pos_arg == ExecStartupEnv.__FNC_START_ENV:
# self.__startup_db_dev_env_call()
# elif pos_arg == ExecStartupEnv.__FNC_STOP_ENV:
# self.__stop_and_remove_db_instances()
if __name__ == "__main__":
    # NOTE(review): ExecStartupEnv.__init__ now requires a
    # config_arg_builder argument, so this no-argument call raises
    # TypeError as written -- confirm how the builder should be
    # constructed here (the commented-out previous version parsed it
    # internally).
    exec_startup_env = ExecStartupEnv()
    exec_startup_env.execute_command_fnc()
    # exec_startup_env.startup_db_dev_env()
    # exec_startup_env.stop_and_remove_db_instances()
# parser = argparse.ArgumentParser()
# parser.add_argument('--fo-o', '-fbb')
# parser.add_argument('--bar', '-b')
# parser.add_argument('--ab.c', '-a', action="extend", nargs="+")
# parser.add_argument('xyz', action="extend", nargs="+")
# parser.add_argument('efg', action="extend", nargs="+")
# parser.add_argument('efg2', action="extend", nargs="+")
#
# args = parser.parse_args()
# print(args)
# if args.bar is not None:
# print(int(args.bar) + 10)
# print(args)
# if hasattr(args, 'ab.c'):
# print("hit")
# print(getattr(args, 'ab.c'))
# if args.xyz is not None:
# print(args.xyz)
# print(vars(args))
|
class Card:
    """A single playing card identified by rank (1-13) and suit character."""

    # Class-level lookup tables shared by all instances (read-only).
    SUIT_NAMES = {"s": "spades", "d": "diamonds", "h": "hearts", "c": "clubs"}
    RANK_NAMES = ["", "Ace", "Two", "Three", "Four", "Five", "Six", "Seven",
                  "Eight", "Nine", "Ten", "Jack", "Queen", "King"]

    def __init__(self, rank, suit):
        """rank is an int in the range 1-13 indicating the rank Ace-King,
        and suit is a single character "d", "c", "h", or "s" indicating the
        suit (diamonds, clubs, hearts, or spades)."""
        self.rank = rank
        self.suit = suit

    def getRank(self):
        """Return the rank of the card."""
        return self.rank

    def getSuit(self):
        """Return the suit of the card."""
        return self.suit

    def BJValue(self):
        """Return the Blackjack value of the card: face cards count as 10,
        Ace counts as 1."""
        return 10 if self.rank >= 10 else self.rank

    def __str__(self):
        """Return a string naming the card, e.g. "Ace of spades".

        Bug fixed: the previous version overwrote self.rank and self.suit
        with their string names, so a second str() call (or any later use
        of rank/suit as numbers/keys) raised TypeError/KeyError.  The
        lookup tables are now class constants and the card is left
        unmodified.
        """
        return self.RANK_NAMES[self.rank] + " of " + self.SUIT_NAMES[self.suit]
|
import os.path
import math
from os import path
import numpy as np
import scipy
import scipy.optimize
# Benchmark scene names; a table's nameId is an index into this list.
allNames =[
    "lizard",
    "shiftHappens",
    "erato",
    "cubes",
    "sponza",
    "daviaRock",
    "rungholt",
    "breakfast",
    "sanMiguel",
    "amazonLumberyardInterior",
    "amazonLumberyardExterior",
    "amazonLumberyardCombinedExterior",
    "gallery",
]
#loop over all possible tables we are interested in:
#( nameSSEseq4l4bTable.txt ) are the tables we can use.
class storageType:
    """One benchmark measurement for a (branchFactor, leafSize) pair."""

    def __init__(self, nameId, subdivision, branch, leaf, triangleCount,
                 averageBvhDepth, totalTime, computeTime, memoryTime,
                 memoryRelative):
        # Scene identity.
        self.nameId = nameId
        self.subdivision = subdivision
        # BVH build parameters.
        self.branch = branch
        self.leaf = leaf
        # Measured quantities.
        self.triangleCount = triangleCount
        self.averageBvhDepth = averageBvhDepth
        self.totalTime = totalTime
        self.computeTime = computeTime
        self.memoryTime = memoryTime
        self.memoryRelative = memoryRelative
class everything:
    def __init__(self, workType = 0, gangType = 0):
        """Configuration for one cost-table analysis run.

        workType: 0 analyses the leaf cost tables, 1 the node cost tables.
        gangType: 0 selects the AVX tables, 1 the SSE tables.
        """
        # 0 = leaf , 1 = node (need to adjust when table change!) (i separate those since i dont want to do a combined performance test since it gets messy quite fast)
        self.workType = workType
        self.workName = ["Leaf", "Node"]
        # 0 = avx, sse = 1
        self.gangType = gangType
        self.gangName = ["Avx", "Sse"]
        # Inclusive range of scene subdivision levels to scan.
        self.subdivisionRange = [0, 5]
        #nameIds of the tables (indices into allNames):
        #self.names = [4, 9]
        #self.names = [7,8,9,10,11,12]
        self.names = [0]
        #prefix to the table
        #self.prefix = ["SSESeqMemoryLeaf", "SSESeqMemoryNode"]
        #self.prefix = ["AVXSeqMemoryLeaf", "AVXSeqMemoryNode"]
        self.prefix = self.workName[self.workType] + "Memory" + self.gangName[self.gangType]
        self.prefix2 = "Table.txt"
        self.outputFolderName = "SavesPerf/Laptop/Summary/"
        self.memoryStepSize = 4
        # Branching factor / leaf size sweep limits, indexed by
        # [gangType][workType].
        self.minBranchingFactorList = [[8,2],[4,2]]
        self.maxBranchingFactorList = [[8,64],[4,64]]
        self.minLeafSizeList = [[1,8],[1,4]]
        self.maxLeafSizeList = [[64, 8], [64, 4]]
        self.minBranchingFactor = self.minBranchingFactorList[self.gangType][self.workType]
        self.maxBranchingFactor = self.maxBranchingFactorList[self.gangType][self.workType]
        self.minLeafSize = self.minLeafSizeList[self.gangType][self.workType]
        self.maxLeafSize = self.maxLeafSizeList[self.gangType][self.workType]
        # Number of "," in a table line before each number we want to read.
        self.dataId = [
            2,
            3,
            6,
            7,
        ]
        # Output column names that change with leaf / node mode.
        self.dataOutName = [
            "leafComputeCost",
            "nodeComputeCost"
        ]
def run(self):
#loop over names:
#arrays that will be used later for tree depth solver stuff
#b,l,scene, datList
#datalist is: [b,l,totaltime, memoryRelative]
storage = [[] for _ in range(len(self.names))]
for loopId, nameId in enumerate(self.names):
name = allNames[nameId]
for s in range(self.subdivisionRange[1] - self.subdivisionRange[0] + 1):
anyFound = False
#loop over b and l
storagePerName = []
for b in range(self.maxBranchingFactor -(self.minBranchingFactor - 1)):
#one empty line after each branching factor
for l in range(self.maxLeafSize - (self.minLeafSize - 1)):
branch = b + self.minBranchingFactor
leaf = l + self.minLeafSize
#open table:
if (self.subdivisionRange[1] == 0):
tableName = "Summary/" + name + "/" + name + self.prefix + "b" + str(branch) + "l" + str(leaf) + self.prefix2
else:
tableName = "Summary/" + name + "Sub" + str(s) + "/" + name + self.prefix + "b" + str(branch) + "l" + str(leaf) + self.prefix2
if (path.exists(tableName)):
if not (anyFound):
if (self.subdivisionRange[1] == 0):
fileName = "Summary/" + name + "/" + name + self.prefix + "ComputeCostTable.txt"
else:
fileName = "Summary/" + name + "Sub" + str(s) + "/" + name + self.prefix + "ComputeCostTable.txt"
fResult = open(fileName, "w+")
firstLine = "branchFactor, leafSize, triangleCount, averageBvhDepth, memorySize, " + self.dataOutName[self.workType] +", memoryCost, " + self.dataOutName[self.workType] + "Norm, memoryCostNorm, memoryRelative"
fResult.write(firstLine + "\n")
anyFound = True
#open file and read important values
f = open(tableName, "r")
if f.mode == 'r':
dataPoints = [[] for z in self.dataId]
dataLeaf = []
dataBranch = []
fiterator = iter(f)
next(fiterator)
for x in fiterator:
split = x.split(", ")
# collect data points:
for i in range(len(self.dataId)):
dataPoints[i].append(float(split[self.dataId[i]]))
dataLeaf.append(float(split[1]))
dataBranch.append(float(split[0]))
#now convert data to np array
y = np.array(dataPoints[self.workType + 2])
if self.workType == 0:
memoryPart = np.array(dataLeaf)
computePart = np.array([float(leaf) for i in range(4)])
else:
memoryPart = np.array(dataBranch)
computePart = np.array([float(branch) for i in range(4)])
A = np.vstack([memoryPart, computePart]).T
result, residual, rank, singular = np.linalg.lstsq(A, y, rcond=None)
computeCost = result[1]
memoryCost = result[0]
normFactor = 1 / (computeCost + memoryCost)
computeNorm = normFactor * computeCost
memoryNorm = normFactor * memoryCost
memoryFactor = memoryCost / computeCost
#store data for second iteration
#storagePerName.append([branch, leaf, dataPoints[0][0], dataPoints[1][0], dataPoints[self.workType + 2][0], computeCost, memoryCost, memoryFactor, name, s])
storagePerName.append(storageType(nameId, s, branch, leaf, dataPoints[0][0], dataPoints[1][0], dataPoints[self.workType + 2][0], computeCost, memoryCost, memoryFactor, ))
"""
#rework version reformed as linear system
A2 = np.vstack([-memoryPart, y]).T
y2 = computePart
result2, residual, rank, singular = np.linalg.lstsq(A2, y2, rcond=None)
res2 = 1 / result2[1]
res = result2[0] * res2
"""
"""
#i keep this here in case i need non linear least squares later
#scipy.optimize.leastsq for non linear least squares.
#good explanation: https://stackoverflow.com/questions/19791581/how-to-use-leastsq-function-from-scipy-optimize-in-python-to-fit-both-a-straight
#n = nodes, pm = padMemory
n = computePart[0]
func = lambda tpl, pm: tpl[0] * (tpl[1] * pm + n)
errorFunc = lambda tpl, pm, y: func(tpl, pm) - y
#initial tupel values
tplInitial = (1.0, 1.0)
#tplFinal,success= scipy.optimize.leastsq(errorFunc,tplInitial[:],args=(memoryPart,y))
#above is the soon depricated version
result= scipy.optimize.least_squares(errorFunc,tplInitial[:],args=(memoryPart,y))
"""
fResult.write(str(branch) + ", " + str(leaf) + ", " + str(memoryPart[0]) + ", " + str(computeCost) + ", " + str(memoryCost) + ", " + str(computeNorm) + ", " + str(memoryNorm) + ", " + str(memoryFactor) + "\n")
if len(storagePerName) != 0:
storage[loopId].append(storagePerName)
if anyFound:
fResult.close()
#now loop over the different scenes and do analysis depending on tree depth
for b in range(self.maxBranchingFactor -(self.minBranchingFactor - 1)):
#one empty line after each branching factor
for l in range(self.maxLeafSize - (self.minLeafSize - 1)):
branch = b + self.minBranchingFactor
leaf = l + self.minLeafSize
memoryRelative = []
totalTime = []
triangleCount = []
averageBvhDepth = []
anyFound = False
for sceneStorage in storage:
for subStorage in sceneStorage:
for s in subStorage:
if s.branch == branch and s.leaf == leaf:
if not anyFound:
anyFound = True
triangleCount.append(s.triangleCount)
averageBvhDepth.append(s.averageBvhDepth)
totalTime.append(s.totalTime)
memoryRelative.append(s.memoryRelative)
if anyFound:
fResult.close()
#calculates the relative memory depending on tri count
#memoryRelative = x * triCount
ones = np.ones(len(memoryRelative))
#A = np.vstack([triangleCount]).T
A = np.vstack([averageBvhDepth]).T
result, residual, rank, singular = np.linalg.lstsq(A, memoryRelative, rcond=None)
if (residual > 0.1):
print("residual a bit high")
#calculates the influence of the tri count to total time. (assumes linear function, not sure if it is -> residual seems quite high?)
#totalTime = x * triCount * memoryRelative + x * triCount
#for calculating i use : totalTime /(triangleCount * (memoryRelative + 1)) = x
m = np.array(memoryRelative)
#t = np.array(triangleCount)
t = np.array(averageBvhDepth)
cT = np.array(totalTime)
y = cT / (t * (m + 1))
A = np.vstack([ones]).T
result2, residual2, rank, singular = np.linalg.lstsq(A, y, rcond=None)
breakpointHolder = 0
#TODO: test different things to above version (non linear function or else, not sure yet)
#(also) try to normalize with real intersection count of the scene? not sure what to expect with this one but i want to know how it look
#current idea: render the same scene with different subdivision settings. (so once render the scene with x triangles, x*2, x*3, x*4 ,...)
#might be able to calculate scene/camrea complexity with this?
#TODO: output
#what i want from output:
#output per N and B combination.
#put name in it and tri count / average bvh depth so i can show graphs for each scene
#final iteration over storage and print one file for each branching factor / leafsize (for node or leaf tests)
for b in range(self.maxBranchingFactor -(self.minBranchingFactor - 1)):
#one empty line after each branching factor
for l in range(self.maxLeafSize - (self.minLeafSize - 1)):
branch = b + self.minBranchingFactor
leaf = l + self.minLeafSize
anyFound = False
for sceneStorage in storage:
for subStorage in sceneStorage:
for s in subStorage:
if s.branch == branch and s.leaf == leaf:
if not anyFound:
anyFound = True
#overview over multiple scenes:
fileName = "Summary/" + self.prefix + "Perf_N" + str(branch) +"L" + str(leaf) + ".txt"
fResult = open(fileName, "w+")
firstLine = "name, nameId, subdivision, triangleCount, averageBvhDepth, totalTime, computeTime, memoryTime , memoryRelative"
fResult.write(firstLine + "\n")
#TODO: update when second part is done
line = self.makeLine([allNames[s.nameId], s.nameId, s.subdivision, s.triangleCount, s.averageBvhDepth, s.totalTime, s.computeTime, s.memoryTime, s.memoryRelative])
fResult.write( line + "\n")
def makeLine(self, array):
    """Join *array* elements into one comma-separated line.

    Accepts any sequence; elements are stringified with str().  Fix: the
    previous index-based build (`array[0]` then append) raised IndexError
    on an empty list; join returns "" instead.
    """
    return ", ".join(str(element) for element in array)
# Run configuration for the measurement script.
doAll = True
# 0 = leaf, 1 = node (needs adjusting when the table changes!) -- kept
# separate because a combined performance test gets messy quite fast.
workType = 0
# 0 = avx, 1 = sse
gangType = 0

if doAll:
    # Sweep every workType / gangType combination.
    for work in range(2):
        for gang in range(2):
            runner = everything(work, gang)
            runner.run()
else:
    runner = everything(workType, gangType)
    runner.run()
|
class YourGuesser(Guesser):
def analyzeOne():
|
'''
Created on Dec 20, 2016
@author: bogdan
'''
import unittest
from repo.repository import *
class TestRepo(unittest.TestCase):
    """Unit tests for the driver and order repositories."""

    def setUp(self):
        # Fresh repositories before every test so cases stay independent.
        self.__driversRepo = DriverRepository()
        self.__ordersRepo = OrderRepository()

    def test_addDriver(self):
        self.__driversRepo.add("1", "1")
        # assertEqual: assertEquals is a deprecated alias (removed in
        # Python 3.12).
        self.assertEqual(len(self.__driversRepo), 1)

    def test_addOrder(self):
        self.__ordersRepo.add("1", 2)
        self.assertEqual(len(self.__ordersRepo), 1)
"""
1.路由命名规范: 返回html页面的,全部以/html开头;
返回json的接口,工具类以test开头、爬虫类以spider开头、其他的以PEP8为准
"""
from flask import Flask, render_template, redirect, url_for
from config import DevConfig
from flask_cors import CORS
import configparser
import pymongo
import redis
from selenium import webdriver
from lxml import etree
from flask import request
import json
import os
from celery import Celery
from urllib.parse import quote
from exts import db
from models import PolicySpiderUrlInfo, PolicySpiderTaskInfo, PolicyDataAnalysisRules, PolicySpiderDataInfo
import datetime
from flask_json import as_json, json_response
import requests
import time
import urllib
import re
import pymysql
# Flask application and extension wiring.
app = Flask(__name__)
app.config.from_object(DevConfig)
db.init_app(app)
celery = Celery(app.name, broker=DevConfig.CELERY_BROKER_URL)
cf = configparser.ConfigParser()
# configparser is used to read/write the database configuration file.
cf.read('DatabaseConfig.ini', encoding='utf-8')
cf_mysql_name = "MYSQL_TEST"
# MySQL connection settings.
mysql_db = cf.get(cf_mysql_name, 'mysql_db')
mysql_host = cf.get(cf_mysql_name, 'mysql_host')
mysql_port = cf.get(cf_mysql_name, 'mysql_port')
mysql_user = cf.get(cf_mysql_name, 'mysql_user')
mysql_passwd = cf.get(cf_mysql_name, 'mysql_password')
# MongoDB connection settings.
cf_mongo_name = "MONGODB_TEST"
mongo_host = cf.get(cf_mongo_name, 'mongo_host')
mongo_port = cf.get(cf_mongo_name, 'mongo_port')
mongo_db = cf.get(cf_mongo_name, 'mongo_db')
mongo_table = cf.get(cf_mongo_name, 'mongo_table')
mongo_user = cf.get(cf_mongo_name, 'mongo_user')
mongo_password = cf.get(cf_mongo_name, 'mongo_password')
cf_redis_name = "REDIS_TEST"
# Redis connection settings.
redis_db = cf.get(cf_redis_name, 'redis_db')
redis_host = cf.get(cf_redis_name, 'redis_host')
redis_port = cf.get(cf_redis_name, 'redis_port')
redis_password = cf.get(cf_redis_name, 'redis_password')
print(os.path.split(os.path.abspath('.'))[-1])
# Credentials for the xdaili proxy service used by generate_proxy_auth().
# NOTE(review): secrets hard-coded in source -- should be moved to config.
orderno = 'ZF20191080074lV0fOf'
secret = 'ac66770b075947eca6cb2f09dc776d00'
# 执行sql
def excute_sql(sql):
    """Execute one SQL statement against the configured MySQL DB and commit.

    (The "excute" misspelling is kept: existing callers use this name.)
    """
    # Keyword arguments: positional connect() arguments are deprecated and
    # removed in newer pymysql releases.
    mysql_conn = pymysql.connect(host=mysql_host, user=mysql_user,
                                 password=mysql_passwd, database=mysql_db,
                                 port=int(mysql_port), charset="utf8")
    try:
        mysql_cur = mysql_conn.cursor()
        try:
            mysql_cur.execute(sql)
            mysql_conn.commit()
        finally:
            mysql_cur.close()
    finally:
        # Always release the connection, even if execute() raises.
        mysql_conn.close()
# 代理ip,身份验证签名生成
def generate_proxy_auth():
    """Build the xdaili proxy 'Proxy-Authorization' header value.

    The signature is the upper-cased MD5 of "orderno=..,secret=..,
    timestamp=.." per the provider's signing scheme.
    """
    import hashlib
    import time
    timestamp = str(int(time.time()))
    raw = 'orderno=' + orderno + ',secret=' + secret + ',timestamp=' + timestamp
    sign = hashlib.md5(raw.encode()).hexdigest().upper()
    return 'sign=' + sign + '&orderno=' + orderno + '&timestamp=' + timestamp
# 使用chromeDriver获取网页内容
def get_content(list_url, extension, time_delay=5):
    """Fetch the page source of *list_url* with headless Chrome.

    *extension*: optional Python snippet executed after the page loads.
    *time_delay*: seconds to wait for the page to render.
    Returns the rendered page_source string.
    """
    chrome_options = webdriver.ChromeOptions()
    chrome_options.add_argument('--headless')
    chrome_options.add_argument('--disable-gpu')
    chrome_options.add_argument('--no-sandbox')
    chrome_options.add_argument('--disable-dev-shm-usage')
    driver = webdriver.Chrome(chrome_options=chrome_options)
    try:
        driver.get(list_url)
        time.sleep(int(time_delay))
        if extension is not None and extension != '':
            # SECURITY: eval() runs arbitrary caller-supplied code -- only
            # operator-configured extensions must ever reach this point.
            eval(extension)
        content = driver.page_source
        print(content)
        return content
    finally:
        # Bug fix: quit() ends the whole chromedriver session; the old
        # close() (and no try/finally) leaked browser processes whenever
        # get()/eval() raised.
        driver.quit()
# 使用requests获取网页html数据
def get_content_request(list_url):
    """Fetch *list_url* with requests through the signed xdaili HTTP proxy.

    Returns the response body decoded as UTF-8.
    """
    auth = generate_proxy_auth()
    # Dynamic-forward proxy endpoint; the auth header carries the token.
    proxy = {'http': 'http://forward.xdaili.cn:80'}
    headers = {'Proxy-Authorization': auth,
               'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) '
                             'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.75 Safari/537.36'}
    # NOTE(review): verify=False disables TLS certificate checking and
    # allow_redirects=False silently drops redirects -- confirm both are
    # intentional for the crawled sites.
    r = requests.get(list_url, headers=headers,
                     proxies=proxy, verify=False, allow_redirects=False)
    # Force UTF-8 regardless of the server-declared charset.
    r.encoding = 'UTF-8'
    print(r.text)
    return r.text
# 提取数据测试接口
@app.route('/api/test/analysis_data', methods=['GET', 'POST'])
@as_json
def data_test():
    """Test endpoint for data-extraction rules.

    POST body: {url, rule_type, rule}.  rule_type 2 applies *rule* as an
    XPath expression; anything else treats it as a regular expression.
    """
    if request.method == 'POST':
        data = json.loads(request.get_data())
        url = data['url']
        rule_type = int(data['rule_type'])
        rule = data['rule']
        html = get_content_request(url)
        page = etree.HTML(html)
        if rule_type == 2:
            result = page.xpath(rule)
        else:
            # Bug fix: re.findall(pattern, string) -- the original call had
            # the arguments swapped (re.findall(html, rule)).
            result = re.findall(rule, html)
        return json_response(result=result, msg='success')
    else:
        return json_response(status_=405, msg='fail', error_description='Wrong request method!')
# 爬虫阶段一,点击下一页测试工具
@app.route('/api/test/next_page', methods=['GET', 'POST'])
@as_json
def next_page_test():
    """Stage-one tool: verify that the "next page" link text can be clicked.

    POST body: {start_url, rule_type, rules_next_page, extension}.
    Returns result=1 when the link (optionally inside frame 0) could be
    clicked, result=0 otherwise.
    """
    time_delay = 3
    if request.method == 'POST':
        data = json.loads(request.get_data())
        start_url = data['start_url']
        rule_type = int(data['rule_type'])
        rules_next_page = data['rules_next_page']
        extension = data['extension']
        if rule_type == 2:
            chrome_options = webdriver.ChromeOptions()
            chrome_options.add_argument("--headless")
            chrome_options.add_argument("--disable-gpu")
            chrome_options.add_argument('--no-sandbox')
            chrome_options.add_argument('--disable-dev-shm-usage')
            driver = webdriver.Chrome(chrome_options=chrome_options)
            driver.get(start_url)
            time.sleep(int(time_delay))
            if extension is not None and extension != '':
                # SECURITY: executes operator-supplied code.
                eval(extension)
            try:
                driver.find_element_by_partial_link_text(rules_next_page).click()
                time.sleep(time_delay)
                print(driver.page_source)
                return json_response(result=1, msg='success')
            except Exception as e:
                print(e)
                # Retry inside the first iframe: some sites render the
                # pager there.
                try:
                    driver.switch_to.frame(0)
                    driver.find_element_by_partial_link_text(rules_next_page).click()
                    return json_response(result=1, msg='success')
                except Exception as e:
                    print(e)
                    return json_response(result=0, msg='success')
                # NOTE(review): unreachable -- both paths above return.
                return json_response(result=0, msg='success')
            finally:
                driver.close()
        else:
            # NOTE(review): non-XPath rule types fall through without a
            # return value (view returns None) -- confirm this is intended.
            pass
    else:
        return json_response(status_=405, msg='fail', error_description='Wrong request method!')
# 提取数据测试接口
@app.route('/api/test/get_urls', methods=['GET', 'POST'])
@as_json
def urls_test():
    """Stage-one tool: test a detail-URL extraction rule against one page.

    POST body: {url, rule_type, rule, extension}.  rule_type 2 = XPath,
    anything else = regular expression.
    """
    if request.method == 'POST':
        data = json.loads(request.get_data())
        url = data['url']
        rule_type = int(data['rule_type'])
        rule = data['rule']
        extension = data['extension']
        html = get_content(url, extension)
        page = etree.HTML(html)
        if rule_type == 2:
            result = page.xpath(rule)
        else:
            # Bug fix: re.findall(pattern, string) -- the original call had
            # the arguments swapped (re.findall(html, rule)).
            result = re.findall(rule, html)
        return json_response(result=[result], msg='success')
    else:
        return json_response(status_=405, msg='fail', error_description='Wrong request method!')
# 获取已经爬取的详情页url的数量
@app.route('/api/count/urls')
def get_detail_url_count():
    """JSON count of crawled detail-page URLs for *task_id* (default 1).

    NOTE(review): this view is shadowed at module level by the helper of
    the same name defined right below; Flask keeps its own reference so
    the route still works, but renaming the helper would be clearer.
    """
    task_id = int(request.args.get('task_id') or 1)
    total = PolicySpiderUrlInfo.query.filter_by(task_id=task_id).count()
    return json_response(count=str(total))
def get_detail_url_count(task_id):
    """Return the crawled detail-URL count for *task_id* as a string.

    NOTE(review): shadows the identically-named view function above at
    module level.
    """
    total = PolicySpiderUrlInfo.query.filter_by(task_id=task_id).count()
    return str(total)
# 删除某一task的所有的详情页url
@app.route('/api/delete/urls')
def del_detail_url():
    """Delete every crawled detail-page URL belonging to *task_id*."""
    task_id = int(request.args.get('task_id') or 0)
    policy_spider_url_list = PolicySpiderUrlInfo.query.filter_by(task_id=task_id).all()
    for item in policy_spider_url_list:
        db.session.delete(item)
    # One commit for the whole batch instead of one commit per row.
    db.session.commit()
    return json_response(msg='success')
# 删除mongo中的数据
@app.route('/api/delete/mongo')
def del_mongo_pages():
    """Delete every MongoDB page document crawled for *task_id*."""
    task_id = int(request.args.get('task_id') or 0)
    mongo_client = pymongo.MongoClient(host=mongo_host, port=int(mongo_port))
    try:
        db_auth = mongo_client.admin
        db_auth.authenticate(mongo_user, mongo_password)
        mongo_db_ = mongo_client[mongo_db]
        mongo_table_ = mongo_db_[mongo_table]
        # delete_many replaces the deprecated Collection.remove().
        mongo_table_.delete_many({"task_id": task_id})
    finally:
        mongo_client.close()
    return json_response(msg='success')
# 获取mongodb中已经采集的数据的数量
@app.route('/api/count/mongo')
def get_mongo_count():
    """JSON count of documents already collected in MongoDB for *task_id*.

    NOTE(review): shadowed at module level by the helper of the same name
    below; Flask keeps its own reference so the route still works.
    """
    task_id = int(request.args.get('task_id') or 0)
    mongo_client = pymongo.MongoClient(host=mongo_host, port=int(mongo_port))
    try:
        db_auth = mongo_client.admin
        db_auth.authenticate(mongo_user, mongo_password)
        mongo_db_ = mongo_client[mongo_db]
        mongo_table_ = mongo_db_[mongo_table]
        # count_documents replaces the deprecated Collection.count().
        mongo_row = mongo_table_.count_documents({"task_id": task_id})
    finally:
        mongo_client.close()
    return json_response(count=str(mongo_row))
def get_mongo_count(task_id):
    """Return the number of MongoDB documents for *task_id* as a string.

    NOTE(review): shadows the identically-named view function above.
    """
    mongo_client = pymongo.MongoClient(host=mongo_host, port=int(mongo_port))
    try:
        db_auth = mongo_client.admin
        db_auth.authenticate(mongo_user, mongo_password)
        mongo_db_ = mongo_client[mongo_db]
        mongo_table_ = mongo_db_[mongo_table]
        # count_documents replaces the deprecated Collection.count().
        mongo_row = mongo_table_.count_documents({"task_id": task_id})
    finally:
        mongo_client.close()
    return str(mongo_row)
# 获取redis中剩余的任务数量
@app.route('/api/count/redis')
def get_redis_count():
    """JSON count of queue items remaining in Redis for *task_id*."""
    task_id = int(request.args.get('task_id') or 0)
    client = redis.StrictRedis(host=redis_host, port=int(redis_port),
                               db=int(redis_db), password=redis_password)
    # The task id doubles as the Redis list key.
    return json_response(count=str(client.llen(str(task_id))))
# 数据采集数量监控接口
@app.route('/api/monitor/data')
def monitor():
    """Aggregate collection counters for one task.

    Reports raw mongo pages, extracted detail URLs and parsed data rows.
    """
    task_id = int(request.args.get('task_id') or 0)
    payload = dict(
        mongo_count=int(get_mongo_count(task_id)),
        url_count=int(get_detail_url_count(task_id)),
        data_count=int(PolicySpiderDataInfo.query.filter_by(task_id=task_id).count()),
    )
    return json_response(data=payload, msg='success')
# 数据分析规则删除接口
@app.route('/api/delete/rule')
def delete_rule():
    """Delete one data-analysis rule by primary key *id_*."""
    id_ = int(request.args.get('id_') or 0)
    try:
        target = PolicyDataAnalysisRules.query.filter_by(id_=id_).first()
        db.session.delete(target)
        db.session.commit()
    except Exception as e:
        # An unknown id yields first()==None, and delete(None) also lands
        # here -> reported as 'fail'.
        print(e)
        return json_response(msg='fail')
    return json_response(msg='success')
# 数据分析规则录入接口
@app.route('/api/add/rule', methods=['POST', 'GET'])
@as_json
def add_rule():
    """Create a data-analysis rule from the posted JSON body."""
    if request.method != "POST":
        return json_response(status_=405, msg='fail', error_description='Wrong request method!')
    payload = json.loads(request.get_data())
    rule_ = PolicyDataAnalysisRules()
    rule_.task_id = payload['task_id']
    rule_.meaning = payload['meaning']
    rule_.rule_name = payload['rule_name']
    rule_.rule_type = payload['rule_type']
    rule_.rule = payload['rule']
    rule_.insert_time = datetime.datetime.now()
    try:
        db.session.add(rule_)
        db.session.commit()
    except Exception as e:
        print(e)
        return json_response(msg='fail', error_description=str(e))
    return json_response(msg='success')
# 爬虫类型选择接口
@app.route('/api/spider/choice', methods=['GET', 'POST'])
@as_json
def spider_choice():
    """Persist the spider-type configuration posted for *task_id*.

    Type 1 stores a fixed url list, type 2 a "click next page" walker
    config, anything else an ajax-spider config.  Afterwards the task
    state is reset to 0.
    """
    task_id = int(request.args.get('task_id') or 0)
    if request.method == 'POST':
        data = json.loads(request.get_data())
        policy_spider_task_info = PolicySpiderTaskInfo.query.filter_by(task_id=task_id).first()
        policy_spider_task_info.spider_type = int(data['spider_type'])
        # NOTE(review): the branches compare the raw JSON value against the
        # *strings* '1'/'2' while the column stores int(...); this relies
        # on the client always sending spider_type as a string -- confirm.
        if data['spider_type'] == '1':
            policy_spider_task_info.urls = data['urls']
        elif data['spider_type'] == '2':
            policy_spider_task_info.start_url = data['start_url']
            policy_spider_task_info.rules_next_page = data['rules_next_page']
            policy_spider_task_info.extension_1 = data['extension']
        else:
            policy_spider_task_info.ajax_url = data['ajax_url']
            policy_spider_task_info.ajax_data = data['ajax_data']
            policy_spider_task_info.rules_url = data['rules_url']
            policy_spider_task_info.url_head = data['url_head']
        db.session.add(policy_spider_task_info)
        db.session.commit()
        # Reset progress state after reconfiguration.
        change_task_state(task_id,0)
        return json_response(msg='success')
    else:
        return json_response(status_=405, msg='fail', error_description='Wrong request method!')
# 改变爬虫状态(state)
# def change_task_state(task_id, state):
# policy_spider_task_info = PolicySpiderTaskInfo.query.filter_by(task_id=task_id).first()
# policy_spider_task_info.state = state
# db.session.add(policy_spider_task_info)
# db.session.commit()
def change_task_state(task_id, state):
    """Persist the progress *state* for the spider task *task_id*.

    Security fix: the SQL is built by string formatting (excute_sql() has
    no bind-parameter support), so both operands are forced through int()
    first -- a non-numeric value now raises instead of being interpolated
    into the statement.
    """
    sql = "UPDATE policy_spider_task_info SET state = %d WHERE task_id = %d" \
          % (int(state), int(task_id))
    excute_sql(sql)
# 爬虫一celery任务
@celery.task(name="celery_start_spider1")
def celery_start_spider1(task_id):
if os.path.split(os.path.abspath('.'))[-1] == 'spiderTools':
os.chdir("Spider1/DataSpiders/DataSpiders")
print(os.path.dirname(__file__))
os.system("python3 Spider1Run.py %s" % task_id)
print(os.path.dirname(__file__))
if os.path.split(os.path.abspath('.'))[-1] == 'DataSpiders':
os.chdir("../")
os.chdir("../")
os.chdir("../")
change_task_state(task_id, 3)
# 爬虫二(爬虫阶段二)celery任务
@celery.task(name="celery_start_spider2")
def celery_start_spider2(task_id):
print(os.path.dirname(__file__))
if os.path.split(os.path.abspath('.'))[-1] == 'spiderTools':
os.chdir("Spider2/DataSpiders/DataSpiders")
print(os.path.dirname(__file__))
os.system("python3 Spider2Run.py %s" % task_id)
if os.path.split(os.path.abspath('.'))[-1] == 'DataSpiders':
os.chdir("../")
os.chdir("../")
os.chdir("../")
change_task_state(task_id, 4)
# 爬虫二(爬虫阶段二)补充celery任务
@celery.task(name="celery_start_spider2_add")
def celery_start_spider2_add(task_id):
print(os.path.dirname(__file__))
if os.path.split(os.path.abspath('.'))[-1] == 'spiderTools':
os.chdir("Spider2/DataSpiders/DataSpiders")
print(os.path.dirname(__file__))
os.system("python3 Spider2Run_add.py %s" % task_id)
if os.path.split(os.path.abspath('.'))[-1] == 'DataSpiders':
os.chdir("../")
os.chdir("../")
os.chdir("../")
change_task_state(task_id, 4)
# 爬虫三(爬虫阶段一,使用点击下一页获取详情页url)celery任务
@celery.task(name='celery_start_spider3')
def celery_start_spider3(task_id, policy_spider_task_info):
    """Stage-one variant: walk "next page" links, extract and store URLs.

    *policy_spider_task_info* is a plain dict (celery cannot serialize
    the model class).  Ends by marking the task state as 3.
    """
    pages = get_all_content(policy_spider_task_info)
    detail_urls = analysis_content(content_list=pages,
                                   policy_spider_task_info=policy_spider_task_info)
    insert_url(policy_spider_task_info=policy_spider_task_info,
               url_list=detail_urls)
    change_task_state(task_id, 3)
# 数据解析celery任务
@celery.task(name="analysis_data")
def analysis_data(task_id):
print(os.path.dirname(__file__))
if os.path.split(os.path.abspath('.'))[-1] == 'spiderTools':
os.chdir("data_analysis/")
print(os.path.dirname(__file__))
os.system("python3 DataAnalysis.py %s" % task_id)
if os.path.split(os.path.abspath('.'))[-1] == 'data_analysis':
os.chdir("../")
change_task_state(task_id, 6)
# 爬虫阶段一启动接口
@app.route('/api/celery/spider1')
def start_spider1():
    """Queue stage one of the crawl for *task_id*, dispatching on spider_type.

    spider_type 1: fixed url-list spider; 2: selenium "next page" walker
    (spider3); 3: queued on the same celery task as type 1 --
    NOTE(review): confirm type 3 sharing celery_start_spider1 is intended.
    """
    task_id = int(request.args.get('task_id') or 0)
    policy_spider_task_info = PolicySpiderTaskInfo.query.filter_by(task_id=task_id).first()
    # Negative state marks a disabled / errored task.
    if policy_spider_task_info.state < 0:
        return json_response(msg='fail')
    if policy_spider_task_info.spider_type == 1:
        change_task_state(task_id, 1)
        with app.app_context():
            celery_start_spider1.delay(task_id)
        return json_response(msg='success', task_id=str(task_id))
    elif policy_spider_task_info.spider_type == 2:
        change_task_state(task_id, 1)
        # Re-read the row after the state update.
        policy_spider_task_info = PolicySpiderTaskInfo.query.filter_by(task_id=task_id).first()
        # Celery cannot serialize the model instance, so pass a plain dict.
        info_dict = dict(start_url=policy_spider_task_info.start_url,
                         task_id=policy_spider_task_info.task_id,
                         rules_next_page=policy_spider_task_info.rules_next_page,
                         url_head=policy_spider_task_info.url_head,
                         rules_url=policy_spider_task_info.rules_url,
                         type1=policy_spider_task_info.type1,
                         type2=policy_spider_task_info.type2,
                         type3=policy_spider_task_info.type3,
                         type4=policy_spider_task_info.type4,
                         type5=policy_spider_task_info.type5,
                         extension_1=policy_spider_task_info.extension_1)
        with app.app_context():
            celery_start_spider3.delay(task_id, info_dict)
        return json_response(msg='success', task_id=str(task_id))
    elif policy_spider_task_info.spider_type == 3:
        change_task_state(task_id, 1)
        with app.app_context():
            celery_start_spider1.delay(task_id)
        return json_response(msg='success', task_id=str(task_id))
    else:
        return json_response(msg='fail')
# 爬虫阶段二启动接口
@app.route('/api/celery/spider2')
def start_spider2():
    """Queue the stage-two spider (detail-page download) for *task_id*."""
    task_id = int(request.args.get('task_id') or 0)
    task_info = PolicySpiderTaskInfo.query.filter_by(task_id=task_id).first()
    # Negative state marks a disabled / errored task.
    if task_info.state < 0:
        return json_response(msg='fail')
    change_task_state(task_id, 2)
    with app.app_context():
        celery_start_spider2.delay(task_id)
    return json_response(msg='success', task_id=str(task_id))
# 爬虫阶段二补充采集接口
@app.route('/api/celery/spider2add')
def start_spider2_add():
    """Queue the supplementary stage-two spider run for *task_id*.

    NOTE(review): unlike the sibling endpoints this returns a bare string
    and performs no state check -- presumably intentional; confirm.
    """
    task_id = int(request.args.get('task_id') or 0)
    change_task_state(task_id, 2)
    with app.app_context():
        celery_start_spider2_add.delay(task_id)
    return str(task_id)
# 数据解析启动接口
@app.route('/api/celery/analysis')
def data_analysis():
    """Queue the data-parsing task once stage two has finished (state >= 4)."""
    task_id = int(request.args.get('task_id') or 0)
    task_info = PolicySpiderTaskInfo.query.filter_by(task_id=task_id).first()
    if task_info.state < 4:
        return json_response(msg='fail')
    change_task_state(task_id, 5)
    with app.app_context():
        analysis_data.delay(task_id)
    return json_response(msg='success', task_id=str(task_id))
# # 查询数据采集规则的接口
# @app.route('/api/show/rules')
# def show_rules():
# task_id = int(request.args.get('task_id') or 0)
# rules_info = PolicySpiderTaskInfo.query.filter_by(task_id=task_id).first()
# data = dict(
# rules_organization=rules_info.rules_organization,
# rules_subject=rules_info.rules_subject,
# rules_keywords=rules_info.rules_keywords,
# rules_file_number=rules_info.rules_file_number,
# rules_create_date=rules_info.rules_create_date,
# rules_release_date=rules_info.rules_release_date,
# rules_enforcement_date=rules_info.rules_enforcement_date,
# rules_index_number=rules_info.rules_index_number,
# rules_author=rules_info.rules_author)
# return json_response(data=data)
# spider3的爬虫第一阶段
# 因为celery序列化的限制,不能传递自定义的类, 传递转化后的字典
def get_all_content(policy_spider_task_url, time_delay=5):
    """Walk a paginated listing with headless Chrome, collecting page sources.

    *policy_spider_task_url* is a plain dict (celery cannot serialize the
    model class) holding start_url, rules_next_page and extension_1.
    Clicks the "next page" link until it disappears (exception) or the
    page stops changing; returns the list of page_source strings.
    """
    start_url = policy_spider_task_url['start_url']
    rules_next_page = policy_spider_task_url['rules_next_page']
    extension_1 = policy_spider_task_url['extension_1']
    content_list = []
    chrome_options = webdriver.ChromeOptions()
    chrome_options.add_argument("--headless")
    chrome_options.add_argument("--disable-gpu")
    chrome_options.add_argument('--no-sandbox')
    chrome_options.add_argument('--disable-dev-shm-usage')
    driver = webdriver.Chrome(chrome_options=chrome_options)
    driver.get(start_url)
    time.sleep(int(time_delay))
    try:
        if extension_1 is not None and extension_1 != '':
            # SECURITY: executes operator-supplied code from the task config.
            eval(extension_1)
        while True:
            content = driver.page_source
            content_list.append(content)
            # Safety valve: after 50 pages, stop once two consecutive pages
            # are identical (the "next" click no longer advances).
            if (len(content_list) > 50):
                if (content_list[-1] == content_list[-2]):
                    break
            driver.find_element_by_partial_link_text(rules_next_page).click()
            time.sleep(int(time_delay))
    except Exception as e:
        # Typically NoSuchElementException once the last page is reached.
        print(e)
    finally:
        # Bug fix: quit() ends the whole chromedriver session; the old
        # close() only closed the window and leaked the driver process.
        driver.quit()
    return content_list
# 分析抓取到的html代码
def analysis_content(content_list, policy_spider_task_info):
    """Extract detail-page URLs from the crawled HTML pages.

    Applies the task's XPath rule to every page and, when a non-empty
    base URL is configured, resolves the results against it.
    """
    url_list = []
    url_head = policy_spider_task_info['url_head']
    rules_url = policy_spider_task_info['rules_url']
    for content in content_list:
        page = etree.HTML(content)
        url_list = url_list + page.xpath(rules_url)
    # Bug fix: the original condition `url_head != '' or url_head is not
    # None` was always true and crashed in urljoin when url_head was None.
    # Join only when a truthy base URL is provided (urljoin('', u) == u,
    # so skipping the empty case preserves behavior).
    if url_head:
        for i in range(0, len(url_list)):
            url_list[i] = urllib.parse.urljoin(url_head, url_list[i])
    return url_list
# 将解析出的url插入数据库
def insert_url(policy_spider_task_info, url_list):
    """Store every extracted detail-page URL for the task in MySQL.

    Copies the task's type1..type5 classification columns onto each row.
    """
    task_id = policy_spider_task_info['task_id']
    type1 = policy_spider_task_info['type1']
    type2 = policy_spider_task_info['type2']
    type3 = policy_spider_task_info['type3']
    type4 = policy_spider_task_info['type4']
    type5 = policy_spider_task_info['type5']
    from_url = ''
    # One app context and one commit for the whole batch instead of
    # entering a context and committing once per URL.
    with app.app_context():
        for url in url_list:
            policy_spider_url_info = PolicySpiderUrlInfo()
            policy_spider_url_info.task_id = task_id
            policy_spider_url_info.url = url
            policy_spider_url_info.type1 = type1
            policy_spider_url_info.type2 = type2
            policy_spider_url_info.type3 = type3
            policy_spider_url_info.type4 = type4
            policy_spider_url_info.type5 = type5
            policy_spider_url_info.from_url = from_url
            policy_spider_url_info.insert_time = datetime.datetime.now()
            db.session.add(policy_spider_url_info)
        db.session.commit()
@app.route('/')
def index():
    """Redirect the bare root URL to the main index page."""
    return redirect(url_for("index_html"))
# 主页
@app.route('/index')
def index_html():
    """Main landing page: paginated list of spider tasks (10 per page)."""
    current_page = int(request.args.get('page') or 1)
    pagination = PolicySpiderTaskInfo.query.paginate(current_page, 10)
    return render_template('index.html', pagination=pagination)
# 工具选择页面
@app.route('/tools')
def spider_tools():
    """Render the tool-selection page."""
    template = 'tools.html'
    return render_template(template)
# 数据查询工具
@app.route('/tools/sql')
def tools_sql_html():
    """Render the ad-hoc data-query tool page."""
    template = "tools_sql.html"
    return render_template(template)
# 数据分析规则测试工具页面
@app.route('/tools/data/test')
def test_data_html():
    """Render the data-analysis rule test tool page."""
    template = "tools_data_test.html"
    return render_template(template)
# 爬虫阶段一获取urls测试页面
@app.route('/tools/urls/test')
def test_urls_html():
    """Render the stage-one URL-extraction test tool page."""
    template = "tools_urls_test.html"
    return render_template(template)
# 爬虫阶段一点击下一页测试页面
@app.route('/tools/next/test')
def test_next_html():
    """Render the stage-one "next page" click test tool page."""
    template = "tools_next_page_test.html"
    return render_template(template)
# 爬虫启动工具
@app.route('/tools/spider/start')
def start_spider1_html():
    """Render the spider start tool page."""
    template = "tools_spider_start.html"
    return render_template(template)
# 详情页url查询工具
@app.route('/tools/show/urls')
def show_urls_tools():
    """Render the detail-page URL lookup tool page."""
    template = "tools_show_urls.html"
    return render_template(template)
# 任务监控页面
@app.route('/tools/monitor/task')
def monitor_task_tools():
    """Render the task-monitor page for tasks classified as "组成部门"."""
    tasks = PolicySpiderTaskInfo.query.filter_by(type2="组成部门").all()
    return render_template("tools_monitor.html", pageObj={"data": tasks})
# 爬虫启动规则配置页面
@app.route('/spider/choice')
def spider_choice_html():
    """Render the spider start-rule configuration page for one task."""
    task_id = int(request.args.get('task_id') or 0)
    task_info = PolicySpiderTaskInfo.query.filter_by(task_id=task_id).first()
    return render_template('spider_choice.html', psti=task_info)
# 爬虫启动页面
@app.route('/spider/start')
def start_spider_html():
    """Render the spider start page for one task."""
    task_id = int(request.args.get('task_id') or 0)
    return render_template('spider_start.html', task_id=task_id)
# 已采集详情页url页面
@app.route('/show/urls')
def get_detail_urls():
    """Paginated view of the detail-page URLs crawled for one task."""
    task_id = int(request.args.get('task_id') or 0)
    # Requested page number; 10 rows are shown per page.
    page = int(request.args.get('page') or 1)
    query = PolicySpiderUrlInfo.query.filter_by(task_id=task_id)
    count = query.count()
    pagination = query.paginate(page, 10)
    return render_template('show_urls.html',
                           pagination=pagination, task_id=task_id, count=count)
# 数据分析规则录入页面
@app.route('/show/analysis/rules')
def get_rules():
    """Page listing the data-analysis rules recorded for one task."""
    task_id = int(request.args.get('task_id') or 1)
    rules = PolicyDataAnalysisRules.query.filter_by(task_id=task_id).all()
    return render_template('show_analysis_rules.html', data=rules, task_id=task_id)
@app.route('/tools/generate/urls')
def generate_urls():
    """Render the URL-generation helper page."""
    template = 'tools_generate_urls.html'
    return render_template(template)
if __name__ == '__main__':
    # Dev-server entry point: enable CORS (with credentials) and listen on
    # all interfaces, port 5000.
    CORS(app, supports_credentials=True)
    app.run(host='0.0.0.0', port=5000)
|
class Valores:
    """Value object for one transaction-value record.

    Exposes ``id``, ``valores_trans_id`` and ``valores`` as read/write
    properties; all three default to 0.
    """

    def __init__(self):
        # Internal state behind the public properties.
        self.__record_id = 0
        self.__transaction_id = 0
        self.__amount = 0

    @property
    def id(self):
        return self.__record_id

    @id.setter
    def id(self, new_id):
        self.__record_id = new_id

    @property
    def valores_trans_id(self):
        return self.__transaction_id

    @valores_trans_id.setter
    def valores_trans_id(self, new_trans_id):
        self.__transaction_id = new_trans_id

    @property
    def valores(self):
        return self.__amount

    @valores.setter
    def valores(self, new_amount):
        self.__amount = new_amount
# Copyright (c) 2012 Stuart Pernsteiner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .binaryreader import BinaryReader
from .psd_primitive import PSDPrimitiveReaderMixin
class PSDDescriptorReaderMixin(object):
    """Mixin that parses Photoshop "descriptor" structures.

    Relies on the primitive-reader mixin for read_int / read_raw /
    read_double / read_psd_unicode.
    """

    def read_descriptor(self):
        """Read one descriptor: unicode name, class id, then key/item pairs."""
        name = self.read_psd_unicode(2)
        class_id = self.read_descriptor_id()
        item_count = self.read_int(4)
        result = {}
        # range() instead of the Python-2-only xrange(); descriptor item
        # counts are small, so materializing the range is harmless on 2.x.
        for i in range(item_count):
            key = self.read_descriptor_id()
            item_type = self.read_raw(4)
            result[key] = self.read_descriptor_item(item_type)
        return result

    def read_descriptor_id(self):
        """Read an id: 4-byte length, then that many bytes (0 means a 4CC)."""
        id_length = self.read_int(4)
        if id_length != 0:
            return self.read_raw(id_length)
        else:
            # A zero length means a fixed 4-character code follows.
            return self.read_raw(4)

    def read_descriptor_item(self, item_type):
        """Dispatch to the read_di_* method matching *item_type*."""
        func = 'read_di_%s' % item_type.strip()
        assert hasattr(self, func), \
            "don't know how to read descriptor item %s" % item_type
        return getattr(self, func)()

    def read_di_Objc(self):
        # Nested descriptor object.
        return self.read_descriptor()

    def read_di_doub(self):
        # 64-bit float.
        return self.read_double()

    def read_di_UntF(self):
        # Unit float: a 4CC unit tag followed by the value.
        units = self.read_raw(4)
        value = self.read_double()
        return {'units': units, 'value': value}

    def read_di_bool(self):
        # Single byte, nonzero == True.
        return self.read_int(1) != 0

    def read_di_enum(self):
        # Locals renamed so the 'type' builtin is no longer shadowed;
        # the returned dict keys are unchanged.
        enum_type = self.read_descriptor_id()
        enum_value = self.read_descriptor_id()
        return {
            'type': enum_type,
            'enum': enum_value,
        }

    def read_di_TEXT(self):
        return self.read_psd_unicode(2)

    def read_di_VlLs(self):
        # Variable-length list of typed items.
        count = self.read_int(4)
        results = []
        for i in range(count):
            item_type = self.read_raw(4)
            results.append(self.read_descriptor_item(item_type))
        return results

    def read_di_long(self):
        # 32-bit integer.
        return self.read_int(4)
|
class UrlTable:
    """Tracks crawl state: a pending queue plus a per-URL counter map."""

    def __init__(self, urls):
        # Pending URLs to crawl (starts empty; filled elsewhere).
        self.todo_list = []
        # Map url -> counter, seeded to 0 for every initial URL.
        self.all_url = dict.fromkeys(urls, 0)
# Dependencies for the Flask app and the machine-learning model.
# Fixes over the original draft: `From`/`If` keywords lower-cased, smart
# quotes replaced, `Flask` class name corrected, consistent model variable
# name (was Model_TSLA / mdoel_TSLA), `prediciton` typo, `round(x, .2)` ->
# round(x, 2), quoted/format()-ed template string, missing colon.
from flask import Flask, render_template, request
import pickle

app = Flask(__name__)

# Load the pre-trained model (pickled by our own training script).
# SECURITY: pickle.load executes arbitrary code -- only load trusted files.
model_TSLA = pickle.load(open('test_model.pkl', 'rb'))


# Route for the home page.
@app.route("/")
def home():
    """Render the landing page."""
    return render_template('index.html')


# Route for the prediction shown on Tesla.html.
@app.route('/predict', methods=['POST'])
def predict():
    """Run the model and render the predicted Tesla stock price."""
    prediction = model_TSLA.predict()
    output = round(prediction[0], 2)
    return render_template(
        'Tesla.html',
        prediction='Tesla Stock Price is: {}'.format(output))


if __name__ == "__main__":
    app.run(debug=True)
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import twisted
from sqlalchemy import engine
from twisted.internet import reactor, threads, defer
from twisted.python import threadpool, failure, versions
class DBThreadPool(threadpool.ThreadPool):
    """
    A pool of threads ready and waiting to execute queries.
    If the engine has an @C{optimal_thread_pool_size} attribute, then the
    maxthreads of the thread pool will be set to that value. This is most
    useful for SQLite in-memory connections, where exactly one connection
    (and thus thread) should be used.
    """
    # True once the pool is started and the shutdown hook is registered.
    running = False

    def __init__(self, engine):
        # Default size; engines (notably SQLite in-memory) may demand a
        # specific size via optimal_thread_pool_size.
        pool_size = 5
        if hasattr(engine, 'optimal_thread_pool_size'):
            pool_size = engine.optimal_thread_pool_size
        threadpool.ThreadPool.__init__(self,
                                       minthreads=1,
                                       maxthreads=pool_size,
                                       name='DBThreadPool')
        self.engine = engine
        # Start lazily, once the reactor is running.
        self._start_evt = reactor.callWhenRunning(self._start)

    def _start(self):
        self._start_evt = None
        if not self.running:
            self.start()
            # Arrange an orderly stop during reactor shutdown.
            self._stop_evt = reactor.addSystemEventTrigger(
                'during', 'shutdown', self._stop)
            self.running = True

    def _stop(self):
        self._stop_evt = None
        self.stop()
        # Dispose the engine so pooled DB connections are released too.
        self.engine.dispose()
        self.running = False

    def do(self, callable, *args, **kwargs):
        """
        Call CALLABLE in a thread, with a Connection as first argument.
        Returns a deferred that will indicate the results of the callable.
        Note: do not return any SQLAlchemy objects via this deferred!
        """
        def thd():
            # Runs in a worker thread: check out a connection, invoke the
            # callable, and hand back a plain (non-SQLAlchemy) result.
            conn = self.engine.contextual_connect()
            rv = callable(conn, *args, **kwargs)
            assert not isinstance(rv, engine.ResultProxy), \
                "do not return ResultProxy objects!"
            return rv
        return threads.deferToThreadPool(reactor, self, thd)

    def do_with_engine(self, callable, *args, **kwargs):
        """
        Like l{do}, but with an SQLAlchemy Engine as the first argument
        """
        def thd():
            conn = self.engine
            rv = callable(conn, *args, **kwargs)
            assert not isinstance(rv, engine.ResultProxy), \
                "do not return ResultProxy objects!"
            return rv
        return threads.deferToThreadPool(reactor, self, thd)

    # older implementations for twisted < 0.8.2, which does not have
    # deferToThreadPool; this basically re-implements it, although it gets some
    # of the synchronization wrong - the thread may still be "in use" when the
    # deferred fires in the parent, which can lead to database accesses hopping
    # between threads. In practice, this should not cause any difficulty.
    def do_081(self, callable, *args, **kwargs):
        d = defer.Deferred()
        def thd():
            try:
                conn = self.engine.contextual_connect()
                rv = callable(conn, *args, **kwargs)
                assert not isinstance(rv, engine.ResultProxy), \
                    "do not return ResultProxy objects!"
                reactor.callFromThread(d.callback, rv)
            except:
                # NOTE(review): bare except also traps SystemExit /
                # KeyboardInterrupt -- kept as-is on this legacy path.
                reactor.callFromThread(d.errback, failure.Failure())
        self.callInThread(thd)
        return d

    def do_with_engine_081(self, callable, *args, **kwargs):
        d = defer.Deferred()
        def thd():
            try:
                conn = self.engine
                rv = callable(conn, *args, **kwargs)
                assert not isinstance(rv, engine.ResultProxy), \
                    "do not return ResultProxy objects!"
                reactor.callFromThread(d.callback, rv)
            except:
                reactor.callFromThread(d.errback, failure.Failure())
        self.callInThread(thd)
        return d

    # On twisted versions predating deferToThreadPool, substitute the
    # hand-rolled implementations above at class-definition time.
    if twisted.version < versions.Version('twisted', 8, 2, 0):
        do = do_081
        do_with_engine = do_with_engine_081
|
import pymysql
class Mysql(object):
    """Thin convenience wrapper around a single pymysql connection/cursor."""
    def __init__(self):
        """Connect to the database and keep the connection in ``self.conn``."""
        # NOTE(review): credentials are hard-coded; move to config/env vars
        # before production use.
        self.mysql_host = "127.0.0.1"  # database host IP
        self.mysql_db = "tw"  # database name (must already exist in MySQL)
        self.mysql_user = "root"  # database login user
        self.mysql_password = "123456"  # database login password
        self.mysql_port = 3306  # database port
        # self.sql = sql
        # The SQL below creates the table that stores registered-account info:
        # "CREATE TABLE id_info (id INT AUTO_INCREMENT PRIMARY KEY,name VARCHAR(20), company_name VARCHAR(20),phone_number VARCHAR(20), mobile_number VARCHAR(20),key_products VARCHAR(20), country_id INT(3),email VARCHAR(20), password VARCHAR(20),messenger_type VARCHAR(20), messenger_id VARCHAR(20));"
        # The SQL below creates the product-category table (site categories):
        # CREATE TABLE ind_info (id INT(10),name VARCHAR(20));
        self.conn = pymysql.connect(host=self.mysql_host, port=self.mysql_port, user=self.mysql_user, password=self.mysql_password,
                                    db=self.mysql_db, charset='UTF8MB4')
        self.cur = self.conn.cursor()
    def get_all(self, sql):
        """Execute ``sql`` and return all rows.

        Returns a tuple of row tuples.
        NOTE(review): if execute() fails, the error is only printed and
        fetchall() is still called, which may raise or return stale data —
        confirm whether callers rely on this best-effort behavior.
        """
        try:
            self.cur.execute(sql)
        except Exception as e:
            print(e)
        result = self.cur.fetchall()
        return result  # result is a tuple of rows
    def insert_into(self, sql, args):
        """Execute a parameterized write; roll back on error, else commit."""
        try:
            self.cur.execute(sql, args)
        except Exception as e:
            print(e)
            self.conn.rollback()
        self.conn.commit()
    def close(self):
        """Close the cursor and the underlying connection."""
        self.cur.close()
        self.conn.close()
from django import template

# Django requires this module-level name so {% load %} can find the filters.
register = template.Library()


@register.filter
def index(my_list: list, i: int):
    """Template filter: return the element of ``my_list`` at position ``int(i)``."""
    position = int(i)
    return my_list[position]
|
#******************************************************************************
#
#"Distribution A: Approved for public release; distribution unlimited. OPSEC #4046"
#
#PROJECT: DDR
#
# PACKAGE :
# ORIGINAL AUTHOR :
# MODIFIED DATE :
# MODIFIED BY :
# REVISION :
#
# Copyright (c) 2020 DCS Corporation
#
# Unlimited Rights assigned to the U.S. Government
#
# This material may be reproduced by or for the U.S Government pursuant
# to the copyright license under the clause at DFARS 252.227-7013. This
# notice must appear in all copies of this file and its derivatives.
#******************************************************************************
#
#Copyright (c) 2019-2020 U.S. Federal Government (in countries where recognized)
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to use,
#copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
#Software, and to permit persons to whom the Software is furnished to do so,
#subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
#EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
#MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
#IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
#DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
#ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
#DEALINGS IN THE SOFTWARE.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import time
import os
from pathlib import Path
import rospy
import roslaunch
import rospkg
import json
from std_msgs.msg import String
class AutomatedTesting():
    """Scripted end-to-end test driver for the DDR data recorder.

    Launches the DDR GUI via roslaunch, then exercises the recorder by
    publishing GUI events and mode-switch messages, and by starting/killing
    the recorder nodes between test phases.
    """
    def __init__(self):
        """Launch the DDR GUI and cache rosparam configuration."""
        self.uuid = roslaunch.rlutil.get_or_generate_uuid(None, False)
        self.launch = roslaunch.scriptapi.ROSLaunch()
        rospack = rospkg.RosPack()
        ddr_gui_launch = (rospack.get_path("ddr_data_recorder")
                          + "/launch/ddrGui.launch")
        self.launch.parent = roslaunch.parent.ROSLaunchParent(
            self.uuid, [ddr_gui_launch])
        self.start_ddr()
        # rosparams configured by the launch files; used for phase checks below.
        self.test_event = rospy.get_param("testEvent")
        self.kml = rospy.get_param("topicXML")
        self.bags_dir = rospy.get_param("directory")
        self.init_dir = self.bags_dir + "/"
        self.active_bag = ""
        self.ddr_dir = Path(self.init_dir).expanduser()
        self.pub = rospy.Publisher('/ddr/api', String, queue_size=10)
    ## Runs an ls command on a given directory defaults to current directory.
    # @param files list List of strings of file names we're expecting.
    # @param directory str The directory we want to check.
    # NOTE(review): on Python 3, check_output returns bytes, so `files`
    # would have to contain bytes entries to ever match — confirm this
    # script targets Python 2 (ROS Melodic era).
    def check_files(self, files=None, directory=''):
        if files is None:
            files = set()
        output = set(subprocess.check_output('ls ' + directory,
                                             shell=True).split())
        if files == output:
            return 'Directory matches expected output.'
        if files.issuperset(output):
            return 'Program\'s output is smaller than expected.'
        if output.issuperset(files):
            return 'Program\'s output is larger than expected.'
        return 'error'
    ## Publishes a GUI event message
    # @param id_val str
    # @param type_val str
    # @param dynamic_record bool
    # @param shadow_record bool
    # @param sleep_time float The amount of time to sleep after executing the
    # command.
    # Shells out to `rostopic pub` rather than publishing through rospy;
    # dynamic_record/shadow_record are passed as the strings 'true'/'false'.
    def publish_gui_event(self, id_val='', type_val='', dynamic_record='false',
                          shadow_record='false', sleep_time=0.1, alert_msg=''):
        subprocess.Popen('rostopic pub -1 /ddr/event ' +
                         'ddr_data_recorder/Event "header:\n' +
                         ' seq: 0\n stamp: {secs: 0, nsecs: 0}\n ' +
                         'frame_id: \'\'\n' +
                         'eventID: \'' + id_val + '\'\n' +
                         'eventType: \'' + type_val + '\'\n' +
                         'dynamicRecord: ' + dynamic_record + '\n' +
                         'shadowRecord: ' + shadow_record + '\n' +
                         'alertMsg: ' + alert_msg + '"', shell=True)
        time.sleep(sleep_time)
    ## Publishes a mode-switch event on /ddr/api as a JSON string.
    # @param mode int numeric mode selector (see mapping below)
    # @param sleep_time float pause after publishing
    def publish_mode_switch(self, mode=0, sleep_time=0.1):
        # Mapping implemented below: 1 -> idle, 2 -> teleop,
        # 3 -> waypoint_plan_executing, 11 -> manual, 12 -> estop,
        # anything else -> kickstart.
        # NOTE(review): an earlier comment here listed a different numbering
        # (idle-0, teleop_manual-1, ito-10, estop-11); it did not match the
        # code — confirm which scheme external callers use.
        translated_mode = ""
        if mode == 1:
            translated_mode = "idle"
        elif mode == 2:
            translated_mode = "teleop"
        elif mode == 3:
            translated_mode = "waypoint_plan_executing"
        elif mode == 11:
            translated_mode = "manual"
        elif mode == 12:
            translated_mode = "estop"
        else:
            translated_mode = "kickstart"
        event = {}
        event['eventID'] = "topicGroup"
        event['dynamicRecord'] = True
        event['shadowRecord'] = False
        event['eventType'] = translated_mode
        event['alertMsg'] = "switching to: " + translated_mode
        self.pub.publish(json.dumps(event))
        time.sleep(sleep_time)
    def trigger_manual_record(self, msg):
        """Simulate pressing the GUI 'manual record' button."""
        self.publish_gui_event(id_val='button press', type_val='manual record',
                               alert_msg=msg)
        time.sleep(0.2)
    def trigger_shadow_record(self):
        """Simulate pressing the GUI 'shadow record' button."""
        self.publish_gui_event(id_val='button press', type_val='shadow record',
                               alert_msg="Shadow record captured!")
        time.sleep(0.2)
    def mode_switching_test(self, sleep_time=15):
        """Exercise recording across a sequence of slow mode switches."""
        print("\nSTARTING MODE SWITCHING\n")
        # kick start
        self.publish_mode_switch(11, sleep_time)
        time.sleep(5)
        self.trigger_manual_record("Mode Switching")
        # 15 seconds allows for 3 bags to be recorded before a mode switch
        self.publish_mode_switch(11, sleep_time)
        self.publish_mode_switch(1, sleep_time)
        self.publish_mode_switch(2, sleep_time)
        self.publish_mode_switch(3, sleep_time)
        self.publish_mode_switch(2, sleep_time)
        self.publish_mode_switch(1, sleep_time)
        self.publish_mode_switch(11, sleep_time)
        self.publish_mode_switch(3, sleep_time)
        self.publish_mode_switch(1, sleep_time)
        self.trigger_manual_record("Mode Switching")
    def fast_mode_switching(self, sleep_time=0.2):
        """Exercise recording with rapid back-and-forth mode switches."""
        print("\nSTARTING FAST MODE SWITCHING\n")
        self.trigger_manual_record(" Fast Mode Switching")
        time.sleep(1)
        # switch between manual and idle
        self.publish_mode_switch(11, sleep_time)
        self.publish_mode_switch(1, sleep_time)
        self.publish_mode_switch(11, sleep_time)
        self.publish_mode_switch(1, sleep_time)
        # switch between waypoint and teleop
        self.publish_mode_switch(3, sleep_time)
        self.publish_mode_switch(2, sleep_time)
        self.publish_mode_switch(3, sleep_time)
        self.publish_mode_switch(2, sleep_time)
        # switch between idle and teleop
        self.publish_mode_switch(1, sleep_time)
        self.publish_mode_switch(2, sleep_time)
        self.publish_mode_switch(1, sleep_time)
        self.publish_mode_switch(2, sleep_time)
        #time.sleep(1)
        self.trigger_manual_record(" Fast Mode Switching")
        time.sleep(2)
    def ddr_trigger_fast_mode_switching(self):
        """Same fast-switch pattern, but driven through GUI events instead."""
        print("\nSTARTING DDR TRIGGER FAST MODE SWITCHING\n")
        self.trigger_manual_record("DDR Fast Mode Switching")
        time.sleep(2)
        # switch between manual and idle
        self.publish_gui_event(dynamic_record='true', id_val='topicGroup',
                               type_val='idle', alert_msg="idle")
        self.publish_gui_event(dynamic_record='true', id_val='topicGroup',
                               type_val='manual', alert_msg="manual")
        self.publish_gui_event(dynamic_record='true', id_val='topicGroup',
                               type_val='idle', alert_msg="idle")
        self.publish_gui_event(dynamic_record='true', id_val='topicGroup',
                               type_val='manual', alert_msg="manual")
        # switch between waypoint and teleop
        self.publish_gui_event(dynamic_record='true', id_val='topicGroup',
                               type_val='waypoint_plan_executing',
                               alert_msg="waypoint")
        self.publish_gui_event(dynamic_record='true', id_val='topicGroup',
                               type_val='teleop', alert_msg="teleop")
        self.publish_gui_event(dynamic_record='true', id_val='topicGroup',
                               type_val='waypoint_plan_executing',
                               alert_msg="waypoint")
        self.publish_gui_event(dynamic_record='true', id_val='topicGroup',
                               type_val='teleop', alert_msg="teleop")
        # switch between idle and teleop
        self.publish_gui_event(dynamic_record='true', id_val='topicGroup',
                               type_val='idle', alert_msg="idle")
        self.publish_gui_event(dynamic_record='true', id_val='topicGroup',
                               type_val='teleop', alert_msg="teleop")
        self.publish_gui_event(dynamic_record='true', id_val='topicGroup',
                               type_val='idle', alert_msg="idle")
        self.publish_gui_event(dynamic_record='true', id_val='topicGroup',
                               type_val='teleop', alert_msg="teleop")
        self.trigger_manual_record("DDR Fast Mode Switching")
        time.sleep(1)
    def wait_for_next_active_bag(self):
        """Busy-wait until the current *.bag.active file is replaced by a new one.

        NOTE(review): if no *.bag.active file exists when this is called,
        `active_bag` is never bound and the while loop raises NameError —
        confirm a recorder is always running first.
        """
        for bag in os.listdir(str(self.ddr_dir)):
            if bag.endswith(".bag.active"):
                active_bag = bag
                next_bag = bag
        # wait until the next active bag appears in the folder
        while active_bag == next_bag:
            for bag1 in os.listdir(str(self.ddr_dir)):
                if bag1.endswith(".bag.active"):
                    next_bag = bag1
    def operational_capture_testing(self):
        """Verify captures across mode changes under normal operation."""
        print("\nSTARTING OPERATIONAL CAPTURE TESTING\n")
        # switch to idle and let 2 bags accumulate then switch to manual
        # and let two more bags accumulate
        self.publish_mode_switch(1, 10)  # idle mode
        self.publish_mode_switch(11, 10)  # manual mode
        self.trigger_manual_record("Operational Capture Testing")
        time.sleep(15)  # allow 3 manual bags to accumulate
        self.publish_mode_switch(1, 15)
        self.publish_mode_switch(11, 15)  # manual mode
        self.wait_for_next_active_bag()
        self.trigger_shadow_record()
        self.trigger_manual_record("Operational Capture Testing")
    def single_bag_capture(self):
        """Verify a manual capture that starts and ends within one bag."""
        print("\nSTARTING SINGLE BAG CAPTURE TESTING\n")
        self.publish_mode_switch(11, 10)  # manual mode
        self.wait_for_next_active_bag()
        self.trigger_manual_record("Single Bag Capture")
        self.trigger_manual_record("Single Bag Capture")
    def multi_bag_capture(self):
        """Verify several overlapping manual captures across bag rollovers."""
        print("\nSTARTING MULTI BAG CAPTURE TESTING\n")
        self.publish_mode_switch(11, 10)  # manual mode
        self.wait_for_next_active_bag()
        self.trigger_manual_record("Multi Bag Capture_1")
        self.trigger_manual_record("Multi Bag Capture_1")
        self.trigger_manual_record("Multi Bag Capture_2")
        self.trigger_manual_record("Multi Bag Capture_2")
        self.trigger_manual_record("Multi Bag Capture_3")
        self.trigger_manual_record("Multi Bag Capture_3")
        self.wait_for_next_active_bag()
        self.trigger_manual_record("Multi Bag Capture_4")
        self.trigger_manual_record("Multi Bag Capture_4")
        self.trigger_manual_record("Multi Bag Capture_5")
        self.trigger_manual_record("Multi Bag Capture_5")
        self.trigger_manual_record("Multi Bag Capture_6")
        self.trigger_manual_record("Multi Bag Capture_6")
    def shadow_stress_testing(self):
        """Fire several shadow records in quick succession inside one capture."""
        print("\nSTARTING SHADOW STRESS TESTING\n")
        self.trigger_manual_record("Shadow Stress Test")
        self.wait_for_next_active_bag()
        self.trigger_shadow_record()
        self.trigger_shadow_record()
        self.trigger_shadow_record()
        self.trigger_shadow_record()
        self.trigger_manual_record("Shadow Stress Test")
    def dynamic_record_testing(self):
        """Phase 1: mode-switch tests; requires testEvent=false and kml.xml."""
        #configuration check
        if self.test_event is not False or self.kml != "/kml.xml":
            print("WARNING - rosparams not configured correctly. Did you:")
            print("1. set test event mode to 'false'?")
            print("2. Use the kml.xml?")
            return
        #warning statements
        print("\nYOU ARE RUNNING PART 1 OF THE AUTOMATED TEST SCRIPT\n")
        print("Remember - DDR must NOT be running before beginning part 1 \
AND the ddr_bags directory must be empty!\n\n\n")
        #run tests for part 1
        self.mode_switching_test()
        self.fast_mode_switching(0.2)
        self.ddr_trigger_fast_mode_switching()
        #completion statements
        print("\n\nAutomated Testing Part 1 Complete!")
        print("Part 2 of DDR testing is about to begin - 15 seconds!.\n\n")
        time.sleep(15)
    def capture_verification_testing(self):
        """Phase 2: capture tests; requires testEvent=true and testkml.xml.

        NOTE(review): the warning text below ("change ... to 'false'",
        "delete the testkml.xml") contradicts the condition, which requires
        test_event True and the testkml — confirm the intended wording.
        """
        #configuration check
        if self.test_event is not True or self.kml != "/testkml.xml":
            print("WARNING - rosparams not configured correctly. Did you:")
            print("1. change test event mode to 'false'?")
            print("2. delete the testkml.xml?")
            return
        #warning statements
        print("\n\nYOU ARE RUNNING PART 2 OF THE AUTOMATED TEST SCRIPT\n")
        print("\nRemember - DDR must NOT be running before part 2!\n\n")
        #run tests for part 2
        self.operational_capture_testing()
        self.single_bag_capture()
        self.multi_bag_capture()
        self.shadow_stress_testing()
        #completion statements
        print("\nAutomated Testing Part 2 Complete!")
        time.sleep(10)
    def start_ddr(self):
        """Init the ROS node and start the DDR GUI launch file."""
        rospy.init_node('ddr_verification_scripts', anonymous=True)
        roslaunch.configure_logging(self.uuid)
        self.launch.start()
        rospy.loginfo("ddr started")
        time.sleep(5)
    def kill_dynamic_recorder(self):
        """Kill the /dynamic_recorder node via rosnode."""
        node = "/dynamic_recorder"
        os.system("rosnode kill " + node)
        time.sleep(1)
    def kill_record_process(self):
        """Kill any lingering rosbag 'record' processes."""
        os.system("killall record")
    def kill_split_manager(self):
        """Kill the /split_manager node via rosnode."""
        node = "/split_manager"
        os.system("rosnode kill " + node)
        time.sleep(1)
    def start_dynamic_recorder(self):
        """Reconfigure rosparams for phase 2 and relaunch the recorder node."""
        rospy.set_param("topicXML", "/testkml.xml")
        rospy.set_param("testEvent", True)
        time.sleep(1)
        self.kml = rospy.get_param("topicXML")
        self.test_event = rospy.get_param("testEvent")
        time.sleep(1)
        package = "ddr_data_recorder"
        executable = "dynamic_recorder.py"
        node = roslaunch.core.Node(package, executable)
        self.launch.launch(node)
        time.sleep(2)
    def start_split_manager(self):
        """Relaunch the bag split manager node."""
        package = "ddr_data_recorder"
        executable = "manager.py"
        node = roslaunch.core.Node(package, executable)
        self.launch.launch(node)
        time.sleep(2)
    def delete_loose_bags(self):
        """Remove completed (*.bag) files left in the DDR bags directory."""
        for bag in os.listdir(str(self.ddr_dir)):
            if bag.endswith(".bag"):
                os.remove(os.path.join(str(self.ddr_dir), bag))
    def main(self):
        """Run both test phases back to back, reconfiguring in between."""
        time.sleep(5)
        welcome_message = """\n\n\nWelcome to the DDR Automated Testing Tool - Beta Version!
        Remember to stop DDR before clearing the DDR bags folder.
        Before beginning either phase of the automated testing:
        1) Remember DDR should NOT be running
        2) An EMPTY ddr_bags directory.
        Good Luck!\n\n"""
        print(welcome_message)
        # Dynamic Recorder Testing
        """
        CONFIGURATION
        KML: kml.xml
        testEvent: FALSE
        """
        self.dynamic_record_testing()
        # Capture Verification Testing
        """
        CONFIGURATION
        KML: testkml.xml
        testEvent: TRUE
        """
        self.kill_dynamic_recorder()
        self.kill_record_process()
        self.kill_split_manager()
        self.delete_loose_bags()
        self.start_dynamic_recorder()
        self.start_split_manager()
        self.wait_for_next_active_bag()
        self.capture_verification_testing()
        exit()
if __name__ == '__main__':
    # Entry point: build the test driver (this launches the DDR GUI) and run
    # both automated test phases.
    auto_tester = AutomatedTesting()
    auto_tester.main()
|
from django.urls import path
from . import views
# URL routes for the sensor app: creation and retrieval endpoints.
urlpatterns = [
    path('create/', views.create_sensor, name='create_sensor'),
    path('get/', views.get_sensor, name='get_sensor'),
]
|
# Exercise 1, page 95. Basic programming ("Grunnleggende programmering").
# This part collects information: name, address, phone number and education.
name = input('Skriv inn navn: ')  # reads the user's name
address = input('Skirv inn adressen din: ')  # reads the address
# NOTE(review): int() raises ValueError for non-numeric input and drops
# leading zeros — phone numbers are usually better kept as strings.
tlf = int(input('Skriv inn ditt telefonnummer: '))  # phone number as int()
utdanning = input('Skriv inn hovedfaget ditt: ')  # reads the major subject
# This part echoes back what was entered earlier in the program/script.
print('Ditt navn er:', name)  # prints the given name
print('Din adresse er:', address)  # prints the given address
print('Ditt telefonnummer er:', tlf)  # prints the given phone number
print('Ditt hovedfag er:', utdanning)  # prints the given major subject
|
#!/usr/bin/python
# Global accumulator: collects origin/fixpoint ratios recorded by g().
d=set()
def f(x, W):
    """Return x divided by (x mod W) when that remainder divides x; else None.

    Returns None when x is a multiple of W (remainder 0) or when the
    remainder does not divide x evenly.
    """
    remainder = x % W
    if remainder == 0 or x % remainder != 0:
        return None
    quotient = (x - remainder) / W
    # Sanity check of the identity x/r == W*((x-r)/W)/r + 1.
    assert x / remainder == W * quotient / remainder + 1
    return x / remainder
def g(x, W):
    """Iterate f starting from x until it returns None or reaches a fixpoint.

    On convergence to a fixpoint z, records origin/z in the global set d and
    returns z; returns None when the chain dies out.
    """
    origin = x
    current = x
    while True:
        step = f(current, W)
        if step == current or step is None:
            if step is not None:
                global d
                d.add(origin / step)
            return step
        current = step
W = 1024
# Apply g to every integer in [1, 500000); g records ratios into d.
for i in range(1, 500000):
    #print i, g(i, W)
    g(i,W)
# Python 2 print statement: number of distinct origin/fixpoint ratios found.
print len(d)
|
#!/usr/bin/env python
# Plots Wifi<->3G session hand-offs on a campus map (Python 2 script).
# For each device, consecutive long sessions of different connection types
# that are close in both time (<60s) and space (<100m) are drawn as a line.
import sys, matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from networking.lib import * #@UnusedWildImport
from location.lib import * #@UnusedWildImport
from location.maps import SUNYNorthSpine as Map
n = Networking.load()
print >>sys.stderr, "Starting."
# Keep only sessions at least a minute long that have location fixes.
long_device_sessions = {}
for d in n.devices:
    long_device_sessions[d] = []
for s in n.data_sessions:
    if (s.end - s.start).seconds >= 60 and len(s.locations) > 0:
        long_device_sessions[s.device].append(s)
fig = plt.figure()
map = Map()
count = 0
all_count = 0
lines = []
for d in n.devices:
    for i in range(len(long_device_sessions[d])):
        try:
            l_0 = long_device_sessions[d][i]
            l_1 = long_device_sessions[d][i + 1]
        # narrowed from a bare 'except:': only the end-of-list lookup of
        # the following session is expected to fail here.
        except IndexError:
            continue
        # Only transitions between different session types are of interest.
        if l_0.__class__.__name__ == l_1.__class__.__name__:
            continue
        all_count += 1
        # Hand-off heuristic: gap under a minute and under 100 m apart.
        if (l_1.locations[0].datetime - l_0.locations[-1].datetime).seconds < 60 and \
           l_1.locations[0].dist(l_0.locations[-1]) * 1000.0 < 100.0:
            start = map.m(l_1.locations[0].lon, l_1.locations[0].lat)
            end = map.m(l_0.locations[-1].lon, l_0.locations[-1].lat)
            if isinstance(l_0, WifiSession):
                lines.append([[start[0], start[1]], [end[0], end[1]]])
            else:
                lines.append([[start[1], start[0]], [end[1], end[0]]])
            count += 1
print >>sys.stderr, count, all_count
# Draw the map background and every transition, labelling the legend only once.
legends = False
map.m.imshow(map.background, origin='upper')
for line in lines:
    if not legends:
        first = 'Travel'
        second = '3G'
        third = 'Wifi'
        legends = True
    else:
        first = second = third = '__none__'
    map.m.plot(*zip(*line), color='black', label=first)
    map.m.plot(*line[0], color='red', marker='o', label=second)
    map.m.plot(*line[1], color='blue', marker='x', label=third)
fig.gca().legend()
fig.subplots_adjust(left=0.0, right=1.0, top=1.0, bottom=0.0)
# Scale the figure to the map's aspect ratio before saving.
width, height = 7.0, 7.0 * ((map.height * 1.0) / map.width)
fig.set_size_inches(width, height)
fig.savefig('graph.pdf', dpi=300)
|
import wrcX.core
import wrcX.core.data
from statsmodels.graphics import utils
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.dates as mdates
from pandas import melt,merge, DataFrame, notnull
from numpy import arange
from wrcX.core.filters import groupClassFilter
#-------
def split_plot3(grouped_x, keyOrder, ydim,labdim, xticklabels=None, ylabel=None,
                ax=None,label=False,keyHighlight=None,ylim=None):
    """Plot one dashed segment per group side-by-side along a shared x axis.

    grouped_x: a pandas GroupBy; keyOrder fixes the left-to-right group order.
    ydim/labdim: column names for the y values and the point labels.
    keyHighlight: group key drawn with the highlight palette ('leader' is a
    special mode that highlights only rank-1 points).
    Returns the matplotlib figure.
    """
    fig, ax = utils.create_mpl_ax(ax)
    start = 0
    ticks = []
    tmpxlabels = []
    for key in keyOrder:
        df=grouped_x.get_group(key)
        nobs = len(df)
        # Each group occupies its own contiguous x range; tick at its center.
        x_plot = arange(start, start + nobs)
        ticks.append(x_plot.mean())
        #Third parameter is color; 'k' is black
        ax.plot(x_plot, df[ydim], 'g', linestyle='--')
        #named colors: http://matplotlib.org/examples/color/named_colors.html
        highlightColors=['mistyrose','lightsalmon','salmon','tomato','orangered']
        baseColors=['silver','lightblue','paleturquoise','lightcyan','lightgreen']
        if key==keyHighlight:
            colors=highlightColors
        else:
            colors=baseColors
        for i in range(0,nobs):
            # Skip points outside the requested y window entirely.
            if ylim is not None and (df[ydim].iloc[i]<=ylim[0] or df[ydim].iloc[i]>=ylim[1]): continue
            if label:
                if nobs<=len(colors):
                    if keyHighlight=='leader':
                        # 'leader' mode: highlight only rank-1 entries.
                        # i-nobs indexes the palettes from the end so the last
                        # points get the strongest colors.
                        if int(df[labdim].iloc[i])==1: color=highlightColors[i-nobs]
                        else: color=baseColors[i-nobs]
                    else:
                        color=colors[i-nobs]
                else: color='pink'
                ax.text(x_plot[i], df[ydim].iloc[i], int(df[labdim].iloc[i]),
                        bbox=dict( boxstyle='round,pad=0.3',color=color))
            #elif df[ydim].iloc[i]>self.RC1SIZE:
            #    ax.text(x_plot[i]-1, df[ydim].iloc[i], int(df[labdim].iloc[i]),
            #            bbox=dict( boxstyle='round,pad=0.3',color='pink')) #facecolor='none',edgecolor='black',
            else:
                ax.plot(x_plot[i], df[labdim].iloc[i], 'or')
        #ax.hlines(df.values.mean(), x_plot[0], x_plot[-1], colors='k')
        start += nobs
        tmpxlabels.append(key)
    # Fall back to the group keys as labels; a dict remaps individual keys.
    if xticklabels is None:
        xticklabels=tmpxlabels
    elif isinstance(xticklabels, dict):
        xticklabels=[xticklabels[x] if x in xticklabels else x for x in tmpxlabels]
    ax.set_xticks(ticks)
    ax.set_xticklabels(xticklabels)
    if ylabel is not None: ax.set_ylabel(ylabel)
    ax.margins(.1, .05)
    return fig
#------
def rebaser(dt,carNo,sector=False):
    ''' Rebase the split times on a stage against the times for a particular driver.

    dt: long-format DataFrame with 'carNo', 'control' and delta columns.
    carNo: car number to rebase against ('' or None returns dt unchanged).
    sector: if True, operate on 'sectordelta'/'sectordelta_s' instead of
    'delta'/'delta_s'.
    Returns a rebased copy of dt; an empty DataFrame if carNo is absent.
    '''
    if carNo=='' or carNo is None:
        return dt
    else:
        carNo=str(int(carNo)) # go defensive on the carNo
        delta='delta' if not sector else 'sectordelta'
        delta_s='delta_s' if not sector else 'sectordelta_s'
        _dt=dt.copy()
        #If the car isn't there to rebase, don't rebase - return empty dataframe?
        if carNo not in _dt['carNo'].unique(): return DataFrame()#_dt
        # TO DO - also need to consider where car is there but not all split times are recorded
        # Try this for now - rebase to cols that do exist, otherwise drop cols
        rebaseableTimes=_dt[_dt['carNo']==carNo]['control'].unique().tolist()
        _dt=_dt[_dt['control'].isin(rebaseableTimes)]
        #HACK for now; TO DO - catch this properly
        # (narrowed from a bare 'except:'; missing rows raise IndexError from
        # iloc[0], other lookup issues raise KeyError - both fall under
        # Exception and keep the best-effort behavior)
        try:
            rebasetimes={control:_dt[(_dt['carNo']==carNo) & (_dt['control']==control)][delta_s].iloc[0] for control in _dt['control'].unique()}
        except Exception: return _dt
        _dt[delta_s]=_dt[delta_s]-_dt['control'].map(rebasetimes)
        rebasetimes={control:_dt[(_dt['carNo']==carNo) & (_dt['control']==control)][delta].iloc[0] for control in _dt['control'].unique()}
        _dt[delta]=_dt[delta]-_dt['control'].map(rebasetimes)
        return _dt
#-------
def _chart_stage_sector_delta_base_core(df_entry,df_splitTimes,
                      ddf_sectors,gc='RC1',eligibility=None,rebase=None):
    """Melt per-sector delta columns ('d_*') into long form for charting.

    Returns (dxs, driverOrder2): the long-format frame with per-control rank
    'spos', and the car numbers ordered by start time restricted to cars
    present in the data. Returns (empty DataFrame, []) when nothing survives
    filtering. Note: the 'rebase' parameter is accepted but applied by the
    wrapper, not here.
    """
    gc1=groupClassFilter(ddf_sectors,gc,eligibility)
    driverOrder=df_splitTimes.sort_values('start')['carNo']
    tcols=[c for c in ddf_sectors.columns if c.startswith('d_')]
    #dxs=gc1.ix[:,['carNo','driverName']+tcols]
    dxs=gc1.loc[:][['carNo','driverName']+tcols]
    dxs=melt(dxs, id_vars=['carNo','driverName'], var_name='control', value_vars=tcols, value_name='sectordelta')
    # Drop missing/blank deltas before converting to seconds.
    dxs=dxs[(dxs['sectordelta'].notnull()) & (dxs['sectordelta']!='') ]
    if len(dxs)==0: return DataFrame(),[]
    dxs['sectordelta_s']=dxs['sectordelta'].apply(lambda x: x.total_seconds())
    # Crude outlier guard: discard deltas over 1000 seconds.
    dxs=dxs[dxs['sectordelta_s']<1000]
    #This is the overall rank - need to be consistent with other ranking?
    dxs['spos']=dxs.groupby('control')['sectordelta_s'].rank().astype(int)
    driverOrder2=[d for d in driverOrder if d in dxs['carNo'].unique()]
    return dxs,driverOrder2
def _chart_stage_sector_delta_base(df_entry,df_splitTimes,
                      ddf_sectors,gc='RC1',eligibility=None,rebase=None):
    """Build the long-format sector-delta frame, optionally rebased to a car.

    Thin wrapper over _chart_stage_sector_delta_base_core that applies
    rebaser() on the sector columns when 'rebase' is given.
    """
    base_frame, ordered_cars = _chart_stage_sector_delta_base_core(
        df_entry, df_splitTimes, ddf_sectors, gc, eligibility, rebase)
    if rebase is not None:
        base_frame = rebaser(base_frame, rebase, sector=True)
    return base_frame, ordered_cars
def _chart_stage_delta_s_base(df_entry,df_splitTimes,gc='RC1',eligibility=None,rebase=None):
    """Melt split-time delta columns ('td_split_*' + stage time) to long form.

    Returns (dt, driverOrder2): the long-format frame with per-control rank
    'spos' and seconds column 'delta_s', plus driver names ordered by start
    time restricted to drivers present in the data. Optionally rebased to
    'rebase' via rebaser().
    """
    gc1=groupClassFilter(df_splitTimes,gc,eligibility)
    cols=[c for c in df_splitTimes.columns if c.startswith('time_split_')]
    #HACK - TO DO - HANDLE PROPERLY TO ALLOW PARTIAL RESULTS
    #If there is a NaT in a cell in a time column, the whole column is dropped in the charting?
    #Hack is to remove rows that do not have a complete complement of times
    gc1=gc1.dropna(subset=[c for c in gc1.columns if c.startswith('time_')])
    for col in cols:
        s=col.split('_')[-1]
        gc1['rank_{}'.format(s)]=gc1.groupby('stage')['time_split_{}'.format(s)].rank()
    gc1['rank']=gc1.groupby('stage')['time_stageTime'].rank()
    driverOrder=gc1.sort_values('start')['driverName']
    _tcols=[t for t in gc1.columns if t.startswith('td_split_')]
    tcols=_tcols+['time_stageTime']
    # .loc replaces the long-deprecated .ix (removed in pandas 1.0);
    # matches the sibling _chart_stage_sector_delta_base_core.
    dt=gc1.loc[:, ['carNo','driverName']+tcols]
    # Express the stage time relative to the first row's stage time.
    dt['time_stageTime']=dt['time_stageTime']-dt.iloc[0]['time_stageTime']
    dt=melt(dt, id_vars=['carNo','driverName'], var_name='control', value_vars=tcols, value_name='delta')
    dt['delta_s']=dt['delta'].apply(lambda x: x.total_seconds() if notnull(x) else None )
    #This is the RC1 rank - should we use the overall rank?
    dt['spos']=dt.groupby('control')['delta_s'].rank()
    dt=dt.dropna(subset=['spos','delta_s'])
    #Need to handle outliers?
    #dt=dt[dt['delta_s']<1000]
    driverOrder2=[d for d in driverOrder if d in dt['driverName'].unique()]
    if rebase is not None: dt=rebaser(dt,rebase)
    return dt, driverOrder2
#------
def chart_stage_delta_s(df_entry,df_splitTimes,gc='RC1',
                        eligibility=None,keyHighlight=None,rebase=None,ylim=None):
    """Chart per-driver split-time deltas (seconds) for a stage.

    Returns a matplotlib figure; empty figure when no data survives filtering.
    """
    deltas, ordered_drivers = _chart_stage_delta_s_base(
        df_entry, df_splitTimes, gc=gc, eligibility=eligibility, rebase=rebase)
    # We can actually tune this for different chart types
    fig, ax = plt.subplots(figsize=(15, 8))
    if deltas.empty:
        return fig
    grouped = deltas[['delta_s', 'spos']].groupby(deltas['driverName'])
    split_plot3(grouped, ordered_drivers, 'delta_s', 'spos',
                ax=ax, label=True, keyHighlight=keyHighlight, ylim=ylim)
    ax.set_xticklabels(ax.xaxis.get_majorticklabels(), rotation=90)
    if ylim is not None:
        ax.set_ylim(ylim)
    return fig
def chart_stage_split_pos(df_entry,df_splitTimes,gc='RC1',eligibility=None,keyHighlight=None, ylim=None):
    """Chart per-driver rank position at each split control.

    Returns a matplotlib figure; empty figure when no data survives filtering.
    """
    positions, ordered_drivers = _chart_stage_delta_s_base(
        df_entry, df_splitTimes, gc=gc, eligibility=eligibility)
    fig, ax = plt.subplots(figsize=(15, 8))
    if positions.empty:
        return fig
    grouped = positions[['driverName', 'spos']].groupby('driverName')
    split_plot3(grouped, ordered_drivers, 'spos', 'spos',
                ax=ax, label=True, keyHighlight=keyHighlight)
    ax.set_xticklabels(ax.xaxis.get_majorticklabels(), rotation=90)
    if ylim is not None:
        ax.set_ylim(ylim)
    return fig
def chart_stage_sector_delta(df_entry=None,df_splitTimes=None,ddf_sectors=None,dxs=None,driverOrder2=None,gc='RC1',
                    eligibility=None,keyHighlight=None,rebase=None,xticklabels=None,ylim=None):
    """Chart per-car sector deltas; accepts a prebuilt (dxs, driverOrder2) pair.

    When dxs is None the long-format data is built (and optionally rebased)
    from the raw frames. Returns a matplotlib figure.
    """
    if dxs is None:
        dxs, driverOrder2 = _chart_stage_sector_delta_base(
            df_entry, df_splitTimes, ddf_sectors,
            gc=gc, eligibility=eligibility, rebase=rebase)
    fig, ax = plt.subplots(figsize=(15, 8))
    if driverOrder2 == [] or dxs.empty:
        return fig
    if xticklabels is None:
        # Default to mapping car numbers to driver names.
        xticklabels = wrcX.core.data.driverDict
    grouped = dxs[['carNo', 'driverName', 'sectordelta_s', 'spos']].groupby('carNo')
    split_plot3(grouped, driverOrder2, 'sectordelta_s', 'spos',
                xticklabels=xticklabels, ax=ax, label=True,
                keyHighlight=keyHighlight, ylim=ylim)
    ax.set_xticklabels(ax.xaxis.get_majorticklabels(), rotation=90)
    if ylim is not None:
        ax.set_ylim(ylim)
    return fig
def chart_stage_sector_pos(df_entry=None,df_splitTimes=None,ddf_sectors=None,dxs=None,driverOrder2=None,gc='RC1',
                    eligibility=None,keyHighlight=None,xticklabels=None):
    """Chart per-car rank position in each sector; accepts prebuilt data.

    When dxs is None the long-format data is built from the raw frames.
    Returns a matplotlib figure.
    """
    if dxs is None:
        dxs, driverOrder2 = _chart_stage_sector_delta_base(
            df_entry, df_splitTimes, ddf_sectors,
            gc=gc, eligibility=eligibility)
    fig, ax = plt.subplots(figsize=(15, 8))
    if dxs.empty:
        return fig
    if xticklabels is None:
        # Default to mapping car numbers to driver names.
        xticklabels = wrcX.core.data.driverDict
    grouped = dxs[['carNo', 'driverName', 'spos']].groupby('carNo')
    split_plot3(grouped, driverOrder2, 'spos', 'spos',
                xticklabels=xticklabels, ax=ax, label=True,
                keyHighlight=keyHighlight)
    ax.set_xticklabels(ax.xaxis.get_majorticklabels(), rotation=90)
    return fig
#--------
def basicStagePosChart(gc='RC1',eligibility=None,maxstages=None,stagemax=None,stagemin=None):
    """Plot overall position by stage for a group/class, one line per driver.

    Positions beyond the class size are re-ranked ('xrank') so off-the-pace
    cars stack below the class block, and are annotated with their true
    position. Returns the pyplot module (caller shows/saves the figure).
    """
    df_overall = wrcX.core.data.df_overall[:]
    maxstages=df_overall['stage'].max() if maxstages is None else maxstages
    stagemin=df_overall['stage'].min() if stagemin is None else stagemin
    stagemax=maxstages if stagemax is None else stagemax
    gc1=df_overall[(df_overall['groupClass']==gc) & (df_overall['stage']<=stagemax)].reset_index(drop=True)
    gcSize=len(gc1[(gc1['groupClass']==gc) & (gc1['stage']==1)]) #should really do this from entry
    # xrank: keep in-class positions as-is; pack out-of-class cars in arrival
    # order just below the class block.
    gc1['xrank']= (gc1['pos']>gcSize)
    gc1['xrank']=gc1.groupby('stage')['xrank'].cumsum()
    gc1['xrank']=gc1.apply(lambda row: row['pos'] if row['pos']<=gcSize else row['xrank'] +gcSize, axis=1)
    fig, ax = plt.subplots(figsize=(15,8))
    ax.get_yaxis().set_ticklabels([])
    gc1.groupby('driverName').plot(x='stage',y='xrank',ax=ax,legend=None);
    # NOTE(review): d.loc(i)['col'] calls the .loc indexer with an axis
    # argument rather than indexing by label (d['col'] would be the usual
    # form) — confirm this works on the pandas version in use.
    for i,d in gc1[gc1['xrank']>gcSize].iterrows():
        ax.text(d.loc(i)['stage'], d.loc(i)['xrank'], int(d.loc(i)['pos']),
                bbox=dict( boxstyle='round,pad=0.3',color='pink')) #facecolor='none',edgecolor='black',
    plt.xlim(stagemin-1, maxstages+0.9) #max stages
    # Label each line with the driver name at both ends of the plotted range.
    for i,d in gc1[gc1['stage']==stagemin].iterrows():
        ax.text(stagemin-0.95, d.loc(i)['xrank'], d.loc(i)['driverName'])
    for i,d in gc1[gc1['stage']==maxstages].iterrows():
        ax.text(maxstages+0.1, d.loc(i)['xrank'], d.loc(i)['driverName'])
    #If we have a large offset for names ensure we don't display non-stage numbers
    fig.canvas.draw()
    labels = [item.get_text() for item in ax.get_xaxis().get_majorticklabels()]
    labels = [l if (l!='' and int(float(l)) >= stagemin and int(float(l)) <= maxstages) else '' for l in labels ]
    ax.set_xticklabels(labels)
    plt.gca().invert_yaxis()
    return plt
def timePlot(typ='td_diffFirst',max_mult=25,max_xs=900,gc="RC1",eligibility=None,
             maxstages=None,stagemax=None ):
    """Plot a cumulative time quantity (seconds) by stage, one line per driver.

    typ: timedelta column to plot (e.g. 'td_diffFirst'). The y axis is capped
    at min(max_xs, max_mult*stagemax, observed max) to suppress outliers.
    Returns the pyplot module.
    """
    df_entry = wrcX.core.data.df_entry[:]
    df_overall = wrcX.core.data.df_overall[:]
    maxstages=df_overall['stage'].max() if maxstages is None else maxstages
    stagemax=maxstages if stagemax is None else stagemax
    gc1=df_overall[(df_overall['groupClass']==gc) & (df_overall['stage']<=stagemax)].reset_index(drop=True)
    fig, ax = plt.subplots(figsize=(15,8))
    #ax.get_yaxis().set_ticklabels([])
    gc1['td_time_s']=gc1['td_time'].dt.total_seconds()
    # Missing deltas are treated as zero before converting to seconds.
    gc1[typ+'_s']=gc1[typ].fillna(0).dt.total_seconds()
    max_s=gc1[gc1['stage']==max(gc1['stage'])][typ+'_s'].max()
    max_s= max_s if max_s<max_xs else max_xs
    if max_mult is not None: max_s= max_s if max_s<(max_mult*stagemax) else max_mult*stagemax
    ax.set_ylim([0,max_s])
    gc1.groupby('driverName').plot(x='stage',y=typ+'_s', ax=ax,legend=None);
    plt.xlim(1, maxstages) #max stages
    # Label lines that remain inside the y window at the final stage.
    # NOTE(review): d.loc(i)[...] — same unusual .loc call pattern as in
    # basicStagePosChart; confirm against the pandas version in use.
    for i,d in gc1[(gc1['stage']==max(gc1['stage'])) & (gc1[typ+'_s'] < max_s)].iterrows():
        ax.text(max(gc1['stage'])+0.3, d.loc(i)[typ+'_s'], d.loc(i)['driverName'])
    return plt;
def sectorDeltaBarPlot(df_entry=None,df_splitTimes=None,ddf_sectors=None,dxs=None,driverOrder2=None,
                       flip=False,sortTotal=True,title='Delta by sector', gc='RC1',eligibility=None,rebase=None):
    """Horizontal bar chart of per-sector time deltas for each driver.

    dxs/driverOrder2 are derived from the raw frames when not supplied.
    When `rebase` names a car, deltas are relative to that car and the
    title is annotated accordingly.  Returns the pyplot module (or the bare
    figure when there is no data to plot).
    """
    if dxs is None:
        dxs,driverOrder2=_chart_stage_sector_delta_base(df_entry,df_splitTimes,
                                                        ddf_sectors,gc=gc,eligibility=eligibility,rebase=rebase)
    fig, ax = plt.subplots(figsize=(15,8))
    # NOTE(review): the empty path returns the Figure while the normal path
    # returns plt — callers relying on the return type should confirm.
    if dxs.empty: return fig
    # BUG FIX: positional DataFrame.pivot() arguments were removed in
    # pandas 2.0; keyword form is identical in behaviour on older pandas.
    dxsp=dxs.pivot(index='driverName', columns='control', values='sectordelta_s')
    if sortTotal:
        # Order drivers by their summed sector delta.
        tcols=[c for c in dxsp.columns if c.startswith('d_sector_')]
        dxsp['stagediff']=dxsp[tcols].sum(axis=1).round(1)
        dxsp=dxsp.sort_values('stagediff')[tcols]
    if rebase is not None: title='{} (relative to car {})'.format(title,rebase)
    dxsp.plot(kind='barh',title=title,ax=ax)
    #Also try to position the legend hopefully into whitespace
    xmin, xmax = ax.get_xlim()
    if flip:
        ax.invert_xaxis()
        if abs(xmin)<=abs(xmax):
            ax.legend(loc='upper left')
        else: ax.legend(loc='lower right')
    else:
        if abs(xmin)<=abs(xmax):
            ax.legend(loc='upper right')
        else: ax.legend(loc='lower left')
    ax.invert_yaxis()
    plt.ylabel('')
    plt.xlabel('Sector Delta (s)')
    return plt
|
from __future__ import print_function
import sys
import os
def main():
    """Print the configured C++ compiler plus the running Python's path/version.

    The compiler is read from the CXX environment variable, defaulting to
    'DEFAULT_CXX' when unset.  Returns the compiler string so callers can
    inspect it (previously returned None — backward compatible).
    """
    # os.environ.get replaces the explicit membership test (idiomatic).
    cxx = os.environ.get('CXX', 'DEFAULT_CXX')
    print(cxx, sys.executable, sys.version)
    return cxx


if __name__ == '__main__':
    main()
|
# Print the multiplication tables ("tabuada") 1 through 9, ten lines each,
# in the same order as the original manual-counter loop.
for tabuada in range(1, 10):
    for numero in range(1, 11):
        print("%d x %d = %d" % (tabuada, numero, tabuada * numero))
|
#!/usr/bin/env python
'''
Program : lineup.py
Version : 1.0
Author : b.k.gjelsten@fys.uio.no
Description :
'''
import sys,os
import bkgjelstenArgReader
# ##################################################### GLOBAL METHODS
# ##################################################### GLOBAL METHODS
# ##################################################### GLOBAL METHODS
def lineup(lines=[], optD={}, cmd=['ReadArg','PostInit','Lineup'], n=-1):
    """Convenience wrapper around Lineup: align the columns of `lines`.

    lines -- list of text lines to align
    optD  -- option dictionary passed to Lineup ('argv', 'n', 'align', ...)
    cmd   -- Lineup pipeline steps to execute
    n     -- shortcut for optD['n'] (number of spaces between columns)
    Returns the aligned lines, or (lines, just) when optD['align'] is set.
    """
    zcmd = list(cmd) # 2013-11-20: needed to do this, otherwise the default cmd value is changed once ReadArg is removed below. Strange.
    # BUG FIX: optD['n'] = n below mutated the caller's dict AND the shared
    # default {} (so a previous call's 'n' leaked into later calls); work on
    # a copy instead.
    optD = dict(optD)
    if ('ReadArg' in zcmd) and (not 'argv' in optD): zcmd.remove('ReadArg')
    if n>-1: optD['n'] = n # shortcut
    oLineup = Lineup(lines=lines, cmd=zcmd, optD=optD)
    linedup = oLineup.linedup
    just = oLineup.res['just'] # 2013-11-03
    del oLineup
    returnAlignArray = optD.get('align',0) # allows to return the alignment array as well (useful for e.g. making html table)
    if returnAlignArray:
        return linedup, just
    else:
        return linedup
class Lineup:
    """Align whitespace-separated columns of a block of text lines.

    Reads options from optD and/or the command line (via bkgjelstenArgReader),
    auto-detects the table's column count and per-column alignment (numbers
    right, text left), and produces the aligned lines in self.linedup.
    `s` is used in place of the conventional `self` throughout.
    NOTE(review): the mutable defaults (lines=[], cmd=[...], optD={}) are
    only read or copied here, but callers should still avoid mutating them.
    """
    def __init__(s, lines=[], cmd=['ReadArg','PostInit','Lineup'], optD={}):
        """Set defaults, then run the pipeline steps named in `cmd`
        ('ReadArg', 'stdin.readlines', 'PostInit', 'Lineup', 'Print')."""
        # ====================== PRE INIT
        if 'argv' in optD: s.argv = optD['argv']
        else: s.argv = sys.argv
        s.cmd = cmd
        s.myname = sys.argv[0].split('/').pop()
        s.VB = 0
        s.HOME = os.getenv('HOME')
        s.cwd = os.getcwd() # current work directory
        #s.dir0 = '%s/XXX' %(s.HOME)
        s.dir0 = ''
        s.dict = {}
        s.warn = []
        s.fn_warn = 'warnings.txt'
        s.fn_report = 'report'
        s.report = []
        s.lines = list(lines)
        # Default separation between columns (spaces).
        s.lineup_nspaces = 3
        if 'n' in optD: s.lineup_nspaces = optD['n']
        if 'nspaces' in optD: s.lineup_nspaces = optD['nspaces']
        s.lineup_align = ['l','r']
        s.lineup_autoalign = 1 # skips lineup_align
        s.lineup_autoexcept = 1 # skips lines with wrong n(col)
        s.lineup_exceptlines = []
        s.lineup_exceptpattern = ['---------------','===============']
        s.lineup_adjusthline = 1 # lines in lines_except which are hline in '-','=' or '#' get their width adjusted
        s.lineup_defline = -1
        s.lineup_header = [0] # header lines ... will not affect the auto-alignment based on whether a column is numbers or not
        s.lineup_headerpattern = [] # header lines ... will not affect the auto-alignment based on whether a column is numbers or not
        s.lineup_wordisnumber = ['None','NaN','NONE','NAN','none','nan','-'] # these words are treated as numbers (i.e. right-aligned): e.g. 'None', 'NaN'
        s.lineup_maxcol = 999
        s.lineup_colsonly = []
        s.lineup_colsskip = []
        s.lineup_delim = ['']
        # ====================== READ ARG
        if 'ReadArg' in s.cmd: s.ReadArg()
        # ====================== READ ARG
        if 'stdin.readlines' in s.cmd:
            zz = sys.stdin.readlines()
            for z in zz: s.lines.append(z.rstrip())
        # ====================== POST INIT
        if 'PostInit' in s.cmd: s.PostInit()
        # ====================== EXECUTE
        if 'Lineup' in s.cmd:
            s.res = s.Lineup()
            s.linedup = s.res['outs'] #Lineup()
        # ====================== PRINT?
        if 'Print' in s.cmd:
            for out in s.linedup: print out
        # ====================== RETURN?
        #if 'Return' in s.cmd:
        #    return s.linedup # apparently not allowed
        # SUMMARY
        if len(s.warn):
            print 'WARNINGS (%i):' %(len(s.warn))
            for warn in s.warn: print ' '+warn

    # ##########
    def PostInit(s):
        """Prefix the warning/report file names with dir0 when it is set."""
        if s.dir0: s.fn_warn = '%s/%s' %(s.dir0, s.fn_warn)
        if s.dir0: s.fn_report = '%s/%s' %(s.dir0, s.fn_report)

    # ##################################################### CLASS METHODS
    # ##################################################### CLASS METHODS
    # ##################################################### CLASS METHODS
    # ##########
    def showHelp(s):
        """Print command-line usage to stdout."""
        print ' Usage: cat <txtfile> | %s [options]' %(s.myname)
        #print '   %s -dict test,txt1:I,testI,3:test2,txt2a,txt2b:F,testF,4.14  # for using autodict (NB: vars need to be defined in __init__)' %(s.myname)
        print " Ex: cat <txtfile> | %s --clean  # to clean any auto-settings" %(s.myname)
        print
        print " Note: Default is auto with first line as potential header line"

    # ##########
    def DumpWarnings(s):
        """Write the accumulated warnings to the warnings file (fn_warn)."""
        f = open(s.fn_warn,'w')
        for out in s.warn: f.write('%s\n' %(out))
        f.close()

    # ##########
    def Lineup(s):
        """Align s.lines and return {'outs': aligned lines, 'just': per-column
        alignment codes ('l'/'r')}.

        Lines whose column count differs from the majority (autoexcept),
        listed exception lines, and lines matching exceptpattern are passed
        through unaligned; pure '-'/'='/'#' rules are stretched to the full
        table width.
        """
        lines = s.lines
        Nlines = len(lines)
        maxwidth = []
        lines_except = []
        autoalign = []
        outs = []
        # ### colsonly, colsskip: replace text in header with column
        for iL in s.lineup_header: # can make more general by giving real header in separate variable
            if len(lines) <= iL: continue # fragile?
            whead = lines[iL].strip().split()
            for lineup_cols in [s.lineup_colsonly, s.lineup_colsskip]:
                notreplaced = []
                for iC in range(len(lineup_cols)):
                    col = lineup_cols[iC]
                    if type(col) is not int:
                        # assume it is string
                        wasreplaced = False
                        for iw in range(len(whead)):
                            headtxt = whead[iw]
                            if headtxt == col:
                                lineup_cols[iC] = iw # replace the text with the integer from the header
                                wasreplaced = True
                                break
                        if not wasreplaced:
                            notreplaced.append(col)
                # Drop (and warn about) requested columns missing from the header.
                for col in notreplaced:
                    lineup_cols.remove(col)
                    s.warn.append("Warning Lineup: column '%s' ignored as was not found in header" %(col))
        # ### autoexcept: find the number of columns in the table lines, except all others from aligning
        ncols = {}
        if s.lineup_autoexcept:
            # Histogram of column counts; the most common one defines the table.
            for iL in range(Nlines):
                nw = len(lines[iL].strip().split())
                if nw in [0,1]: continue
                if nw not in ncols: ncols[nw] = 0
                ncols[nw] += 1
            # ---
            themax = [-1,-1]
            for nw in ncols:
                if s.VB: print ' %2i: %2i' %(nw,ncols[nw])
                if ncols[nw] > themax[1]: themax = [nw,ncols[nw]]
            ncoltable = themax[0]
            # NOTE(review): this assigns a local `autoexcept`, not
            # s.lineup_autoexcept — confirm whether disabling was intended.
            if ncoltable == -1: autoexcept = 0
            #ncoltable = max(ncols, key=ncols.get)  # method to get key with largest value
            #print 'table: ', ncoltable
        # ---
        # ### Find maxwidth per column
        for iL in range(Nlines):
            line = lines[iL].strip()
            word = line.split() #string.split(line)
            if s.VB>1: print 'len(word):%i  ncoltable:%i' %(len(word), ncoltable)
            # --- skip some lines (will be printed as were)
            if iL in s.lineup_exceptlines or iL-Nlines in s.lineup_exceptlines or (s.lineup_autoexcept and len(word) != ncoltable):
                lines_except.append(iL)
                if s.VB>1: print "except line %2i: %s" %(iL, line)
                continue
            skip = 0
            for patt in s.lineup_exceptpattern:
                if patt in line:
                    skip = 1
                    break
            if skip:
                lines_except.append(iL)
                continue
            # --- end skipping
            # allow changes to skip columns:
            if s.lineup_colsonly:
                line2 = ''
                #for iw in range(len(word)):
                #    if iw in s.lineup_colsonly: line2 += ' '+word[iw]
                for iw in s.lineup_colsonly: # rearranging too!
                    if iw < len(word): line2 += ' '+word[iw]
                lines[iL] = line = line2.strip()
                word = line.split()
            if s.lineup_colsskip:
                line2 = ''
                for iw in range(len(word)):
                    if iw not in s.lineup_colsskip: line2 += ' '+word[iw]
                lines[iL] = line = line2.strip()
                word = line.split()
            # ---
            # Go through the columns
            for iw in range(len(word)):
                # fills out maxwidth upon need
                if(iw==len(maxwidth)):
                    maxwidth.append(0) # init first time
                    autoalign.append('r')
                if(len(word[iw])>maxwidth[iw]): maxwidth[iw]=len(word[iw])
                # --- autoalign
                for patt in s.lineup_headerpattern:
                    if patt in line and iL not in s.lineup_header:
                        s.lineup_header.append(iL)
                        # print s.lineup_header
                        break
                if iL not in s.lineup_header: # and iL not in lineup_exceptlines:
                    try:
                        if word[iw].endswith('%'): float(word[iw][:-1]) # allows percentages
                        else: float(word[iw])
                        # if is float, don't have to do anything, because was already init'ed as float, i.e. 'r'
                    except:
                        if word[iw] not in s.lineup_wordisnumber:
                            # print iL, iw, word[iw]
                            autoalign[iw] = 'l' # if one time not a number, then left-align
        # ---
        # ### Combine autoalign and lineup_align (simple for now:one or the other, no combination)
        if s.lineup_autoalign: just = autoalign
        else: just = s.lineup_align
        # print just
        njust = len(just)
        maxcol = s.lineup_maxcol
        #rightmargin = sum(maxwidth) + s.lineup_nspaces * (len(maxwidth)-1)  # need to generalise when delim is set
        # if inserting text the total width must be calculated like this:
        # Make s.lineup_delim complete in case it is not
        #if s.lineup_delim:
        for i in range(len(s.lineup_delim), len(maxwidth)):
            s.lineup_delim.append(s.lineup_nspaces*" ")
        s.lineup_delim.append("")
        #if len(s.lineup_delim) > 0:
        # Total width of an aligned line (used for stretching hlines).
        rightmargin = len(s.lineup_delim[len(maxwidth)])
        for i in range(len(maxwidth)):
            rightmargin += len(s.lineup_delim[i])
            rightmargin += maxwidth[i]
        #if len(s.lineup_delim) > len(maxwidth): rightmargin += len(s.lineup_delim[len(maxwidth)-1])
        # print rightmargin
        for iL in range(Nlines):
            line = lines[iL].strip()
            word = line.split()
            outline=""
            if iL in lines_except:
                if line != '' and s.lineup_adjusthline:
                    if line == len(line)*'-' or line == len(line)*'=' or line == len(line)*'#':
                        line = rightmargin*line[0]
                        #print line
                outs.append(line)
                continue
            for iw in range(len(word)):
                if iw > maxcol-1: # not sure what the intention of this was. (never/rarely used)
                    outline+=" "+word[iw]
                    continue
                #if iw == 0 and s.lineup_delim: outline += s.lineup_delim[iw]
                #if 0 < iw < len(s.lineup_delim): outline += s.lineup_delim[iw] + s.lineup_nspaces*" "
                #if iw>0: outline += s.lineup_nspaces*" "
                outline += s.lineup_delim[iw]
                if iw > njust-1:
                    thisjust = just[njust-1]
                else:
                    thisjust = just[iw]
                if thisjust in ('l','L'):
                    outline+=word[iw].ljust(maxwidth[iw])
                elif thisjust in ('r','R'):
                    outline+=word[iw].rjust(maxwidth[iw])
                elif thisjust in ('c','C'):
                    # outline+=word[iw].center(maxwidth[iw]+myspace)
                    outline+=word[iw].center(maxwidth[iw])
                else:
                    print "non-allowed code: %s" %thisjust
            outline = outline.rstrip()
            #print outline
            outs.append(outline)
        res = {}
        res['outs'] = outs
        res['just'] = just
        return res

    # ##########
    def ReadArg(s):
        """Parse command-line options from s.argv into the lineup_* settings."""
        # ################################### ARGUMENT READING
        Arg = bkgjelstenArgReader.ArgReader(s.argv, VB=0)
        '''
        if Arg.hasget('-alist'): print 'a string list: ',Arg.list()
        if Arg.hasget('-alisti'): print 'an integer list: ',Arg.listI()
        if Arg.hasget('-alistf'): print 'a float list: ',Arg.listF()
        if Arg.hasget('-x'): print 'a string: ',Arg.val()
        if Arg.hasget('-xI'): print 'an integer: ',Arg.valI()
        if Arg.hasget('-xF'): print 'a float: ',Arg.valF()
        '''
        if Arg.has(['-h','--help','--h','-help']):
            s.showHelp()
            sys.exit()
        if Arg.hasget('-vb'):
            s.VB = Arg.valI()
            if s.VB: print 'Verbosity level: %i' %(s.VB)
        if Arg.has(['--clean','--c']):
            # this option is to clean any default auto-settings
            s.lineup_header = []
            s.lineup_wordisnumber = []
        if Arg.hasget(['-delim']):
            s.lineup_delim = Arg.list()
            #print s.lineup_delim
            s.lineup_nspaces = 0 # <-- so removes those ... need to set manually with -nspaces
        if Arg.hasget(['-nspaces','-n']):
            s.lineup_nspaces = Arg.valI()
        if Arg.hasget('-header'):
            # Integers are header line indices; anything else is a pattern.
            for z in Arg.list():
                try: s.lineup_header.append(int(z))
                except: s.lineup_headerpattern.append(z)
        if Arg.hasget('-colsonly'):
            s.lineup_colsonly = Arg.listIif()
        if Arg.hasget('-colsskip'):
            s.lineup_colsskip = Arg.listIif()
        if Arg.hasget(['-likenumberclean','-isnumberclean','-wordisnumberclean','-treatlikenumberclean']):
            s.lineup_wordisnumber = Arg.list()
        if Arg.hasget(['-likenumber','-isnumber','-wordisnumber','-treatlikenumber']): # Note, this adds to the existing wordisnumbers
            s.lineup_wordisnumber += Arg.list()
        if Arg.hasget('-exceptlines'):
            s.lineup_exceptlines = Arg.listI()
        if Arg.hasget('-align'):
            s.lineup_align = Arg.list()
        if Arg.hasget('-autoalign'):
            s.lineup_autoalign = Arg.valI()
        if Arg.hasget('-autoexcept'):
            s.lineup_autoexcept = Arg.valI()
        # ----- The new general procedure for var input (should this be put into the ArgReader?)
        if Arg.hasget('-dict'):
            # Format: [I|F,]key,value[,value...] groups separated by ':'.
            zs = Arg.list(':')
            # print zs
            for z in zs:
                zw = z.split(',')
                # First determine var type (default is string)
                ztype = 'string'
                if zw[0] in ['I']: ztype = zw.pop(0)
                elif zw[0] in ['F']: ztype = zw.pop(0)
                # Then get the key / var name and check
                key = zw.pop(0)
                if key not in s.dict:
                    # this restriction might be dropped
                    print s.dict
                    sys.exit('FATAL  non-existing var set with -var: %s  (%s)' %(key, zs))
                if len(zw) == 0: sys.exit('FATAL  non-allowed arg for -var: %s' %(zs))
                # The fill the dict/var
                s.dict[key] = [] # First make a list. If only one entry, turn list into a plain value (bottom)
                for zw1 in zw:
                    zval = zw1
                    if ztype == 'I': zval = int(zw1)
                    elif ztype == 'F': zval = float(zw1)
                    s.dict[key].append(zval)
                if len(zw) == 1: s.dict[key] = s.dict[key][0] # if just one entry, don't use list
        # -----
        if not Arg.AllOk():
            print 'Problems...'
            s.showHelp()
            sys.exit("FATAL  Ending due to problems of arguments")
        # ################################### POST-INIT
############################## EXECUTE IF RUN AS SCRIPT (NOT JUST IMPORTED)
if __name__ == '__main__':
    # Filter mode: read all of stdin, align the columns, print the result.
    t = Lineup(cmd=['ReadArg','stdin.readlines', 'PostInit','Lineup','Print'])
##############################
|
from __future__ import print_function
from sympy import *
# numerical value of PI
# numerical value of PI
PI = pi.n()

def gen_basis_set( h, N ):
    """Build N sinc (Whittaker cardinal) basis functions on a uniform grid.

    h -- grid spacing; N -- number of grid points (grid is centred on 0).
    Returns (xgrid, bfs): the grid points and the matching sympy
    expressions sin(pi*(x-xi)/h) / (sqrt(h) * pi*(x-xi)/h).
    """
    # generate grid points
    A = -(N-1)/2.0*h  # leftmost grid point; grid symmetric about the origin
    xgrid = []
    for i in range(N):
        xgrid.append(A + i*h)
    #
    x = symbols('x')
    bfs = []
    for i in range(N):
        bfs.append( sin(PI/h*(x-xgrid[i]))/sqrt(h)/(PI*(x-xgrid[i])/h) )
    #
    return xgrid, bfs

# Module-level demo parameters and basis set.
h = 0.2
N = 5
x = symbols('x')
xgrid, bfs = gen_basis_set(h, N)

# need to use scipy
import scipy.special as sp
import numpy as np
import math
def compute_F( t, x_bar, h ):
    """Evaluate the two-centre integral F(t, x_bar) for sinc basis functions
    of spacing h (Lee-Tuckerman form).

    t     -- quadrature parameter (> 0)
    x_bar -- distance between the two basis-function centres
    h     -- grid spacing
    For coincident centres (x_bar ~ 0) this is sqrt(h)*erf(pi/(2*h*t)); for
    separated centres the scaled complementary error function erfcx of a
    complex argument keeps the evaluation numerically stable.
    """
    if x_bar < 1e-30:
        # Diagonal case: closed form via the error function.
        return math.sqrt(h)*sp.erf( np.pi/(2*h*t) )
    else:
        # BUG FIX: np.complex was deprecated and removed in NumPy 1.24;
        # the builtin complex type is what it aliased.
        z = complex( np.pi/(2*h*t), t*x_bar )
        w_iz = sp.erfcx( z )  # scaled complementary error function w(iz)
        f = math.exp( -t**2 * x_bar**2 )
        f = f - np.real( np.exp(-t**2 * x_bar**2 - z*z)*w_iz )
        f = f*math.sqrt(h)
        return f
# Demo: compare the closed-form F against direct sympy quadrature.
t = 0.1
ibf1 = 0
ibf2 = 1
xx = xgrid[ibf2]
x_bar = math.fabs( xx - xgrid[ibf1] )  # distance between the two basis centres
print('x_bar = %18.10f' % x_bar)
print( 'F = %18.10f' % compute_F( t, x_bar, h ))

# diagonal: ibf1 = ibf2, should gives erf
# non-diagonal: ibf1 != ibf2
def calc_F_v1( t, ibf1, ibf2 ):
    """Reference value of F by direct symbolic integration of
    exp(-t^2 (x - x_i)^2) * phi_j(x) over the real line (slow)."""
    f = exp(-t**2*( x - xgrid[ibf1] )**2) * bfs[ibf2]
    print(f)
    print( 'sympy v1: %18.10f' % Integral( f, (x,-oo,oo) ).evalf() )

# Lee-Tuckerman (2008)
def calc_F_v2( t, ibf1, ibf2 ):
    """Reference value of F after the Lee-Tuckerman change of variables:
    integrate exp(-x^2) * sin(beta*(x + t*xbar))/(x + t*xbar) * sqrt(h)/pi."""
    # integrand
    beta = PI/(h*t)
    xbar = xgrid[ibf1] - xgrid[ibf2]
    f = exp(-x**2) * sin(beta*(x + t*xbar))/(x + t*xbar) * sqrt(h)/PI
    print( 'sympy v2: %18.10f' % Integral( f, (x,-oo,oo) ).evalf() )

#calc_F_v1( t, ibf1, ibf2 )
calc_F_v2( t, ibf1, ibf2 )
|
from sys import argv

# Usage: python script.py <source> <dest>
# Counts how many consecutive 10%-limit-up days are needed for a stock
# price to grow from `source` to `dest`.
scripts, source, dest = argv
i = 0
source = float(source)
dest = float(dest)
print ("source is:", source)
print ("dest is:", dest)
while source <= dest:
    # Each limit-up day multiplies the price by 1.1.
    source = source * (1 + 0.1)
    i = i + 1
print ("需要", i, "次涨停")
# Read a text file and print the mode (most frequent word, case-insensitive).
fileName = input('Enter a filename: ')

# Collect every whitespace-separated word, upper-cased so counting is
# case-insensitive.  `with` guarantees the file is closed (the original
# left it open).
words = []
with open(fileName, 'r') as file:
    for line in file:
        for word in line.split():
            words.append(word.upper())

# BUG FIX: the original set every count to 1 and never incremented, so all
# words appeared to occur exactly once and the "mode" was arbitrary.
counts = {}
for word in words:
    counts[word] = counts.get(word, 0) + 1

if counts:
    # BUG FIX: the original compared counts against the builtin function
    # `max` (always False) instead of the computed maximum, so nothing was
    # ever printed.
    maximum = max(counts.values())
    for key in counts:
        if counts[key] == maximum:
            print('The mode is: ', key)
            break
else:
    # Robustness: an empty file has no mode (original raised ValueError).
    print('The file contains no words.')
import sys
import os
import datetime
import inspect
import h5py
import random
import numpy as np
import matplotlib.pyplot as plt
import littlefish.core.fish as fi
import littlefish.core.simulation as si
import littlefish.core.terrain as tr
# Run one little-fish simulation with fixed seeds and archive everything
# (fish, terrain, full log, and this script's source) into a timestamped
# HDF5 file under log_folder.
log_folder = r'C:\little_fish_simulation_logs'
simulation_length = 2000 # 100000
random_seed = 111
np_random_seed = 50

if not os.path.isdir(log_folder):
    os.mkdir(log_folder)
os.chdir(log_folder)

# Seed both RNGs so the run is reproducible.
random.seed(random_seed)
np.random.seed(np_random_seed)

fish = fi.generate_standard_fish()
save_name = 'fish_' + datetime.datetime.now().strftime('%y%m%d_%H_%M_%S') + '.hdf5'
# NOTE(review): h5py.File without an explicit mode is deprecated in newer
# h5py (default used to be 'a') — consider h5py.File(save_name, 'a').
save_f = h5py.File(save_name)
fish_grp = save_f.create_group('fish_' + fish.name)
fish.to_h5_group(fish_grp)

# Build a binary land/sea terrain and show it before simulating.
tg = tr.TerrainGenerator(size=[128, 128], sea_level=0.6)
terrain_map = tg.generate_binary_map(sigma=3., is_plot=True)
plt.show()
terrain = tr.BinaryTerrain(terrain_map)
simulation = si.Simulation(terrain=terrain, fish_list=[fish],
                           simulation_length=simulation_length, food_num=20)
simulation.initiate_simulation()
msg = simulation.run(verbose=1)

# Archive run metadata alongside the log, including this script's source
# for provenance.
sim_grp = save_f.create_group('simulation_' + datetime.datetime.now().strftime('%y%m%d_%H_%M_%S'))
sim_grp['random_seed'] = random_seed
sim_grp['np_random_seed'] = np_random_seed
sim_grp['simulation_length'] = simulation_length
sim_grp['script_txt'] = inspect.getsource(sys.modules[__name__])
simulation.save_log_to_h5_grp(sim_grp, is_save_psp_waveforms=False)
print('for debug ...')
#!/usr/bin/env python
# ------------------------------------------------------------------------------
# helper.py
# Author: Alan Ding
# ------------------------------------------------------------------------------
from database import db, Message
from sqlalchemy import func
def add_message(time, sender, message):
    """Persist a chat message, evicting the oldest one once the log exceeds
    9990 entries so the table stays bounded.

    BUG FIX: the original passed a Query object (the min-time aggregate) to
    session.delete(), which fails — delete() requires a mapped instance.
    Fetch the oldest Message row itself and delete that.
    """
    if len(get_message_log()) > 9990:
        oldest = db.session.query(Message).order_by(Message.time).first()
        if oldest is not None:
            db.session.delete(oldest)
            db.session.commit()
    message = Message(time=time, sender=sender, message=message)
    db.session.add(message)
    db.session.commit()
def _message_time(message):
    """Sort key: a message's timestamp."""
    return message.time
# Returns a list of dictionaries sorted by time
def get_message_log():
    """Return every stored message as a dict with 'time', 'sender' and
    'message' keys, ordered oldest first.

    BUG FIX: the original called sorted() and discarded its return value
    (sorted() does not sort in place), so the log came back in arbitrary
    database order.
    """
    all_messages = sorted(db.session.query(Message).all(), key=_message_time)
    return [{'time': m.time, 'sender': m.sender, 'message': m.message}
            for m in all_messages]
|
from config.wsgi import *
from core.brain.models import *

# Seed data: job positions and lines of business (LOBs) for the brain app's
# lookup tables.
positions = ['Agente', 'Supervisor', 'Formador', 'ACCM']
Lobs = ['BGI ARGENTINA', 'BGI CHILE', 'BGI COLOMBIA', ]

# Create one Position row per name.  Errors (e.g. duplicates on re-run) are
# printed and skipped so the script stays idempotent-ish.
# Idiom fix: iterate the values directly instead of range(len(...)).
for position_name in positions:
    try:
        position = Position()
        position.position_name = position_name
        position.save()
    except Exception as e:
        print(e)

# Same for the lines of business.
for lob_name in Lobs:
    try:
        lob = Lob()
        lob.lob_name = lob_name
        lob.save()
    except Exception as e:
        print(e)
|
from django.contrib import admin
from django.urls import path
from . import views
# URL routes for this app: the landing page and the file-upload page.
urlpatterns = [
    path("", views.index, name="homepage"),
    path("addfile/", views.add_file, name="yougotitfiles"),
]
# -*- coding: utf-8 -*-
from openerp.osv import fields, osv
from datetime import datetime
import openerp.addons.decimal_precision as dp
class account_move(osv.osv):
    """Extension of account.move that recomputes debit/credit amounts of the
    move's lines from their foreign-currency amount and the exchange rate
    in effect at the move's date."""
    _name = "account.move"
    _inherit = "account.move"

    def onchange_import(self,cr,uid,ids,context=None):
        """For every line of the moves in `ids`: look up the most recent
        res.currency.rate not later than the move date, convert
        amount_currency at that rate, and write the absolute value into
        debit (positive amounts) or credit (negative amounts).
        Returns True."""
        act_obj = self.pool.get('account.move')
        upd_obj = self.pool.get('account.move.line')
        cur_obj = act_obj.browse(cr, uid, ids, context=context)
        obj_rate = self.pool.get('res.currency.rate')
        for v in cur_obj:
            move_id = upd_obj.search(cr, uid,[('move_id', '=', v['id'])])
            if move_id:
                for mv in move_id:
                    obj= upd_obj.read(cr, uid, mv)
                    # Most recent rate for the line's currency on or before
                    # the move date (search order puts it first).
                    # NOTE(review): raises IndexError when no rate exists,
                    # and assumes every line has a currency_id — confirm.
                    accounts_rate_id = obj_rate.search(cr, uid,[('currency_id', '=', obj['currency_id'][0]),('name','<=',v['date'])],limit=10)
                    divisa = obj_rate.browse(cr, uid, int(accounts_rate_id[0]))
                    divisa_rate =divisa['rate']
                    if obj['amount_currency'] > 0:
                        monto = obj['amount_currency'] / divisa_rate
                        if monto <0:
                            monto *= -1
                        upd_obj.write(cr, uid, mv,{'debit' : monto ,'credit' : 0 }, context=context)
                    elif obj['amount_currency'] < 0:
                        # Negative amounts go to credit, also as a positive value.
                        monto = obj['amount_currency'] / divisa_rate
                        if monto <0:
                            monto *= -1
                        upd_obj.write(cr, uid, mv,{'credit' : monto ,'debit' : 0}, context=context)
        return True
# Legacy OpenERP registration idiom: instantiating registers the model.
account_move()
# -*- coding:utf-8 -*-
from flask import Flask
from flask_cors import CORS
from datetime import datetime, timedelta
# Application factory-less setup: a single module-level Flask app with CORS
# enabled for all routes.
app = Flask(__name__)
# WARNING(review): secret key and DB credentials are hard-coded; move them
# to environment variables / instance config before deploying.
app.config['SECRET_KEY'] = 'skeyasdasdasdas'
app.config['JWT_EXPIRATION_DELTA'] = timedelta(days=50)  # tokens valid for 50 days
app.config['JWT_AUTH_URL_RULE'] = None  # disable flask-jwt's default /auth endpoint
app.config['JWT_AUTH_EMAIL_KEY'] = "email"  # authenticate by email, not username
#app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///app.db' #'mysql+mysqldb://libtrip:libtrip2016@47.88.17.25:3306/libtrip?charset=utf8'
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+mysqldb://user:pass@host:port/dbname?charset=utf8'
app.config['TEMP_DIR'] = 'tmp'  # scratch directory for temporary files
CORS(app)
|
class Solution:
    def isValid(self, characters):
        """Return True if `characters` consists solely of balanced
        (), [], {} bracket pairs.

        Works by repeatedly deleting adjacent matched pairs; a balanced
        string of length n empties after at most n/2 passes.
        """
        length = len(characters)
        if length % 2 == 1:
            # BUG FIX: the original fell through with a bare `return`,
            # yielding None instead of an explicit boolean.
            return False
        for _ in range(length // 2):
            characters = characters.replace('[]', '').replace('{}', '').replace('()', '')
        return len(characters) == 0
|
#!/usr/bin/env python
# coding: utf-8

# Exported Jupyter notebook: price-prediction experiments (SVC, SGD
# classifier, random forest) on the skychallenge car dataset.

# In[1]:

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# Load the cleaned dataset, keep only the first 10k rows, drop duplicates.
df = pd.read_csv('skychallenge_clean_data.csv')
df = df.head(10000)
# df = df.drop(columns = ['condition', 'fuel', 'transmission'])
df = df.drop_duplicates()
df

# In[2]:

from statsmodels.graphics.correlation import plot_corr
from sklearn.model_selection import train_test_split
import seaborn as sns
plt.style.use('seaborn')

# Features = everything except price; target = price.  75/25 split with a
# fixed seed for reproducibility, then a Pearson correlation matrix on the
# training portion.
x = df.drop('price', axis=1)
y = df[['price']]
seed = 42
test_data_size = 0.25
X_train, X_test, Y_train, Y_test = train_test_split(x,y, test_size = test_data_size, random_state = seed)
train_data = pd.concat([X_train, Y_train], axis = 1)
test_data = pd.concat([X_test, Y_test], axis = 1)
corrMatrix = train_data.corr(method= 'pearson')
xnames = list(train_data.columns)
ynames = list(train_data.columns)

# In[ ]:

# Experiment 1: support-vector classifier.
from sklearn import linear_model
from sklearn.metrics import explained_variance_score, accuracy_score
from sklearn.svm import SVC
X_train = np.array(X_train)
Y_train = np.ravel(np.array(Y_train))  # sklearn wants a 1-D target vector
X_test = np.array(X_test)
Y_test = np.array(Y_test)
clf = SVC()
clf.fit(X_train, Y_train)
preds = clf.predict(X_train)
# print(type(preds))
# print(type(Y_test))
# NOTE(review): scored on the training data itself, so this measures fit,
# not generalisation.
train_acc = explained_variance_score(Y_train, preds)

# In[24]:

print(train_acc)

# In[9]:

# Experiment 2: SGD classifier, scored on the held-out test split.
from sklearn import linear_model
from sklearn.metrics import explained_variance_score, accuracy_score
X_train = np.array(X_train)
Y_train = np.ravel(np.array(Y_train))
X_test = np.array(X_test)
Y_test = np.array(Y_test)
clf = linear_model.SGDClassifier(max_iter = 10000)
clf.fit(X_train, Y_train)
preds = clf.predict(X_test)
train_acc = accuracy_score(Y_test, preds)
print(train_acc)

# In[3]:

# Experiment 3: random forest, scored on the training data.
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
rf_model = RandomForestClassifier(random_state=1, n_estimators=50, max_depth=10, min_samples_leaf=3)
rf_model.fit(X_train, Y_train)
train_preds = rf_model.predict(X_train)
train_acc = accuracy_score(Y_train, train_preds)
print(train_acc)

# In[ ]:
|
import torch
from pytorch_pretrained_biggan import (BigGAN, one_hot_from_names, truncated_noise_sample,
save_as_images, display_in_terminal, convert_to_images)
import os
from quart import Quart, send_file, request, make_response
import os
import pickle
import numpy as np
import PIL.Image
import dnnlib
import dnnlib.tflib as tflib
import config
import random
app = Quart(__name__)
tflib.init_tf()  # StyleGAN pickles need an active TF session to deserialise

# Pre-load every generator once at startup: two BigGAN resolutions plus two
# StyleGAN pickles (the last element of each pickle is the Gs network).
# NOTE(review): unpickling model files is only safe for trusted local files.
models = {
    "biggan-deep-512" : BigGAN.from_pretrained('biggan-deep-512'),
    "biggan-deep-256" : BigGAN.from_pretrained('biggan-deep-256'),
    "waifu" : pickle.load(open("2019-04-30-stylegan-danbooru2018-portraits-02095-066083.pkl", 'rb'))[-1],
    "celeb" : pickle.load(open("karras2019stylegan-celebahq-1024x1024.pkl", 'rb'))[-1]
}
def get_model(name="biggan-deep-256"):
    "Get the deep model from known models"
    # Models are pre-loaded at startup; raises KeyError for unknown names.
    return models[name]
def generate_waifu(model_name, truncation):
    """Generate one random image with a StyleGAN model ('waifu' or 'celeb'),
    save it as images/<n>.png and return that path.

    truncation is passed to StyleGAN as truncation_psi.
    """
    global img_i
    model = get_model(model_name)
    rnd = np.random.RandomState(random.randint(1, 10000))
    latents = rnd.randn(1, model.input_shape[1])
    # gen image
    fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
    images = model.run(latents, None, truncation_psi=float(truncation), randomize_noise=True, output_transform=fmt)
    # BUG FIX: generate_image created the images/ directory but this path
    # did not, so the save failed when StyleGAN was hit first.
    os.makedirs("images", exist_ok=True)
    file_name = f"images/{img_i}.png"
    img_i += 1
    print(f"Generated waifu at {file_name}")
    PIL.Image.fromarray(images[0], 'RGB').save(file_name)
    return file_name
# Monotonic counter used to name generated image files.
img_i = 0

def generate_image(thing="mushroom", model_name="biggan-deep-512", truncation=0.4):
    "Generate an image of *thing* from the model, save it and return the path"
    # StyleGAN models take a different path (no class conditioning).
    if model_name in ["waifu", "celeb"]: return generate_waifu(model_name, truncation)
    global img_i
    model = get_model(model_name)
    # Prepare a input
    class_vector = one_hot_from_names([thing], batch_size=1)
    noise_vector = truncated_noise_sample(truncation=truncation, batch_size=1)
    # All in tensors
    noise_vector = torch.from_numpy(noise_vector)
    class_vector = torch.from_numpy(class_vector)
    # If you have a GPU, put everything on cuda
    noise_vector = noise_vector.to('cuda')
    class_vector = class_vector.to('cuda')
    model.to('cuda')
    # Generate an image
    with torch.no_grad():
        output = model(noise_vector, class_vector, truncation)
    # If you have a GPU put back on CPU
    output = output.to('cpu')
    img = convert_to_images(output)
    out = img[0]
    # Ensure the output directory exists before saving.
    # Idiom fix: os.makedirs replaces the shelled-out `mkdir -p` (portable,
    # no subprocess).
    os.makedirs("images", exist_ok=True)
    file_name = f"images/{img_i}.png"
    img_i += 1
    out.save(file_name, 'png')
    print(f"Generated an image of {thing} in file {file_name} with model {model_name}")
    return file_name
import traceback


@app.route('/')
def image_request():
    """HTTP entry point: generate an image for ?thing=&model=&truncation=
    and send it back as image/png; 502 with a plain message on failure."""
    thing = request.args.get('thing') or "mushroom"
    truncation = request.args.get('truncation') or 0.7
    model = request.args.get('model') or "waifu"
    try:
        print(f"{thing} {model} {truncation}")
        filename = generate_image(thing=thing, model_name=model, truncation=float(truncation))
        return send_file(filename, mimetype="image/png")
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallows SystemExit and
        # KeyboardInterrupt; catch Exception and log the traceback instead.
        traceback.print_exc()
        return make_response(("Can't generate image", 502))
if __name__ == "__main__":
    # Warm both model families before serving requests.
    # Need to generate BigGAN first, otherwise waifu exhausts the GPU
    # memory (on my 1060 Ti)
    generate_image(model_name="biggan-deep-512")
    generate_image(model_name="waifu")
    app.run(debug=True)
    # print("Generating a test image")
    # print(generate_image("lamp"))
|
import os

def save_picture(form_picture):
    """Save an uploaded picture under static/pictures and return the stored
    file name.

    SECURITY FIX: the client-supplied filename is untrusted; storing it
    verbatim allowed path traversal (e.g. '../../x') and collisions between
    users uploading the same name.  Only the extension is kept; the base
    name is replaced with a random hex token.
    """
    import secrets
    _, f_ext = os.path.splitext(form_picture.filename)
    picture_fn = secrets.token_hex(8) + f_ext
    picture_path = os.path.join(app.root_path, 'static/pictures', picture_fn)
    form_picture.save(picture_path)
    return picture_fn
from flask import render_template,url_for,flash,redirect,request
from flask_1 import app,db,bcrypt
from flask_1.models import user
from flask_1.forms import RegistrationForm,LoginForm,UpdateAccountForm,DataForm1,DataForm2
from flask_login import login_user,current_user,logout_user,login_required
# Static placeholder posts rendered on the home page until real data exists.
posts=[
    {
        'name':'Aman Punetha',
        'email':'amanpunetha@gmail.com',
        'date': 'July 01,2019'
    },
    {
        'name':'Ajay Bisht',
        'email':'ajaybisht@gmail.com',
        'date': 'July 02,2019'
    }
]
@app.route("/")
@app.route("/home")
def home():
    """Render the home page with the placeholder posts."""
    return render_template('home.html',posts=posts)
@app.route("/about")
def about():
    """Render the static about page."""
    return render_template('about.html',title='About')
@app.route("/register",methods=['get','post'])
def register():
    """Create a new user account.

    Already-authenticated users are bounced to the home page.  On a valid
    submission the password is bcrypt-hashed, the user row is committed and
    the visitor is redirected to the login page.
    """
    if current_user.is_authenticated:
        return redirect(url_for('home'))
    form = RegistrationForm()
    if form.validate_on_submit():
        hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
        User = user(username=form.username.data, email=form.email.data, password=hashed_password)
        db.session.add(User)
        db.session.commit()
        # BUG FIX: user-facing message had a typo ('reister'); also dropped
        # the pointless f-prefixes on placeholder-free strings.
        flash('Thank you for registering!', 'success')
        flash('You are now able to access your account!', 'success')
        return redirect(url_for('login'))
    return render_template('register.html', title='Register', form=form)
@app.route("/login",methods=['get','post'])
def login():
    """Authenticate by email + bcrypt-checked password; honours the ?next=
    redirect and flashes an error on failure."""
    if current_user.is_authenticated:
        return redirect(url_for('home'))
    form = LoginForm()
    if form.validate_on_submit():
        User=user.query.filter_by(email=form.email.data).first()
        if User and bcrypt.check_password_hash(User.password,form.password.data):
            login_user(User,remember=form.remember.data)
            # Return the visitor to the page they were heading for, if any.
            next_page=request.args.get('next')
            return redirect(next_page) if next_page else redirect(url_for('home'))
        else:
            flash('Login Unsuccessful. Please check email and password','danger')
    return render_template('login.html',title='Login',form=form)
@app.route("/logout")
def logout():
    """End the current session and return to the home page."""
    logout_user()
    return redirect(url_for('home'))
@app.route("/account",methods=['get','post'])
@login_required
def account():
    """View/update the logged-in user's profile (username, email, optional
    profile picture)."""
    form=UpdateAccountForm()
    if form.validate_on_submit():
        if form.picture.data:
            picture_file=save_picture(form.picture.data)
            current_user.image_file=picture_file
        current_user.username=form.username.data
        current_user.email=form.email.data
        db.session.commit()
        flash(f"Your account has been updated!",'success')
        # Redirect so a refresh doesn't resubmit the form (POST/redirect/GET).
        return redirect(url_for('account'))
    elif request.method=='GET':
        # Pre-fill the form with the current values.
        form.username.data=current_user.username
        form.email.data=current_user.email
    image_file=url_for('static',filename='pictures/'+ current_user.image_file)
    return render_template('account.html',title='Account',image_file=image_file,form=form)
@app.route("/dataform",methods=['get','post'])
@login_required
def dataform():
    """Accept an uploaded image, run OCR-style text extraction on it, then
    redirect to the results page."""
    form=DataForm1()
    if form.validate_on_submit():
        if form.picture.data:
            picture_file=save_picture(form.picture.data)
            # Imported lazily: temp pulls in heavy processing dependencies.
            from flask_1.temp import text_extract
            text_extract(picture_file)
        else:
            print("not done")
        flash(f"Your image is going for data processing .Just wait for result!",'success')
        return redirect(url_for('dataform2'))
    image_file=url_for('static',filename='pictures/'+ current_user.image_file)
    return render_template('data_extractor.html',image_file=image_file,title='Data Analyzer',form=form)
@app.route("/dataform2",methods=['get','post'])
@login_required
def dataform2():
    """Render the data-visualisation/results page for the processed image."""
    form=DataForm2()
    image_file=url_for('static',filename='pictures/'+ current_user.image_file)
    return render_template('data_visual.html',image_file=image_file,form=form)
|
from src.main import db, ma
#---------Models--------------------------
# Association table for the many-to-many relation between users and the
# channels they subscribe to.
subs = db.Table('subs',
    db.Column('user_id', db.Integer, db.ForeignKey('users.user_id')),
    db.Column('channel_id', db.Integer, db.ForeignKey('channels.channel_id'))
)
#------------User Model-----------------
class User(db.Model):
    """A registered user; `subscriptions` lists the channels they follow."""
    __tablename__= 'users'
    user_id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(20))
    # Many-to-many via the `subs` table; Channel gains a dynamic
    # `subscribers` backref.
    subscriptions = db.relationship('Channel', secondary=subs, backref=db.backref('subscribers', lazy='dynamic'))
    def __init__(self, name):
        self.name = name

class UserSchema(ma.Schema):
    """Marshmallow schema serialising a User as (user_id, name)."""
    class Meta:
        fields = ("user_id", "name")

# Reusable schema instances for single objects and collections.
user_schema = UserSchema()
users_schema = UserSchema(many = True)
#------------Channel Model-----------------
class Channel(db.Model):
    """A subscribable channel (gets a `subscribers` backref from User)."""
    __tablename__= 'channels'
    channel_id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(20))
    def __init__(self, name):
        self.name = name

class ChannelSchema(ma.Schema):
    """Marshmallow schema serialising a Channel as (channel_id, name)."""
    class Meta:
        fields = ("channel_id", "name")

channel_schema = ChannelSchema()
channels_schema = ChannelSchema(many = True)

# Create any missing tables at import time (no-op when they already exist).
db.create_all()
import re

# A signed decimal number: optional sign, digits, a dot, digits.
# Raw string avoids the invalid '\.' escape warning on newer Pythons.
regex = r'[+-]?[0-9]+\.[0-9]+'

def find(floatnum):
    """Print and return whether `floatnum` contains a decimal-number
    substring.

    Uses re.search, so a match anywhere in the string counts: "4" fails
    (no fractional part), "5.000" succeeds.  Now returns the boolean as
    well as printing it (previously returned None — backward compatible).
    """
    result = re.search(regex, floatnum) is not None
    print(result)
    return result
if __name__ == '__main__':
    # Demo: only strings containing digits '.' digits match.
    floatnum = "4"
    find(floatnum)
    floatnum = "5.000"
    find(floatnum)
    floatnum = "6.95"
    find(floatnum)
    floatnum = "0.6"
    find(floatnum)
import re

# PAN-like pattern: letters, exactly four digits, then letters.
# BUG FIX: the original '[A-Z]+\d{4}+[A-Z]+' used '{4}+', a possessive
# quantifier — a re.error on Python < 3.11.  '{4}' matches the same
# strings here (and is identical to the possessive form on 3.11+).
regex = r"[A-Z]+\d{4}[A-Z]+"

def pan(pannum):
    """Print and return whether `pannum` contains a PAN-like substring
    (uppercase letters, four digits, uppercase letters)."""
    result = re.search(regex, pannum) is not None
    print(result)
    return result
if __name__ == '__main__':
    # Demo: letters + four digits + letters anywhere in the string match.
    pannum = "DECAA8056B"
    pan(pannum)
    pannum = " BWBPC6417P "
    pan(pannum)
    pannum = "JaMDD8000M"
    pan(pannum)
    pannum = "XABJT54321"
    pan(pannum)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.