text stringlengths 8 6.05M |
|---|
"""
Compute numerical derivatives on a non-uniform (but strictly increasing) grid,
using quadratic Lagrangian interpolation to generate the difference matrix.
"""
import numpy as np
from scipy.sparse import csr_matrix
def differenceMatrix(x):
    """Generate the sparse first-derivative matrix for a non-uniform,
    strictly increasing abscissa.

    Interior rows use a centered three-point stencil whose coefficients are
    the derivatives of the 2nd-order Lagrange basis polynomials; the first
    and last rows use one-sided three-point stencils of the same order.

    ARGS:
        x: array.
            Strictly increasing 1D grid. Must contain at least 3 elements,
            since every quadratic stencil spans three points.

    RETURNS:
        scipy.sparse.csr_matrix D of shape (n, n) such that D @ y
        approximates dy/dx on the grid x.

    RAISES:
        ValueError: if x has fewer than 3 elements (the original code
            failed with an opaque IndexError in that case).
    """
    n = len(x)
    if n < 3:
        raise ValueError("Grid must contain at least 3 elements for quadratic stencils.")
    h = x[1:] - x[:n-1]  # length n-1 array of grid spacings
    # a-coefficients: sub-diagonal entries (one-sided at row 0 and row n-1)
    a0 = -(2*h[0]+h[1])/(h[0]*(h[0]+h[1]))
    ak = -h[1:]/(h[:n-2]*(h[:n-2]+h[1:]))
    an = h[-1]/(h[-2]*(h[-1]+h[-2]))
    # b-coefficients: diagonal entries
    b0 = (h[0]+h[1])/(h[0]*h[1])
    bk = (h[1:] - h[:n-2])/(h[:n-2]*h[1:])
    bn = -(h[-1]+h[-2])/(h[-1]*h[-2])
    # c-coefficients: super-diagonal entries
    c0 = -h[0]/(h[1]*(h[0]+h[1]))
    ck = h[:n-2]/(h[1:]*(h[:n-2]+h[1:]))
    cn = (2*h[-1]+h[-2])/(h[-1]*(h[-2]+h[-1]))
    # Assemble COO-style (val, (row, col)) triplets, then convert to CSR.
    # Column pattern per row: [0,1,2] for row 0, centered for interior rows,
    # [n-3, n-2, n-1] for the last row.
    val = np.hstack((a0, ak, an, b0, bk, bn, c0, ck, cn))
    row = np.tile(np.arange(n), 3)
    dex = np.hstack((0, np.arange(n-2), n-3))
    col = np.hstack((dex, dex+1, dex+2))
    return csr_matrix((val, (row, col)), shape=(n, n))
def strictlyIncreasing(x):
    """Return True when every element of x is greater than its predecessor.

    Arrays with fewer than two elements are trivially strictly increasing.

    ARGS:
        x: array-like.
            Numerical array.
    """
    return all(earlier < later for earlier, later in zip(x, x[1:]))
def deriv(*args):
    """Calculate the numerical derivative on a strictly increasing,
    possibly non-uniform grid, using quadratic Lagrangian interpolation
    to generate the difference matrix.

    ARGS:
        Called with one or two 1-D arrays; two arrays must be of equal length.
        deriv(y) computes the 1st derivative of y on a uniform,
        integer-spaced grid, similar to numpy.gradient.
        deriv(x, y) computes the derivative of y on the non-uniform grid x.

    RETURNS:
        numpy array of the same length as the input(s).

    RAISES:
        ValueError: on mismatched lengths, inputs shorter than 3 elements
            (the quadratic stencils need 3 points; the old minimum of 2
            always crashed downstream), a non-increasing grid, inputs that
            cannot be converted to float, or inputs that are not 1-D.
    """
    if len(args) == 1:
        y = args[0]
        x = np.arange(0, len(y), dtype=np.float64)
    else:
        x = args[0]
        y = args[1]
        if len(x) != len(y):
            raise ValueError("Input arrays must be of equal size.")
    if len(x) < 3:
        raise ValueError("Input array(s) must contain at least 3 elements")
    if not strictlyIncreasing(x):
        raise ValueError("Input grid must be strictly increasing")
    # Condition inputs; catch only conversion failures rather than using a
    # bare except that would also swallow KeyboardInterrupt etc.
    try:
        x = np.asarray(x, dtype=np.float64)
        y = np.asarray(y, dtype=np.float64)
    except (TypeError, ValueError):
        raise ValueError("Inputs could not be conditioned to float arrays.")
    if x.ndim > 1 or y.ndim > 1:
        raise ValueError("Inputs must be 1-D arrays")
    D = differenceMatrix(x)
    return D @ y
|
#!/usr/bin/env python
# Function: download the Baidu front page and save it to baidu.html
# Filename:
import urllib.request

# Use context managers so the HTTP response and the output file are always
# closed. The original also called readlines() after read(), which always
# returned an empty list (the stream was already exhausted) -- dead code.
with urllib.request.urlopen("http://www.baidu.com") as response:
    data = response.read()

# Write the raw bytes so the page's original encoding is preserved.
with open("baidu.html", "wb") as fhandle:
    fhandle.write(data)
# !/usr/bin/python
"""
-----------------------------------------------
Versions File in WIP Directory
Written By: Colton Fetters
Version: 1.4
First release: 12/2017
-----------------------------------------------
DEVELOPER NOTES: Queries the status of maya file and
determines whether or not to version
Version History:
v1.4: ovid transcribe has been added
v1.3: relying more on room operator
v1.2: convention clean up
v1.1: ovid integration
v1.0: initial release
"""
# Import module
import os
# Import ovid
from ovid import transcribe
# Import package
import room_operator
class Core(object):
    """
    Core functionality of version control over a maya scene
    """
    # Shared, class-level logger; created once when the module is imported.
    transcripts = transcribe.Core(loggingName='version', level='info')
    LOGGER = transcripts.logger

    def __init__(self):
        # Module name without extension, used as a display title.
        self._TITLE = os.path.splitext(os.path.basename(__file__))[0]
        self._OPERATOR = room_operator.Core()
        # NOTE(review): file header says v1.4 -- confirm which number is current.
        self._VERSION = 1.3
        self.LOGGER.debug('Object Version {number} has Been Created'.format(number=self._VERSION))

    def existing_file(self):
        """[Creates an Incremented Version of a Maya Scene]
        [Checks the WIP status of the scene and, when the status is 2,
        versions up the file]
        Returns
        -------
        [Bool]
        [Whether a new version was created; implicitly None when the scene
        lies outside the working directory or the status is unrecognised]
        """
        projectPath, scenePath = self._OPERATOR.wip_scene_split()
        if os.path.normpath(self._OPERATOR.workingDir) in os.path.normpath(projectPath):
            status = self._OPERATOR.get_wip_status()
            dirPath, mayaFile = self._OPERATOR.scene_split()
            # BUG FIX: 'status is 1' compared object *identity*, which is an
            # interpreter implementation detail for ints; use '==' for value
            # equality (CPython 3.8+ even warns on the old form).
            if status == 1:
                self.LOGGER.warning('File is not a WIP: {file}'.format(file=mayaFile))
                return False
            elif status == 2:
                flagSplit = '_{wip}_'.format(wip=self._OPERATOR.wipFlag)
                wipList = self._OPERATOR.filter_directory_content(dirPath, flagSplit)
                cleanList = sorted(wipList)
                latestVersion = str(cleanList[-1])
                newFileName = self._OPERATOR.version_up(mayaFile, latestVersion, flagSplit)
                versionFile = '{dirPath}/{newFileName}{ext}'.format(dirPath=dirPath, newFileName=newFileName,
                                                                    ext=self._OPERATOR.mayEXT)
                createdFile = self._OPERATOR.save_file(versionFile)
                self.LOGGER.info('File Versioned: {file}'.format(file=versionFile))
                return createdFile

    def version_file(self):
        """[Versions Maya Scene]
        [Checks whether or not the scene file and current workspace are set]
        Returns
        -------
        [Bool]
        [Version complete status]
        """
        if self._OPERATOR.check_workspace():
            currentFile = self._OPERATOR._currentFile
            if currentFile:
                versioned = self.existing_file()
                return versioned
            else:
                # No scene file yet -- nothing to version; treated as success.
                self.LOGGER.warning('New File')
                return True
        else:
            self.LOGGER.critical('No Workspace Set!')
            return False
def main():
    """Instantiate Core, run a version check on the current scene, and
    return the Core object for interactive use."""
    core = Core()
    core.version_file()
    core.LOGGER.info('Successful Run!')
    return core


if __name__ == '__main__':
    main()
|
#!/usr/bin/python
#\file lambda_local.py
#\brief test lambda with local variable.
#\author Akihiko Yamaguchi, info@akihikoy.net
#\version 0.1
#\date Feb.21, 2017
'''This code is written for understanding a strange behavior of lambda. See
https://stackoverflow.com/questions/42380951/python-using-lambda-as-threading-target-causes-strange-behavior
'''
import time,threading
locker= threading.RLock()
def func(obj):
    # Decrement obj['count'] down to zero, printing progress; the RLock keeps
    # output from concurrent threads from interleaving (Python 2 syntax).
    while obj['count']>0:
        with locker: print 'thread',obj,id(obj)
        obj['count']-= 1
        time.sleep(0.1)
    with locker: print 'finished',obj,id(obj)
#Two patterns
def make_thread1():
    # DEMO OF THE BUG (intentionally left unfixed): the lambda captures the
    # *variable* i, not its value. By the time a thread runs, the loop may
    # have advanced i, so both threads can operate on objs[1] -- the classic
    # late-binding closure pitfall this file exists to illustrate.
    threads= []
    objs= {}
    for i in range(2):
        objs[i]= {}
        objs[i]['id']= i
        objs[i]['count']= (i+2)*2
        t= threading.Thread(name='func'+str(i), target=lambda: func(objs[i]))
        t.start()
        threads.append(t)
    return threads,objs
#
#cf. http://stackoverflow.com/questions/11087047/deferred-evaluation-with-lambda-in-python
def make_thread12():
    # FIXED VARIANT: 'i=i' binds the *current* value of i as a default
    # argument at lambda-creation time, so each thread gets its own index.
    threads= []
    objs= {}
    for i in range(2):
        objs[i]= {}
        objs[i]['id']= i
        objs[i]['count']= (i+2)*2
        t= threading.Thread(name='func'+str(i), target=lambda i=i: func(objs[i]))
        t.start()
        threads.append(t)
    return threads,objs
class TObj:
    # Holder class: passing the *bound method* classes[i].f as the thread
    # target captures the object directly, avoiding late binding entirely.
    def __init__(self):
        self.objs= None
    def f(self):
        func(self.objs)
#One pattern (okay)
def make_thread2():
    # Third pattern (also correct): each thread targets a bound method of its
    # own TObj instance, so there is no shared closure variable at all.
    threads= []
    classes= {}
    for i in range(2):
        classes[i]= TObj()
        classes[i].objs= {}
        classes[i].objs['id']= i
        classes[i].objs['count']= (i+2)*2
        t= threading.Thread(name='func'+str(i), target=classes[i].f)
        t.start()
        threads.append(t)
    return threads,classes
if __name__=='__main__':
    # Switch between the three patterns to compare their behavior.
    #threads,objs= make_thread1()
    threads,objs= make_thread12()
    #threads,classes= make_thread2()
    for t in threads:
        t.join()
|
../rbtree.py |
import pygame
import random
import os
pygame.init()
# The window is a 1600x1000 viewport onto a much larger pre-rendered collage.
window_size = (1600, 1000)
screen = pygame.display.set_mode(window_size)
bgmap = pygame.Surface((16000,16000))
parhFileNames = []
searchPath = ["/home/"]  # root directories to search for images
def fileSearch():
    """Breadth-first scan of the directories in searchPath, collecting .jpg
    file paths into the module-level parhFileNames list.

    Stops once more than 50000 files have been collected (the current
    directory is still finished first, as before). Directories that cannot
    be listed (permissions, races) are skipped instead of crashing.
    """
    while searchPath:
        current = searchPath[0]
        try:
            entries = os.listdir(current)
        except OSError:
            # Unreadable directory: drop it and move on (was a bare except).
            del searchPath[0]
            continue
        for entry in entries:
            fileName = current + entry
            if os.path.isdir(fileName):
                searchPath.append(fileName + "/")
            elif fileName.endswith(".jpg"):
                # endswith replaces the fragile '(name+" ").count(".jpg ")' test.
                parhFileNames.append(fileName)
                if len(parhFileNames) % 1000 == 0:
                    print(len(parhFileNames))  # progress indicator
        del searchPath[0]
        if len(parhFileNames) > 50000:
            return
def bgSet(n):
    # Load n random images from the collected list, scale each to 400x400,
    # and blit n of them at random 400-px-aligned positions onto bgmap.
    global bgt,bg
    j=0
    bgt=[]
    bg=[]
    for i in range(n):
        r=random.randint(0,len(parhFileNames)-1)
        try:
            bgt.append(pygame.image.load(parhFileNames[r]))
        except:
            # Unreadable/corrupt image file: report it and skip.
            print("error file="+parhFileNames[r])
            continue
        bg.append(pygame.transform.smoothscale(bgt[j],(400,400)))
        j+=1
    # NOTE(review): if every load above failed, bg is empty and randint below
    # raises -- assumes at least one image loads successfully.
    for i in range(n):
        r=random.randint(0,len(bg)-1)
        x=random.randint(0,40)
        y=random.randint(0,40)
        bgmap.blit(bg[r], (x*400, y*400), (0,0, 400, 400))
def main():
    # Event loop: refresh the collage every frame and show a random crop.
    end_game = False
    while not end_game:
        bgSet(10)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:end_game = True
        screen.fill((0,0,0))
        # Random 1600x1000 window into the 16000x16000 collage surface.
        screen.blit(bgmap,(0,0),(random.randint(0,14400),random.randint(0,15000),1600,1000))
        pygame.display.flip()
        pygame.time.delay(100)
    pygame.quit()
    quit()
fileSearch()
main()
|
inorder = ["D", "B", "E", "A", "F", "C"]
preorder = ["A", "B", "D", "E", "C", "F"]
preorderIndex = 0
class Node:
    # Simple binary-tree node; children are attached later by buildTree.
    def __init__(self, data):
        self.key = data
        self.left = None
        self.right = None
def find_element(inorder, inorderStart, inorderEnd, key):
    """Return the index of key within inorder[inorderStart..inorderEnd]
    (inclusive bounds), or -1 when it is absent."""
    for idx in range(inorderStart, inorderEnd + 1):
        if inorder[idx] == key:
            return idx
    return -1
def buildTree(inorder, preorder, inorderStart, inorderEnd):
    # Recursively rebuild the binary tree: preorder supplies the next root
    # (via the global cursor preorderIndex); that key's position in inorder
    # splits the remaining range into left and right subtrees.
    global preorderIndex
    if inorderStart > inorderEnd:
        return None
    data = preorder[preorderIndex]
    print data
    newNode = Node(data)
    preorderIndex += 1
    if inorderStart == inorderEnd:
        # Single-element range: leaf node, nothing left to split.
        return newNode
    inorderIndex = find_element(inorder, inorderStart, inorderEnd, newNode.key)
    if inorderIndex == -1:
        # Inconsistent traversals: key missing from the inorder range.
        return None
    newNode.left = buildTree(inorder, preorder, inorderStart, inorderIndex -1)
    newNode.right = buildTree(inorder, preorder, inorderIndex+1, inorderEnd)
    return newNode
def t_inorder(root):
    # Left -> node -> right traversal, printing each key (Python 2 syntax).
    if root:
        t_inorder(root.left)
        print root.key
        t_inorder(root.right)
def t_preorder(root):
    # Node -> left -> right traversal, printing each key (Python 2 syntax).
    if root:
        print root.key
        t_preorder(root.left)
        t_preorder(root.right)
# Build the tree from the two traversals, then verify by re-printing both orders.
newroot = buildTree(inorder, preorder, 0, len(inorder)-1)
print "inorder"
t_inorder(newroot)
print "preorder"
t_preorder(newroot)
|
#extract all cuisines
#create a matrix with restaurant id as rows and cuisines as cols
import pandas as pd
import numpy as np
import os
pd.set_option('display.mpl_style', 'default') # Make the graphs a bit prettier
def get_cuisine_info():
    """Build a one-hot restaurant x cuisine-category matrix from the Yelp
    business dataset.

    Reads the newline-delimited JSON business file (relative to this script),
    extracts the unique set of categories, and returns a DataFrame indexed by
    business_id with one 0/1 column per category plus a 'business_id' column.
    """
    base_dir = os.path.dirname(os.path.realpath('__file__'))
    business_file_path = os.path.join(base_dir, "yelp_boston_academic_dataset/yelp_academic_dataset_business.json")
    # The file is not actually valid JSON: each line is an individual dict.
    # Wrap the lines in brackets and join with commas to form an array.
    with open(business_file_path, 'r') as business_file:
        business_json = '[' + ','.join(business_file.readlines()) + ']'
    df = pd.read_json(business_json)
    df_categories_rest = df[['categories', 'business_id']]
    # Flatten the per-business category lists into one de-duplicated list;
    # sorted() makes the column order deterministic (set order is not).
    category_lists = df['categories'].values.flatten().tolist()
    unique_categories = sorted({item for sublist in category_lists for item in sublist})
    # One-hot matrix initialised directly to 0 (the original filled it with
    # random values and then zeroed it).
    onehot = pd.DataFrame(0, index=df_categories_rest['business_id'],
                          columns=unique_categories)
    for i in range(len(df_categories_rest)):
        # Use positional indexing throughout: the one-hot frame is indexed by
        # business_id strings, so the original chained df[v][i] relied on an
        # integer-position fallback that modern pandas removed.
        for category in df_categories_rest['categories'].iloc[i]:
            onehot.iloc[i, onehot.columns.get_loc(category)] = 1
    onehot['business_id'] = onehot.index
    return onehot


get_cuisine_info()
# coding: utf8
# Ferran March Azañero
# 08/02/2018
# Swap the contents of the two "hands" using a temporary variable, then
# print all three to show the result (Python 2 syntax).
mano_der="Movil"
mano_izq="Bocadillo"
mano_temp=mano_der
mano_der=mano_izq
mano_izq=mano_temp
print mano_izq
print mano_der
print mano_temp
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# author: wdf
# datetime: 9/28/2020 8:23 PM
# software: Windows 10 PyCharm
# file name: 暴力破解rar和zip密码.py
# description: 公众号【特里斯丹】
# usage:
# 安装所需三方库: pip install zipfile rarfile
class Passward():
    """Brute-force extractor for password-protected zip/rar archives."""

    def __init__(self, filename, target_path):
        '''
        :param filename: the archive to crack
        :param target_path: the directory to extract into
        '''
        self.target_path = target_path  # fixed 'tatget_path' typo
        # Pick the handler library from the file extension (imported lazily
        # so the optional rarfile dependency is only needed for .rar input).
        if filename.endswith('.zip'):
            import zipfile
            self.fp = zipfile.ZipFile(filename)
        elif filename.endswith('.rar'):
            import rarfile
            self.fp = rarfile.RarFile(filename)
        else:
            raise Exception("只支持zip和rar解压。")

    def brutal_extract(self, lengths=(4, ), lower=False, upper=False, digit=False, punctuation=False):
        '''
        Try every candidate password of the requested lengths (brute force).

        :param lengths: iterable of password lengths to try, e.g. [4, 5, 6]
        :param lower: include lowercase letters in the alphabet
        :param upper: include uppercase letters in the alphabet
        :param digit: include digits in the alphabet
        :param punctuation: include punctuation characters in the alphabet
        :return: None
        '''
        import string  # alphabet building blocks
        from itertools import product  # enumerates every possible password
        passward_dict = ""
        if lower:
            passward_dict += string.ascii_lowercase
        if upper:
            passward_dict += string.ascii_uppercase
        if digit:
            passward_dict += string.digits
        if punctuation:
            passward_dict += string.punctuation
        print("密码本:\t{}\n密码长度:\t{}\n".format(passward_dict, lengths))
        count = 0
        for length in lengths:
            # BUG FIX: the original used itertools.combinations, which only
            # yields ordered, repetition-free subsets -- it could never
            # produce passwords like "aa" or "ba". product(repeat=length)
            # enumerates the full candidate space.
            for passward in product(passward_dict, repeat=length):
                passward = "".join(passward)
                count += 1
                print(passward, end=" ")
                if self.extract(passward):
                    print()
                    print("一共尝试了{}种可能".format(count))
                    return
        print("对不起,暂未找到,请尝试:\n1. 其他密码长度\n2. 包含更多种类的密码字符")

    def extract(self, passward):
        """Attempt extraction with one candidate password.

        :return: True on success, False when the password is rejected
            (the original silently returned None on failure).
        """
        try:
            self.fp.extractall(path=self.target_path, pwd=passward.encode())
            print()
            print('成功破解该压缩包,密码为: ' + passward)
            self.fp.close()
            return True
        except Exception:
            # Wrong password (library raises) -- report failure explicitly.
            return False
def main():
    # Demo: crack a sample archive trying 4-character lowercase+digit passwords.
    x = Passward(filename="./lee-passward.zip", target_path="./")
    x.brutal_extract([4, ], digit=True, lower=True, upper=False, punctuation=False)
if __name__ == '__main__':
    main()
|
#If we list all the natural numbers below 10 that are multiples of 3 or 5,
# we get 3, 5, 6 and 9.
#The sum of these multiples is 23.
#
#Find the sum of all the multiples of 3 or 5 below 1000.
# Read a limit from the user and print the Project Euler #1 result for it.
from Generators import euler_one_generator, get_number_from_user

print(euler_one_generator(get_number_from_user()))
|
import configparser as cp
from pyspark import SparkConf
class SparkConfiguration:
    """Helper that builds a SparkConf from the application.properties file."""

    @staticmethod
    def getSparkConf():
        """Read the SPARK_APP_CONFIGS section and apply every entry to a
        fresh SparkConf, which is returned."""
        parser = cp.ConfigParser()
        parser.read(r"application.properties")
        spark_conf = SparkConf()
        for option, setting in parser.items("SPARK_APP_CONFIGS"):
            spark_conf.set(option, setting)
        return spark_conf
#import sys
#input = sys.stdin.readline
def main():
    """Read a string and a length k, then print how many distinct
    substrings of length k the string contains."""
    text = input()
    k = int(input())
    # A set comprehension collects each window exactly once.
    distinct = {text[start:start + k] for start in range(len(text) - k + 1)}
    print(len(distinct))


if __name__ == '__main__':
    main()
|
# https://www.hackerrank.com/challenges/list-comprehensions/problem
if __name__ == '__main__':
    x = int(input())
    y = int(input())
    z = int(input())
    n = int(input())
    # Every grid coordinate (i, j, k) on the cuboid whose components
    # do not sum to n.
    print([[i, j, k]
           for i in range(x + 1)
           for j in range(y + 1)
           for k in range(z + 1)
           if i + j + k != n])
# https://practice.geeksforgeeks.org/problems/replace-all-0s-with-5/1
def convertFive(n):
    """Return n with every decimal digit 0 replaced by 5.

    E.g. 1020 -> 1525 and 0 -> 5. Replaces the manual per-character loop
    with a single str.replace pass.
    """
    return int(str(n).replace("0", "5"))
# print(convertFive(0))
# https://www.hackerrank.com/challenges/arrays-ds/problem
#!/bin/python3
# Complete the reverseArray function below.
def reverseArray(a):
    """Return a new list with the elements of a in reverse order.

    BUG FIX: the original popped every element off a, silently emptying the
    caller's list as a side effect; slicing leaves the input untouched.
    """
    return a[::-1]
# https://practice.geeksforgeeks.org/problems/third-largest-element/1
def thirdLargest(arr, n):
    """Return the third largest element of arr (of length n), or -1 if n < 3.

    Uses sorted(), which copies, so the caller's list order is preserved
    (the original arr.sort() reordered it in place as a side effect).
    """
    if n >= 3:
        return sorted(arr)[n-3]
    return -1
print(thirdLargest([18, 21, 10], 3))
|
import pandas as pd
import pickle
import streamlit as st
from PIL import Image
# Page chrome: keep the sidebar open on load so the page selector is visible.
st.set_page_config(
    # page_icon='NONE',
    initial_sidebar_state='expanded'
)
st.title('r/wallstreetbets or r/SatoshiStreetBets? Predicting the Subreddit of a Post Using NLP')
st.write('**Disclaimer:** I am not a financial advisor. I am in no way providing any financial advice to anyone in any shape or form. This app should be used solely for personal amusement.')
st.write('Use the sidebar to select a page to view.')
# Three-page navigation driven by a sidebar selectbox.
page = st.sidebar.selectbox(
    'Page',
    ('About', 'Visualize the data', 'Make a prediction')
)
if page == 'About':
    st.subheader('About this project')
    st.write('''
This is a Streamlit app that showcases my logistic regression model. The model utilizes natural language processing on two subreddits to predict if a post came from r/wallstreetbets or r/SatoshiStreetBets. The predictions from the model can be used to explore what keywords are unique to each subreddit. Individuals who write text that resembles the posts from r/wallstreetbets may be more interested in stocks, whereas those who write text that resembles the posts from r/SatoshiStreetBets may be more interested in cryptocurrency.
You can get in touch with me on these websites:
- LinkedIn: https://www.linkedin.com/in/seung-woo-choi/
- Portfolio: https://choiseun.github.io/
'''
    )
elif page == 'Visualize the data':
    # header
    st.subheader('Visualize the data')
    st.write('''Let's look at some colorful visuals.
Below you can find...
'''
    )
    # Word-cloud images generated offline and stored under ./images/.
    visual_1 = Image.open('./images/wsb_word_cloud.png')
    st.image(visual_1, caption='Words Unique to r/wallstreetbets')
    st.write('''
''')
    visual_2 = Image.open('./images/ssb_word_cloud.png')
    st.image(visual_2, caption='Words Unique to r/SatoshiStreetBets')
    st.write('''
''')
    visual_3 = Image.open('./images/shared_word_cloud.png')
    st.image(visual_3, caption='Words Common to Both Subreddits')
    st.write('''
''')
elif page == 'Make a prediction':
    st.subheader('Stocks or Cryptocurrencies? Which type of asset are you more interested in?')
    st.write('''
Enter some text to make a prediction! The model is trained on subreddit posts.
The text might visually cut off, but you can write up to 500 characters.
'''
    )
    st.write('''
''')
    # Load the pickled production model from disk.
    with open('./datasets/production_model.pkl', 'rb') as pickle_in:
        model = pickle.load(pickle_in)
    # Text input
    your_text = st.text_input(
        label='Please enter some text:',
        value='bitcoin keeps mooning! do you think elon will pump dogecoin or should I keep buying ada?',
        max_chars=500
    )
    # Prediction: label 0 -> r/SatoshiStreetBets, 1 -> r/wallstreetbets
    # (see the mapping below).
    predicted_subreddit = model.predict([your_text])[0]
    # Labels
    my_label = 'None'
    my_asset = 'None'
    if predicted_subreddit == 0:
        my_label = 'r/SatoshiStreetBets'
        my_asset = 'cryptocurrencies'
    elif predicted_subreddit == 1:
        my_label = 'r/wallstreetbets'
        my_asset = 'stocks'
    # Results
    st.write('''
''')
    st.subheader('Results:')
    st.write(f'The input resembles text you may find on **{my_label}**. You may be more interested in **{my_asset}**.')
# Referenced: https://git.generalassemb.ly/DSIR-Lancelot/streamlit_lesson/blob/master/solution-code/app.py
'''
Methods that facilitate testing with mocks by enabling the usage of a failure message.
Created on 27.08.2018
@author: FM
'''
from collections import namedtuple
import types # get access to types such as function or method)
## TODO: create class MockAssertionError and implement a string method for better readability
# Raised by mock_assert_msg / mock_assert_many_msg when the supplied callable
# is not a bound method (i.e. not a mock's assertion method).
class InvalidMockMethodError(Exception):
    """Raised when a passed mock_method is not actually a method."""
#------------------------------------------------------------------------------
# Generator that allows mocking os.scandir()
#------------------------------------------------------------------------------
class _MockedEntry():
def __init__(self, name, is_file_bool):
self.name = name
self.is_file = (lambda: is_file_bool) # is_file is expected to be a function, thus lambda
def mock_scandir_gen(entry_file_tuples):
    """
    Yield _MockedEntry objects, usable as a scandir mock's return value.

    @param entry_file_tuples: iterable of (entry_name, is_file_bool) pairs
    """
    for name, is_file in entry_file_tuples:
        yield _MockedEntry(name, is_file)
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Class required to perform mock assertions with keyword arguments
#------------------------------------------------------------------------------
KeywordArgTuple = namedtuple('KeywordArgTuple', 'key value')
def _split_args_and_kwargs(argument_list):
"""Split the given list of arguments into a list of plain arguments and a dictionary of keyword arguments."""
kwargs = {}
args = []
for arg in argument_list:
if type(arg) is KeywordArgTuple:
key, value = arg
kwargs.update({key: value})
else:
args.append(arg)
return args, kwargs
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Procedures that allow performing mock assertions and
# print a unittest-like message upon failure
#------------------------------------------------------------------------------
def mock_assert_msg(mock_method, given_args, msg):
    """
    Run a mock assertion method, re-raising failures with a custom message.

    @param mock_method: The mock assertion method (must be a bound method)
    @param given_args: Arguments to call mock_method with; keyword arguments
        must be wrapped in a KeywordArgTuple
    @param msg: The message attached to a raised AssertionError
    """
    if not isinstance(mock_method, types.MethodType):
        raise InvalidMockMethodError("The given method '{}' is not a method.".format(mock_method))
    plain_args, keyword_args = _split_args_and_kwargs(given_args)
    try:
        mock_method(*plain_args, **keyword_args)
    except AssertionError as failure:
        # Preserve the original assertion details alongside the custom message.
        raise AssertionError(failure.args, msg)
def mock_assert_many_msg(calls, msg):
    """
    Run several mock assertions, re-raising the first failure with a custom message.

    @param calls: iterable of (mock_method, given_args) tuples, with the same
        semantics as in mock_assert_msg
    @param msg: The message attached when any of the assertions fails
    """
    try:
        for mock_method, given_args in calls:
            if not isinstance(mock_method, types.MethodType):
                raise InvalidMockMethodError("The given method '{}' is not a method.".format(mock_method))
            plain_args, keyword_args = _split_args_and_kwargs(given_args)
            mock_method(*plain_args, **keyword_args)
    except AssertionError as failure:
        # Preserve the original assertion details alongside the custom message.
        raise AssertionError(failure.args, msg)
#------------------------------------------------------------------------------ |
#!/usr/bin/python
import sys #don't know
import cPickle #file input and output
import collections #used for Counter
# def bracketSwitcher(i, hero): #Used to change the input file so all three brackets are tested sequentially
# baseStr = '[' + hero + '678]'
# baseStr = '[6.79][Earth Spirit]'
# if i == 1: #i stands for iteration. Iteration 1 = Normal, Iteration 2 = High, Iteration 3 = Very High
# returnStr = baseStr + "NormalExp.txt"
# return returnStr
# elif i == 2:
# returnStr = baseStr + "HighExp.txt"
# return returnStr
# elif i == 3:
# returnStr = baseStr + "VeryHighExp.txt"
# return returnStr
# else:
# print "Error in bracketSwitcher"
# sys.exit()
# hero = 'Alchemist'
# abilityNumbers = [5365, 5366, 5368, 5369]
# hero = 'Ancient Apparition'
# abilityNumbers = [5345, 5346, 5347, 5348]
# hero = 'Axe'
# abilityNumbers = [5007, 5008, 5009, 5010]
# hero = 'Bloodseeker'
# abilityNumbers = [5015, 5016, 5017, 5018]
# hero = 'Bristleback'
# abilityNumbers = [5548, 5549, 5550, 5551]
# hero = 'Crystal Maiden'
# abilityNumbers = [5126, 5127, 5128, 5129]
# hero = 'Dazzle'
# abilityNumbers = [5233, 5234, 5235, 5236]
# hero = 'Drow Ranger'
# abilityNumbers = [5019, 5020, 5021, 5022]
# hero = 'Earth Spirit'
# abilityNumbers = [5608, 5609, 5610, 5612]
# hero = 'Ember Spirit'
# abilityNumbers = [5603, 5604, 5605, 5606]
# hero = 'Huskar'
# abilityNumbers = [5271, 5272, 5273, 5274]
# hero = 'Jakiro'
# abilityNumbers = [5297, 5298, 5299, 5300]
# hero = 'Legion Commander'
# abilityNumbers = [5595, 5596, 5597, 5598]
# hero = 'Lifestealer'
# abilityNumbers = [5249, 5250, 5251, 5252]
# hero = 'Medusa'
# abilityNumbers = [5504, 5505, 5506, 5507]
# hero = 'Necrolyte'
# abilityNumbers = [5158, 5159, 5160, 5161]
# hero = 'Phoenix'
# abilityNumbers = [5623, 5625, 5626, 5630]
# hero = 'Riki'
# abilityNumbers = [5142, 5143, 5144, 5145]
# hero = 'Silencer'
# abilityNumbers = [5377, 5378, 5379, 5380]
# hero = 'Slark'
# abilityNumbers = [5494, 5495, 5496, 5497]
# hero = 'Sniper'
# abilityNumbers = [5154, 5155, 5156, 5157]
# hero = 'Spectre'
# abilityNumbers = [5334, 5335, 5336, 5337]
# hero = 'Terrorblade'
# abilityNumbers = [5619, 5620, 5621, 5622]
# hero = 'Tinker'
# abilityNumbers = [5150, 5151, 5152, 5153]
# hero = 'Treant Protector'
# abilityNumbers = [5434, 5435, 5436, 5437]
# hero = 'Ursa'
# abilityNumbers = [5357, 5358, 5359, 5360]
# hero = 'Venomancer'
# abilityNumbers = [5178, 5179, 5180, 5181]
# Active hero: swap this pair (see the commented alternatives above) to
# process a different hero's pickled match data.
hero = 'Viper'
abilityNumbers = [5218, 5219, 5220, 5221]
# hero = 'Windrunner'
# abilityNumbers = [5130, 5131, 5132, 5133]
# Ability ids in q/w/e/r hotkey order.
qNumber=abilityNumbers[0]
wNumber=abilityNumbers[1]
eNumber=abilityNumbers[2]
rNumber=abilityNumbers[3]
# files = ['[6.78][VH][10.8to12]Exp.txt']
# files = ['[6.79][VH][1.7]Exp.txt', '[6.79][VH][1.8]Exp.txt', '[6.79][VH][1.9]Exp.txt', '[6.79][VH][1.10]Exp.txt', '[6.79][VH][1.11]Exp.txt', '[6.79][VH][1.12]Exp.txt', '[6.79][VH][1.13]Exp.txt', '[6.79][VH][1.14]Exp.txt', '[6.79][VH][1.15]Exp.txt', '[6.79][VH][1.16]Exp.txt']
# files = ["[6.80][VH][1.30]Exp.txt", "[6.80][VH][1.31]Exp.txt", "[6.80][VH][2.1]Exp.txt", "[6.80][VH][2.2]Exp.txt", "[6.80][VH][2.3]Exp.txt", "[6.80][VH][2.4]Exp.txt", "[6.80][VH][2.5]Exp.txt"]
files = ["[6.81][VH][6.19][1]Exp.txt", "[6.81][VH][6.19][2]Exp.txt", "[6.81][VH][6.19][3]Exp.txt"]
class abilityData:
    # Plain record for one player's performance in one match.
    # NOTE(review): these are *class-level* defaults shared across instances;
    # the processing loop below assigns every field on the instance before
    # reading it, so the shared mutable lists (skills/items) are never
    # mutated in place -- but an __init__ would be safer.
    level = int()
    win = bool()
    skills = []
    primary = str()
    secondary = str()
    build = str()
    matchID = int()
    csPm = float()
    kaP10m = float()
    dP10m = float()
    items = []
# allData = []
heroData = []
# Load the running multi-patch aggregate for this hero from a previous run.
fileName = '[MultiPatch][' + hero + ']AbilityData' + '.txt' #names an output .csv file for export to Excel
fileObject = open(fileName,'r')
allData = cPickle.load(fileObject)
fileObject.close()
# NOTE(review): indentation below is reconstructed -- the source dump lost
# it. 'del matchDetails[:]' is placed per-file and the aggregate append after
# the loop; confirm against the original script. (Python 2 syntax.)
for file in files:
    fileName = file
    fileObject = open(fileName,'r')
    matchDetails = cPickle.load(fileObject)
    fileObject.close()
    print file
    for match in matchDetails:
        # Keep only full 10-player, non-mode-18, positive-duration matches.
        if len(match.players) == 10 and match.mode != 18 and match.duration > 0:
            for player in match.players:
                if player.heroName == hero:
                    playerData = abilityData()
                    playerData.level = player.level
                    playerData.matchID = match.matchID
                    playerData.win = player.win
                    playerData.skills = list(player.skills)
                    playerData.items = list(player.items)
                    # Normalise stats: creep score per minute, kills+assists
                    # and deaths per 10 minutes.
                    playerData.csPm = player.cs*60/float(match.duration)
                    playerData.kaP10m = (player.kills+player.assists)*600/float(match.duration)
                    playerData.dP10m = (player.deaths)*600/float(match.duration)
                    # Classify the skill build: 'primary' is the ability maxed
                    # within the first 8 levels, 'secondary' within the first 10.
                    holder = player.skills[0:8]
                    if holder.count(qNumber) == 4:
                        playerData.primary = 'q'
                        holder = player.skills[0:10]
                        if holder.count(wNumber) == 4:
                            playerData.secondary = 'w'
                        elif holder.count(eNumber) == 4:
                            playerData.secondary = 'e'
                        else:
                            playerData.secondary = '-'
                    elif holder.count(wNumber) == 4:
                        playerData.primary = 'w'
                        holder = player.skills[0:10]
                        if holder.count(qNumber) == 4:
                            playerData.secondary = 'q'
                        elif holder.count(eNumber) == 4:
                            playerData.secondary = 'e'
                        else:
                            playerData.secondary = '-'
                    elif holder.count(eNumber) == 4:
                        playerData.primary = 'e'
                        holder = player.skills[0:10]
                        if holder.count(qNumber) == 4:
                            playerData.secondary = 'q'
                        elif holder.count(wNumber) == 4:
                            playerData.secondary = 'w'
                        else:
                            playerData.secondary = '-'
                    else:
                        # No ability maxed early: undifferentiated 'split' build.
                        playerData.primary = '-'
                        playerData.secondary = '-'
                    build = playerData.primary + playerData.secondary
                    playerData.build = build
                    # (commented-out team-level aggregation removed for clarity;
                    # it summed the other four teammates' levels)
                    heroData.append(playerData)
    del matchDetails[:]
allData.append(heroData)
print len(allData)
print len(allData[-1])
# Persist the updated aggregate back to the same pickle file.
fileName = '[MultiPatch][' + hero + ']AbilityData' + '.txt' #names an output .csv file for export to Excel
fileObject = open(fileName,'w')
cPickle.dump(allData,fileObject)
fileObject.close()
# qPrim = 0
# qPrimWin = 0
# qw = 0
# qwWin = 0
# qe = 0
# qeWin = 0
# qSplit = 0
# qSplitWin = 0
# wPrim = 0
# wPrimWin = 0
# wq = 0
# wqWin = 0
# we = 0
# weWin = 0
# wSplit = 0
# wSplitWin = 0
# ePrim = 0
# ePrimWin = 0
# eq = 0
# eqWin = 0
# ew = 0
# ewWin = 0
# eSplit = 0
# eSplitWin = 0
# split = 0
# splitWin = 0
# use = len(bracketData)
# for entry in bracketData:
# if entry.build == 'qw':
# qw = qw + 1
# if entry.win == True:
# qwWin = qwWin + 1
# elif entry.build == 'qe':
# qe = qe + 1
# if entry.win == True:
# qeWin = qeWin + 1
# elif entry.build == 'q-':
# qSplit = qSplit + 1
# if entry.win == True:
# qSplitWin = qSplitWin + 1
# elif entry.build == 'wq':
# wq = wq + 1
# if entry.win == True:
# wqWin = wqWin + 1
# elif entry.build == 'we':
# we = we + 1
# if entry.win == True:
# weWin = weWin + 1
# elif entry.build == 'w-':
# wSplit = wSplit + 1
# if entry.win == True:
# wSplitWin = wSplitWin + 1
# elif entry.build == 'eq':
# eq = eq + 1
# if entry.win == True:
# eqWin = eqWin + 1
# elif entry.build == 'ew':
# ew = ew + 1
# if entry.win == True:
# ewWin = ewWin + 1
# elif entry.build == 'e-':
# eSplit = eSplit + 1
# if entry.win == True:
# eSplitWin = eSplitWin + 1
# else:
# split = split + 1
# if entry.win == True:
# splitWin = splitWin + 1
# qPrim = qw + qe + qSplit
# qPrimWin = qwWin + qeWin + qSplitWin
# wPrim = wq + we + wSplit
# wPrimWin = wqWin + weWin + wSplitWin
# ePrim = eq + ew + eSplit
# ePrimWin = eqWin + ewWin + eSplitWin
# qRate = (float(qPrim)/use)*100
# qRate = str('%.2f' % qRate) + '%,'
# qWinRate = (float(qPrimWin)/qPrim)*100
# qWinRate = str('%.2f' % qWinRate) + '%,'
# if qw == 0:
# qwRate = '0%,'
# qwWinRate = '0%,'
# else:
# qwRate = (float(qw)/qPrim)*100
# qwRate = str('%.2f' % qwRate) + '%,'
# qwWinRate = (float(qwWin)/qw)*100
# qwWinRate = str('%.2f' % qwWinRate) + '%,'
# qeRate = (float(qe)/qPrim)*100
# qeRate = str('%.2f' % qeRate) + '%,'
# qeWinRate = (float(qeWin)/qe)*100
# qeWinRate = str('%.2f' % qeWinRate) + '%,'
# if qSplit == 0:
# qSplitRate = '0%,'
# qSplitWinRate = '0%,'
# else:
# qSplitRate = (float(qSplit)/qPrim)*100
# qSplitRate = str('%.2f' % qSplitRate) + '%,'
# qSplitWinRate = (float(qSplitWin)/qSplit)*100
# qSplitWinRate = str('%.2f' % qSplitWinRate) + '%,'
# wRate = (float(wPrim)/use)*100
# wRate = str('%.2f' % wRate) + '%,'
# wWinRate = (float(wPrimWin)/wPrim)*100
# wWinRate = str('%.2f' % wWinRate) + '%,'
# wqRate = (float(wq)/wPrim)*100
# wqRate = str('%.2f' % wqRate) + '%,'
# wqWinRate = (float(wqWin)/wq)*100
# wqWinRate = str('%.2f' % wqWinRate) + '%,'
# weRate = (float(we)/wPrim)*100
# weRate = str('%.2f' % weRate) + '%,'
# weWinRate = (float(weWin)/we)*100
# weWinRate = str('%.2f' % weWinRate) + '%,'
# wSplitRate = (float(wSplit)/wPrim)*100
# wSplitRate = str('%.2f' % wSplitRate) + '%,'
# wSplitWinRate = (float(wSplitWin)/wSplit)*100
# wSplitWinRate = str('%.2f' % wSplitWinRate) + '%,'
# eRate = (float(ePrim)/use)*100
# eRate = str('%.2f' % eRate) + '%,'
# eWinRate = (float(ePrimWin)/ePrim)*100
# eWinRate = str('%.2f' % eWinRate) + '%,'
# eqRate = (float(eq)/ePrim)*100
# eqRate = str('%.2f' % eqRate) + '%,'
# eqWinRate = (float(eqWin)/eq)*100
# eqWinRate = str('%.2f' % eqWinRate) + '%,'
# if ew == 0:
# ewRate = '0%,'
# ewWinRate = '0%,'
# else:
# ewRate = (float(ew)/ePrim)*100
# ewRate = str('%.2f' % ewRate) + '%,'
# ewWinRate = (float(ewWin)/ew)*100
# ewWinRate = str('%.2f' % ewWinRate) + '%,'
# eSplitRate = (float(eSplit)/ePrim)*100
# eSplitRate = str('%.2f' % eSplitRate) + '%,'
# eSplitWinRate = (float(eSplitWin)/eSplit)*100
# eSplitWinRate = str('%.2f' % eSplitWinRate) + '%,'
# splitRate = (float(split)/use)*100
# splitRate = str('%.2f' % splitRate) + '%,'
# splitWinRate = (float(splitWin)/split)*100
# splitWinRate = str('%.2f' % splitWinRate) + '%,'
# useRates1 = [qRate, qwRate, qeRate, qSplitRate]
# useRates2 = [wRate, wqRate, weRate, wSplitRate]
# useRates3 = [eRate, eqRate, ewRate, eSplitRate]
# useRates4 = [splitRate]
# winRates1 = [qWinRate, qwWinRate, qeWinRate, qSplitWinRate]
# winRates2 = [wWinRate, wqWinRate, weWinRate, wSplitWinRate]
# winRates3 = [eWinRate, eqWinRate, ewWinRate, eSplitWinRate]
# winRates4 = [splitWinRate]
# print useRates1
# print useRates2
# print useRates3
# print useRates4
# print winRates1
# print winRates2
# print winRates3
# print winRates4 |
# -*- coding: utf-8 -*-
# This file as well as the whole tsfresh package are licenced under the MIT licence (see the LICENCE.txt)
# Maximilian Christ (maximilianchrist.com), Blue Yonder Gmbh, 2016
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from tsfresh import defaults
from tsfresh.feature_selection.relevance import calculate_relevance_table
class FeatureSelector(BaseEstimator, TransformerMixin):
    """
    Sklearn-compatible estimator, for reducing the number of features in a dataset to only those,
    that are relevant and significant to a given target. It is basically a wrapper around
    :func:`~tsfresh.feature_selection.relevance.calculate_relevance_table`.

    The check is done by testing the hypothesis

    :math:`H_0` = the Feature is not relevant and can not be added`

    against

    :math:`H_1` = the Feature is relevant and should be kept

    using several statistical tests (depending on whether the feature or/and the target is binary
    or not). Using the Benjamini Hochberg procedure, only features in :math:`H_0` are rejected.

    This estimator - as most of the sklearn estimators - works in a two step procedure. First, it is fitted
    on training data, where the target is known:

    >>> import pandas as pd
    >>> X_train, y_train = pd.DataFrame(), pd.Series() # fill in with your features and target
    >>> from tsfresh.transformers import FeatureSelector
    >>> selector = FeatureSelector()
    >>> selector.fit(X_train, y_train)

    In this example the list of relevant features is empty:

    >>> selector.relevant_features
    >>> []

    The same holds for the feature importance:

    >>> selector.feature_importances_
    >>> array([], dtype=float64)

    The estimator keeps track on those features, that were relevant in the training step. If you
    apply the estimator after the training, it will delete all other features in the testing
    data sample:

    >>> X_test = pd.DataFrame()
    >>> X_selected = selector.transform(X_test)

    After that, X_selected will only contain the features that were relevant during the training.

    If you are interested in more information on the features, you can look into the member
    ``relevant_features`` after the fit.
    """

    def __init__(
        self,
        test_for_binary_target_binary_feature=defaults.TEST_FOR_BINARY_TARGET_BINARY_FEATURE,
        test_for_binary_target_real_feature=defaults.TEST_FOR_BINARY_TARGET_REAL_FEATURE,
        test_for_real_target_binary_feature=defaults.TEST_FOR_REAL_TARGET_BINARY_FEATURE,
        test_for_real_target_real_feature=defaults.TEST_FOR_REAL_TARGET_REAL_FEATURE,
        fdr_level=defaults.FDR_LEVEL,
        hypotheses_independent=defaults.HYPOTHESES_INDEPENDENT,
        n_jobs=defaults.N_PROCESSES,
        chunksize=defaults.CHUNKSIZE,
        ml_task="auto",
        multiclass=False,
        n_significant=1,
        multiclass_p_values="min",
    ):
        """
        Create a new FeatureSelector instance.

        :param test_for_binary_target_binary_feature: Which test to be used for binary target, binary feature
                                                      (currently unused)
        :type test_for_binary_target_binary_feature: str

        :param test_for_binary_target_real_feature: Which test to be used for binary target, real feature
        :type test_for_binary_target_real_feature: str

        :param test_for_real_target_binary_feature: Which test to be used for real target, binary feature
                                                    (currently unused)
        :type test_for_real_target_binary_feature: str

        :param test_for_real_target_real_feature: Which test to be used for real target, real feature (currently unused)
        :type test_for_real_target_real_feature: str

        :param fdr_level: The FDR level that should be respected, this is the theoretical expected percentage
                          of irrelevant features among all created features.
        :type fdr_level: float

        :param hypotheses_independent: Can the significance of the features be assumed to be independent?
                                       Normally, this should be set to False as the features are never
                                       independent (e.g. mean and median)
        :type hypotheses_independent: bool

        :param n_jobs: Number of processes to use during the p-value calculation
        :type n_jobs: int

        :param chunksize: Size of the chunks submitted to the worker processes
        :type chunksize: int

        :param ml_task: The intended machine learning task. Either `'classification'`, `'regression'` or `'auto'`.
                        Defaults to `'auto'`, meaning the intended task is inferred from `y`.
                        If `y` has a boolean, integer or object dtype, the task is assumed to be classification,
                        else regression.
        :type ml_task: str

        :param multiclass: Whether the problem is multiclass classification. This modifies the way in which features
                           are selected. Multiclass requires the features to be statistically significant for
                           predicting n_significant classes.
        :type multiclass: bool

        :param n_significant: The number of classes for which features should be statistically significant predictors
                              to be regarded as 'relevant'
        :type n_significant: int

        :param multiclass_p_values: The desired method for choosing how to display multiclass p-values for each feature.
                                    Either `'avg'`, `'max'`, `'min'`, `'all'`. Defaults to `'min'`, meaning the p-value
                                    with the highest significance is chosen. When set to `'all'`, the attributes
                                    `self.feature_importances_` and `self.p_values` are of type pandas.DataFrame, where
                                    each column corresponds to a target class.
        :type multiclass_p_values: str
        """
        # Fitted state; all populated by fit().
        self.relevant_features = None  # list of selected column names
        self.feature_importances_ = None  # 1 - p_value per feature
        self.p_values = None
        self.features = None  # all feature names seen during fit
        self.test_for_binary_target_binary_feature = (
            test_for_binary_target_binary_feature
        )
        self.test_for_binary_target_real_feature = test_for_binary_target_real_feature
        self.test_for_real_target_binary_feature = test_for_real_target_binary_feature
        self.test_for_real_target_real_feature = test_for_real_target_real_feature
        self.fdr_level = fdr_level
        self.hypotheses_independent = hypotheses_independent
        self.n_jobs = n_jobs
        self.chunksize = chunksize
        self.ml_task = ml_task
        self.multiclass = multiclass
        self.n_significant = n_significant
        self.multiclass_p_values = multiclass_p_values

    def fit(self, X, y):
        """
        Extract the information, which of the features are relevant using the given target.

        For more information, please see the
        :func:`~tsfresh.feature_selection.relevance.calculate_relevance_table`
        function. All columns in the input data sample are treated as feature. The index of all
        rows in X must be present in y.

        :param X: data sample with the features, which will be classified as relevant or not
        :type X: pandas.DataFrame or numpy.array

        :param y: target vector to be used, to classify the features
        :type y: pandas.Series or numpy.array

        :return: the fitted estimator with the information, which features are relevant
        :rtype: FeatureSelector
        """
        # The relevance calculation expects pandas objects; wrap raw arrays.
        if not isinstance(X, pd.DataFrame):
            X = pd.DataFrame(X.copy())

        if not isinstance(y, pd.Series):
            y = pd.Series(y.copy())

        relevance_table = calculate_relevance_table(
            X,
            y,
            ml_task=self.ml_task,
            multiclass=self.multiclass,
            n_significant=self.n_significant,
            n_jobs=self.n_jobs,
            chunksize=self.chunksize,
            fdr_level=self.fdr_level,
            hypotheses_independent=self.hypotheses_independent,
            test_for_binary_target_real_feature=self.test_for_binary_target_real_feature,
        )

        self.relevant_features = relevance_table.loc[
            relevance_table.relevant
        ].feature.tolist()

        if self.multiclass:
            # One p_value_<class> column per target class.
            p_values_table = relevance_table.filter(regex="^p_value_*", axis=1)
            if self.multiclass_p_values == "all":
                self.p_values = p_values_table
                self.feature_importances_ = 1.0 - p_values_table
                # Rename p_value_<class> columns to importance_<class>.
                self.feature_importances_.columns = (
                    self.feature_importances_.columns.str.lstrip("p_value")
                )
                self.feature_importances_ = self.feature_importances_.add_prefix(
                    "importance_"
                )
            elif self.multiclass_p_values == "min":
                self.p_values = p_values_table.min(axis=1).values
            elif self.multiclass_p_values == "max":
                self.p_values = p_values_table.max(axis=1).values
            elif self.multiclass_p_values == "avg":
                self.p_values = p_values_table.mean(axis=1).values

            if self.multiclass_p_values != "all":
                # raise p_values to the power of n_significant to increase importance
                # of features which are significant for more classes
                self.feature_importances_ = (
                    1.0 - self.p_values**relevance_table.n_significant.values
                )
        else:
            self.feature_importances_ = 1.0 - relevance_table.p_value.values
            self.p_values = relevance_table.p_value.values

        self.features = relevance_table.index.tolist()

        return self

    def transform(self, X):
        """
        Delete all features, which were not relevant in the fit phase.

        :param X: data sample with all features, which will be reduced to only those that are relevant
        :type X: pandas.DataSeries or numpy.array

        :return: same data sample as X, but with only the relevant features
        :rtype: pandas.DataFrame or numpy.array
        """
        if self.relevant_features is None:
            raise RuntimeError("You have to call fit before.")

        # DataFrames are filtered by column name, raw arrays by column index.
        if isinstance(X, pd.DataFrame):
            return X.copy().loc[:, self.relevant_features]
        else:
            return X[:, self.relevant_features]
|
import requests
ID = 1
# RUN = True


class CaptchaService:
    """Solves a Google reCAPTCHA through the 2captcha.com HTTP API.

    The captcha for ``url``/``sitekey`` is submitted once and the service is
    then polled until a solution token is available.
    """

    def __init__(self, key, sitekey, url):
        """
        :param key: 2captcha API key
        :param sitekey: the site's reCAPTCHA key (sent as ``googlekey``)
        :param url: the page URL the captcha appears on
        """
        self.id = ID
        self.key = key
        self.data = {
            'key': key,
            'method': 'userrecaptcha',
            'googlekey': sitekey,
            'pageurl': url,
            'json': '1'
        }

    def get(self):
        """Submit the captcha and poll until solved.

        :return: the solution token, or None if submission failed entirely
                 (errors are printed, matching the original best-effort style).
        """
        import time  # local import: only needed for the polling back-off

        s = requests.Session()
        try:
            r = s.post("http://2captcha.com/in.php", params=self.data)
            r = r.json()
            res_ID = r['request']
            while True:
                try:
                    r = s.get("http://2captcha.com/res.php?key={0}&action=get&id={1}&json=1".format(self.key, res_ID))
                    r = r.json()
                    # status == 0 means "not ready yet"
                    if r['status'] != 0:
                        return r['request']
                    # BUG FIX: wait between polls instead of hammering the API
                    # in a tight loop.
                    time.sleep(5)
                except (requests.exceptions.ConnectionError, requests.exceptions.Timeout, requests.exceptions.ConnectTimeout):
                    print("connnection error, retrying")
                except Exception:
                    # BUG FIX: bare `raise` preserves the original traceback;
                    # the old `raise e` rewrote it.
                    raise
        except Exception as e:
            print(e)
Your input
2736
Output
7236
Expected
7236
Your input
9973
Output
9973
Expected
9973 |
from itertools import product


def count_shichigosan(limit):
    """Count integers in [1, limit] whose decimal digits consist only of
    3, 5 and 7, with each of the three digits appearing at least once.

    :param limit: inclusive upper bound
    :return: number of qualifying integers
    """
    total = 0
    # Such a number needs at least 3 digits; lengths up to 9 cover the range.
    for length in range(3, 10):
        for digits in product("357", repeat=length):
            value = int("".join(digits))
            if value <= limit and {"3", "5", "7"} <= set(digits):
                total += 1
    return total


if __name__ == "__main__":
    N = int(input())
    print(count_shichigosan(N))
|
# -*- coding: utf-8 -*-
import re, json
import MySQLdb
import sys, argparse
reload(sys)
sys.setdefaultencoding('utf-8')
def trunc_db(conn):
    """Remove every row from the ModelViews table on the given connection."""
    cursor = conn.cursor()
    # Force a UTF-8 session before issuing statements.
    cursor.execute('SET NAMES utf8')
    cursor.execute("TRUNCATE TABLE ModelViews")
    conn.commit()
    cursor.close()
def batch_insert(conn, values):
    """Insert a batch of (Topic, Rate, Context) tuples into ModelViews."""
    sql = """
    INSERT INTO ModelViews(Topic, Rate, Context)
    VALUES (%s, %s, %s)
    """
    cursor = conn.cursor()
    cursor.executemany(sql, values)
    conn.commit()
    cursor.close()
class TopicCorpus(object):
    """Iterates over a topic-model dump file.

    The file consists of "TOPIC: <id> <rate>" header lines, each followed by
    "<word> <rate>" lines. Iteration yields one tuple per topic:
    ``(topic_id, topic_rate, words)`` where ``words`` is capped at the first
    20 ``{"word": ..., "rate": ...}`` entries.
    """

    def __init__(self, fn):
        """:param fn: path of the dump file to read."""
        self.fn = fn
        self.topic_re = re.compile(r'TOPIC: (\d+) (\d+\.\d+)')

    def __iter__(self):
        with open(self.fn) as in_fd:
            arr = []
            topic = None
            topic_rate = None
            for line in in_fd:
                line = line.strip()
                if len(line) == 0:
                    continue
                match = self.topic_re.match(line)
                if match:
                    # flush the previous topic before starting the new one
                    if topic is not None and topic_rate is not None and len(arr) > 0:
                        yield (topic, topic_rate, arr[:20])
                    topic = int(match.group(1))
                    topic_rate = float(match.group(2))
                    arr = []
                else:
                    feas = line.split(' ')
                    word = feas[0]
                    # BUG FIX: the word rate previously overwrote the shared
                    # `rate` variable, so the *word* rate of the last word was
                    # yielded as the topic rate.
                    word_rate = float(feas[1])
                    arr.append({"word": word, "rate": word_rate})
            # BUG FIX: the final topic in the file was never yielded.
            if topic is not None and topic_rate is not None and len(arr) > 0:
                yield (topic, topic_rate, arr[:20])
def import_model_view(fn, host, db, user, passwd):
    """Load a topic-model dump file into MySQL, replacing existing rows.

    Topics are read via TopicCorpus and inserted in batches of 100; the word
    list is stored as a JSON string in the Context column.
    """
    conn = MySQLdb.connect(db=db, host=host, user=user, passwd=passwd, charset="utf8")
    trunc_db(conn)
    batch_size = 100
    pending = []
    for topic, rate, words in TopicCorpus(fn):
        pending.append((topic, rate, json.dumps(words)))
        if len(pending) == batch_size:
            batch_insert(conn, pending)
            pending = []
    # flush whatever is left over after the final full batch
    if len(pending) > 0:
        batch_insert(conn, pending)
    conn.close()
|
from config import app
from routes import *
# Run the development server when this module is executed directly.
if __name__ == '__main__':
    app.run(debug=True)
|
"""
给定一个大小为 n 的数组,找到其中的多数元素。多数元素是指在数组中出现次数 大于 ⌊ n/2 ⌋ 的元素。
你可以假设数组是非空的,并且给定的数组总是存在多数元素。
示例 1:
输入:[3,2,3]
输出:3
示例 2:
输入:[2,2,1,1,1,2,2]
输出:2
进阶:
尝试设计时间复杂度为 O(n)、空间复杂度为 O(1) 的算法解决此问题。
Related Topics 位运算 01-数组 分治算法
"""
def majority_element(nums):
    """Return the element that appears more than ⌊n/2⌋ times in `nums`.

    Uses Boyer-Moore majority voting: O(n) time and O(1) extra space (the
    previous sort-based version was O(n log n) and mutated the input).
    A majority element is assumed to exist, as the problem guarantees.
    """
    candidate = None
    count = 0
    for value in nums:
        if count == 0:
            candidate = value
        count += 1 if value == candidate else -1
    return candidate


majority_element([2, 2, 1, 1, 1, 2, 2])
|
import plotly.graph_objects as go
import plotly.offline as po
from plotly.subplots import make_subplots
from datetime import datetime
import pandas as pd
import argparse
import pickle
import os
import warnings
import plotly.graph_objects as go
import plotly.offline as po
from plotly.subplots import make_subplots
warnings.filterwarnings("ignore", category=FutureWarning)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
#import tensorflow.keras.applications.efficientnet as efn
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.optimizers import Adam
from architecture import models
print(f'Wersja tensorflow:{tf.__version__}')

# Command-line interface: number of training epochs.
ap = argparse.ArgumentParser()
ap.add_argument('-e', '--epochs', default=1, help='Określ liczbę epok', type=int)
args = vars(ap.parse_args())

# Training hyper-parameters and dataset locations.
MODEL_NAME = 'VGG16'
LEARNING_RATE = 0.001
EPOCHS = args['epochs']
BATCH_SIZE = 32
INPUT_SHAPE = (224, 224, 3)  # 224x224 RGB (VGG16's expected input size)
TRAIN_DIR = 'images/train'
VALID_DIR = 'images/valid'
def plot_hist(history, filename):
    """Write an offline HTML report with the accuracy and loss curves
    (train vs. validation) from a Keras training history."""
    df = pd.DataFrame(history.history)
    df['epoch'] = history.epoch
    fig = make_subplots(rows=2, cols=1, subplot_titles=('Accuracy', 'Loss'))
    # (history column, trace label, colour, subplot row)
    trace_specs = [
        ('accuracy', 'train_accuracy', '#f29407', 1),
        ('val_accuracy', 'valid_accuracy', '#0771f2', 1),
        ('loss', 'train_loss', '#f29407', 2),
        ('val_loss', 'valid_loss', '#0771f2', 2),
    ]
    for column, label, color, row in trace_specs:
        fig.add_trace(
            go.Scatter(x=df['epoch'], y=df[column], name=label,
                       mode='markers+lines', marker_color=color),
            row=row, col=1)
    fig.update_xaxes(title_text='Liczba epok', row=1, col=1)
    fig.update_xaxes(title_text='Liczba epok', row=2, col=1)
    fig.update_yaxes(title_text='Accuracy', row=1, col=1)
    fig.update_yaxes(title_text='Loss', row=2, col=1)
    fig.update_layout(width=1400, height=1000, title=f"Metrics: {MODEL_NAME}")
    po.plot(fig, filename=filename, auto_open=False)
#lr_scheduler = tf.keras.callbacks.LearningRateScheduler(lambda epoch: min_lr *)

# Data augmentation for the training set.
train_datagen = ImageDataGenerator(
    rotation_range=30,
    rescale=1. / 255.,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest'
)
# Validation images are only rescaled, never augmented.
valid_datagen = ImageDataGenerator(rescale=1. / 255.)

train_generator = train_datagen.flow_from_directory(
    directory=TRAIN_DIR,
    target_size=(224, 224),
    color_mode="rgb",
    batch_size=BATCH_SIZE,
    shuffle=True,
    class_mode='binary'
)
valid_generator = valid_datagen.flow_from_directory(
    directory=VALID_DIR,
    target_size=(224, 224),
    color_mode="rgb",
    batch_size=BATCH_SIZE,
    shuffle=True,
    class_mode='binary'
)

#model = efn.EfficientNetB0(include_top=False, weights='imagenet')
architectures = {MODEL_NAME: models.VGG16}
architecture = architectures[MODEL_NAME](input_shape=INPUT_SHAPE)
model = architecture.build()
model.compile(
    optimizer=Adam(learning_rate=LEARNING_RATE),
    loss='binary_crossentropy',
    metrics=['accuracy']
)
model.summary()

dt = datetime.now().strftime('%d_%m_%Y_%H_%M')
filepath = os.path.join('output', 'model_' + dt + '.hdf5')
# Keep only the weights with the best validation accuracy.
checkpoint = ModelCheckpoint(filepath=filepath, monitor='val_accuracy', save_best_only=True)

print('[INFO] Trenowanie modelu')
# BUG FIX: Model.fit_generator is deprecated (and removed in newer TF
# releases); Model.fit accepts generators directly.
history = model.fit(
    train_generator,
    steps_per_epoch=train_generator.samples // BATCH_SIZE,
    validation_data=valid_generator,
    validation_steps=valid_generator.samples // BATCH_SIZE,
    epochs=EPOCHS,
    callbacks=[checkpoint]
)

print('[INFO] Eksport wykresu do pliku html...')
filename = os.path.join('output', 'report_' + dt + '.html')
plot_hist(history, filename=filename)

print('[INFO] Eksport etykiet do pliku...')
with open(r'output/labels.pickle', 'wb') as label_file:
    # pickle.dump writes straight to the file (no intermediate bytes object);
    # also stop shadowing the `file` builtin.
    pickle.dump(train_generator.class_indices, label_file)
print('[INFO] Koniec')

# run from the console:
# $ python train.py -e 20
|
import cv2
import numpy as np
from matplotlib import pyplot as plt
# Plot the per-channel (BGR) intensity histogram of an image.
img = cv2.imread('cr7.jpg')
channels = ('b', 'g', 'r')
for channel_index, channel_color in enumerate(channels):
    channel_hist = cv2.calcHist([img], [channel_index], None, [256], [0, 256])
    plt.plot(channel_hist, color=channel_color)
    plt.xlim([0, 256])
plt.show()
import numpy as np
import scipy.optimize as opt
# Randomly initialise both weight matrices.
# theta1 (25, 101), theta2 (3, 26)
def random_init(shape1, shape2):
    """Draw initial weights uniformly from [-0.12, 0.12) for both layers."""
    limit = 0.12
    first = np.random.uniform(-limit, limit, shape1)
    second = np.random.uniform(-limit, limit, shape2)
    return first, second
def sigmoid(z):
    """Element-wise logistic function 1 / (1 + e^(-z))."""
    exp_neg = np.exp(-z)
    return 1. / (1. + exp_neg)
def gradient_sigmoid(z):
    """Derivative of the logistic function: s(z) * (1 - s(z))."""
    s = sigmoid(z)
    return s * (1 - s)
# Forward propagation
def forward(theta, x):
    """Run one forward pass through the 2-layer network.

    :param theta: flat parameter vector (layout of `serialize`)
    :param x: input matrix, one sample per row, without the bias column
    :return: (a1, z2, a2, z3, a3) where a3 is the network output h(x)
    """
    t1, t2 = deserialize(theta)
    a1 = np.insert(x, 0, 1, axis=1)  # prepend bias column -> (x, 101)
    z2 = a1 @ t1.T  # (x, 25)
    a2 = sigmoid(z2)
    a2 = np.insert(a2, 0, 1, axis=1)  # prepend bias column -> (x, 26)
    z3 = a2 @ t2.T  # (x, 3)
    a3 = sigmoid(z3)
    return a1, z2, a2, z3, a3
def cost(theta, x, y):
    """Unregularised cross-entropy cost averaged over the sample."""
    _, _, _, _, hx = forward(theta, x)
    samples = len(x)
    log_likelihood = y * np.log(hx) + (1 - y) * np.log(1 - hx)
    return -np.sum(log_likelihood) / samples
def regularized_cost(theta, x, y, l=1.):
    """Cross-entropy cost plus an L2 penalty on the non-bias weights."""
    t1, t2 = deserialize(theta)
    # The bias columns (index 0) are excluded from the penalty.
    penalty = np.sum(t1[:, 1:] ** 2) + np.sum(t2[:, 1:] ** 2)
    return cost(theta, x, y) + l * penalty / (2 * len(x))
def gradient(theta, x, y):
    """Backpropagation: unregularised gradient of the cost w.r.t. theta.

    Returns a flat vector with the same layout as `serialize`.
    """
    t1, t2 = deserialize(theta)
    a1, z2, a2, z3, hx = forward(theta, x)
    d3 = hx - y  # output-layer error, (x, 3)
    # (x, 3) @ (3, 25) * (x, 25) = (x, 25); the bias column of t2 is skipped
    d2 = d3 @ t2[:, 1:] * gradient_sigmoid(z2)  # (x, 25)
    D2 = d3.T @ a2  # (3, x) @ (x, 26) = (3, 26)
    D1 = d2.T @ a1  # (25, x) @ (x, 101) = (25, 101)
    return (1. / len(x)) * serialize(D1, D2)
def regularized_gradient(theta, x, y, l=1.):
    """Gradient of the regularised cost (bias columns are not penalised).

    :param theta: flat parameter vector; NOT modified by this function.
    """
    D = gradient(theta, x, y)
    D1, D2 = deserialize(D)  # already scaled by 1 / m
    t1, t2 = deserialize(theta)
    # BUG FIX: deserialize returns reshaped *views* into `theta`, so zeroing
    # the bias columns in place corrupted the caller's parameter vector on
    # every gradient evaluation. Work on copies instead.
    t1 = t1.copy()
    t2 = t2.copy()
    t1[:, 0] = 0
    t2[:, 0] = 0
    regD1 = D1 + (l / len(x)) * t1
    regD2 = D2 + (l / len(x)) * t2
    return serialize(regD1, regD2)
def serialize(t1, t2):
    """Flatten both weight matrices into a single 1-D parameter vector."""
    return np.concatenate((t1.ravel(), t2.ravel()))
def deserialize(theta):
    """Split the flat parameter vector into the (25, 101) and (3, 26) matrices."""
    split_at = 25 * 101
    return theta[:split_at].reshape(25, 101), theta[split_at:].reshape(3, 26)
def neural_network(x, y, l=1.):
    """Train the 100-25-3 network with TNC and return the learned flat theta.

    :param x: training inputs, one sample per row (without the bias column)
    :param y: targets; elementwise-compatible with the (samples, 3) network
              output — presumably one-hot encoded, TODO confirm with caller
    :param l: L2 regularisation strength
    """
    theta1, theta2 = random_init((25, 101), (3, 26))  # theta1(25, 101) theta2(3, 26)
    init_theta = serialize(theta1, theta2)
    res = opt.minimize(fun=regularized_cost,
                       x0=init_theta,
                       args=(x, y, l),
                       method='TNC',
                       jac=regularized_gradient,
                       options={'maxiter': 450})
    return res.x
def accuracy(theta, x, y):
    """Fraction of samples whose predicted class matches the label in `y`.

    NOTE(review): the `- 1` shifts argmax indices 0..2 to labels -1..1;
    presumably `y` uses that label encoding — confirm against the caller.
    """
    a1, z2, a2, z3, hx = forward(theta, x)  # hx (test, 3)
    y_pred = np.argmax(hx, axis=1) - 1
    return np.sum([1 if y_pred[i] == y[i] else 0 for i in range(len(x))]) / len(x)
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# Reserved word-level vocabulary ids.
PAD_WORD_ID = 0
UNK_WORD_ID = 1
END_WORD_ID = 2
# Reserved character ids (values above the 0-255 byte range).
PAD_CHAR = 261
BOW_CHAR = 259  # beginning-of-word marker
EOW_CHAR = 260  # end-of-word marker
# Vocabulary size cap ("ALM") — TODO confirm what ALM abbreviates here.
ALM_MAX_VOCAB_SIZE = 20000
class bcolors:
    """ANSI terminal escape sequences for coloured/styled console output."""

    HEADER = "\033[95m"
    OKBLUE = "\033[94m"
    OKGREEN = "\033[92m"
    WARNING = "\033[93m"
    FAIL = "\033[91m"
    ENDC = "\033[0m"  # resets all colours/attributes
    BOLD = "\033[1m"
    UNDERLINE = "\033[4m"
|
# --------------------------------------------------------------------
import os
import functools
# --------------------------------------------------------------------
# Demonstration script: matrix transposition, flattening, comprehensions,
# and their map/filter/reduce equivalents. Each section prints its result.
transposed = []
matrix = [[1, 2, 3, 4], [4, 5, 6, 8]]
for i in range (len (matrix[0])): # Loop over row length (outer loop)
    transposed_row = []
    for row in matrix: # Loop over (2) rows
        transposed_row.append (row[i])
    transposed.append (transposed_row)
print (transposed)
# --------------------------------------------------------------------
matrix = [[1,2], [3,4], [5,6], [7,8]]
transposed = [[row[i] for row in matrix] for i in range (2)] # First i, then rows in matrix
print (transposed)
# --------------------------------------------------------------------
matrix = [[0, 0, 0], [1, 1, 1], [2, 2, 2],]
flat = [num for row in matrix for num in row] # From left to right
print (flat)
# --------------------------------------------------------------------
# Boils down to *result* = [*transform* *iteration* *filter*]
mystring = "Hello 12345 World"
numbers = [(int (x)) *2 for x in mystring if x.isdigit()]
print (numbers)
numbers = range (10)
new_list = [n**2 for n in numbers if n%2==0]
print (new_list)
new_list = [n**2 for n in numbers if not n%2] # Same as above
print (new_list)
# --------------------------------------------------------------------
# Three solutions to same problem
kilometer = [39.2, 36.5, 37.3, 37.8]
feet = list (map (lambda x: float (3280.8399) * x, kilometer))
print (feet)
feet2 = list ((float (3280.8399) * x for x in kilometer))
print (feet2)
feet2 = [float (3280.8399) * x for x in kilometer]
print (feet2)
# --------------------------------------------------------------------
sum_feet = functools.reduce (lambda x, y: x + y, feet) # Use first two elements, then the 3rd with the result. Add numbers together
print (sum_feet)
sum_feet = sum ([x for x in feet2])
print (sum_feet)
# --------------------------------------------------------------------
divided = [x for x in range(100) if x % 2 == 0 if x % 10 == 0] # Conditional compound
print (divided)
divided = [x for x in range(100) if x % 10 == 0 or x % 5 == 0]
print (divided)
divided = list (filter (lambda x: x % 10 == 0 or x % 5 == 0, range (100)))
print (divided)
# --------------------------------------------------------------------
two_dim = list (([1, 2], [3, 4], [5, 6])) # Flatten
print (two_dim)
flattened = [y for x in two_dim for y in x] # Left to right
print (flattened)
transposed = [[x [i] for x in two_dim] for i in range (2)]
print (transposed)
list5 = [x for x in two_dim]
print (list5)
for x, y in two_dim:
    print (x, y)
# --------------------------------------------------------------------
two_dim = list (([1, 2], [3, 4, 7], [5, 6, 8, 9])) # Flatten
# NOTE: this yields the *indices* within each row, not the row values.
flattened = [i for x in two_dim for i in range (len (x))]
print (flattened)
list_of_list = [[1,2,3],[4,5,6],[7,8]]
lol = [f for x in list_of_list for f in x]
print (lol)
# --------------------------------------------------------------------
# Windows-only: keeps the console window open until a key is pressed.
os.system ("pause")
|
#VBO/IBO/DSC Model Writing from Blender
# Reads the main 100-byte header of an ESRI shapefile stored inside a
# TIGER/Line zip archive and prints its fields.
import zipfile
import struct
import sys
import os

try:
    import zlib
    compression = zipfile.ZIP_DEFLATED
except ImportError:
    compression = zipfile.ZIP_STORED

print("\n*** Welcome to Tiger/Line Zip Reader. ***\n")
print("Running in", sys.argv[0])
print(sys.version_info)
print("")

#fmt_a = ">iiiiiii"
#fmt_a = ">i"
#fmt_b = "<iidddddddd"
# Generalisation: allow the archive path on the command line, falling back
# to the original hard-coded location.
ZIP_PATH = sys.argv[1] if len(sys.argv) > 1 else "C:\\Users\\nicholas.waun\\Downloads\\tl_2014_us_zcta510.zip"
zf = zipfile.ZipFile(ZIP_PATH)
data = zf.open("tl_2014_us_zcta510.shp")
#data = zf.open("tl_2014_us_zcta510.shx")
records = b""  # BUG FIX: zf.open() yields bytes; concatenating onto "" raised TypeError on Python 3
try:
    # NOTE(review): the .shp file is binary, so readline() splits on
    # arbitrary 0x0a bytes — the header may not arrive in one "line".
    for x in range(0, 10):
        line = data.readline()
        values = line.strip()
        print(len(values))
        print(values)
        try:
            records += line
            if len(line) > 96:
                # Shapefile main header: big-endian file code and length,
                # then little-endian version, shape type and bounding boxes.
                file_code = struct.unpack(">i", bytearray(line[:4]))
                print("file code:", file_code)
                # NOTE(review): the spec's unused field is bytes 4-24; this
                # slice starts at 0 — kept as-is to preserve original output.
                unused = struct.unpack(">iiiii", bytearray(line[:20]))
                print("unused:", unused)
                file_length = struct.unpack(">i", bytearray(line[20:24]))
                print("file length:", file_length)
                version = struct.unpack("<i", bytearray(line[24:28]))
                print("version", version)
                shape_type = struct.unpack("<i", bytearray(line[28:32]))
                print("shape type:", shape_type)
                bbox = struct.unpack("<dddd", bytearray(line[32:64]))
                print("bounding box:", bbox)
                bbox = struct.unpack("<dddd", bytearray(line[64:96]))
                print("bounding box (z and m):", bbox)
                #record_header = struct.unpack(">ii",bytearray(line[96:104]))
                #print("record header:",record_header)
        except Exception:
            # BUG FIX: narrowed from a bare except (which also caught
            # KeyboardInterrupt/SystemExit); behaviour otherwise unchanged.
            print(sys.exc_info()[0])
finally:
    data.close()
    zf.close()
|
from setuptools import setup, find_packages
from openspending.ui import __version__
# Package metadata and entry points for the OpenSpending Pylons application.
setup(
    name='openspending',
    version=__version__,
    description='OpenSpending',
    author='Open Knowledge Foundation',
    author_email='okfn-help at lists okfn org',
    url='http://github.com/okfn/openspending',
    # Runtime dependencies, pinned to exact versions.
    install_requires=[
        "Pylons==1.0",
        "Genshi==0.6",
        "pymongo==1.11",
        "repoze.who==2.0b1",
        "repoze.who-friendlyform==1.0.8",
        "Unidecode==0.04.7",
        "python-dateutil==1.5",
        "solrpy==0.9.4",
        "pyutilib.component.core==4.3.1",
        "Babel==0.9.6",
        "ckanclient==0.7",
        "colander==0.9.3",
        "distribute==0.6.19",
        "mock==0.7.2"
    ],
    setup_requires=[
        "PasteScript==1.7.3"
    ],
    packages=find_packages(),
    namespace_packages=['openspending', 'openspending.plugins'],
    test_suite='nose.collector',
    zip_safe=False,
    paster_plugins=['PasteScript', 'Pylons'],
    # Paste/Pylons integration: WSGI app factory, installer, and the
    # `paster` command-line subcommands provided by this package.
    entry_points={
        'paste.app_factory': [
            'main = openspending.ui.config.middleware:make_app'
        ],
        'paste.app_install': [
            'main = pylons.util:PylonsInstaller'
        ],
        'paste.paster_command': [
            'db = openspending.command:DbCommand',
            'graph = openspending.command:GraphCommand',
            'removeentries = openspending.command:RemoveEntriesCommand',
            'solr = openspending.command:SolrCommand',
            'grantadmin = openspending.command:GrantAdminCommand'
        ]
    }
)
|
# -*- coding: utf-8 -*-
# Copyright (c) 2014 Plivo Team. See LICENSE.txt for details.
import unittest
import ujson as json
from sharq_server import setup_server
class SharQServerTestCase(unittest.TestCase):
def setUp(self):
# get test client & redis connection
server = setup_server('./sharq.conf')
self.app = server.app.test_client()
self.r = server.sq._r
# flush redis
self.r.flushdb()
def test_root(self):
response = self.app.get('/')
self.assertEqual(response.status_code, 200)
self.assertEqual(
json.loads(response.data), {'message': 'Hello, SharQ!'})
def test_enqueue(self):
request_params = {
'job_id': 'ef022088-d2b3-44ad-bf0d-a93d6d93b82c',
'payload': {'message': 'Hello, world.'},
'interval': 1000
}
response = self.app.post(
'/enqueue/sms/johdoe/', data=json.dumps(request_params),
content_type='application/json')
self.assertEqual(response.status_code, 201)
response_data = json.loads(response.data)
self.assertEqual(response_data['status'], 'queued')
request_params = {
'job_id': 'ef022088-d2b3-44ad-bf1d-a93d6d93b82c',
'payload': {'message': 'Hello, world.'},
'interval': 1000,
'requeue_limit': 10
}
response = self.app.post(
'/enqueue/sms/johdoe/', data=json.dumps(request_params),
content_type='application/json')
self.assertEqual(response.status_code, 201)
response_data = json.loads(response.data)
self.assertEqual(response_data['status'], 'queued')
def test_dequeue_fail(self):
response = self.app.get('/dequeue/')
self.assertEqual(response.status_code, 404)
response_data = json.loads(response.data)
self.assertEqual(response_data['status'], 'failure')
response = self.app.get('/dequeue/sms/')
self.assertEqual(response.status_code, 404)
response_data = json.loads(response.data)
self.assertEqual(response_data['status'], 'failure')
def test_dequeue(self):
# enqueue a job
request_params = {
'job_id': 'ef022088-d2b3-44ad-bf0d-a93d6d93b82c',
'payload': {'message': 'Hello, world.'},
'interval': 1000
}
self.app.post(
'/enqueue/sms/johndoe/', data=json.dumps(request_params),
content_type='application/json')
# dequeue a job
response = self.app.get('/dequeue/sms/')
self.assertEqual(response.status_code, 200)
response_data = json.loads(response.data)
self.assertEqual(response_data['status'], 'success')
self.assertTrue(
response_data['job_id'], 'ef022088-d2b3-44ad-bf0d-a93d6d93b82c')
self.assertEqual(
response_data['payload'], {'message': 'Hello, world.'})
self.assertEqual(response_data['queue_id'], 'johndoe')
self.assertEqual(response_data['requeues_remaining'], -1) # from the config
def test_finish_fail(self):
# mark a non existent job as finished
response = self.app.post(
'/finish/sms/johndoe/ef022088-d2b3-44ad-bf0d-a93d6d93b82c/')
self.assertEqual(response.status_code, 404)
response_data = json.loads(response.data)
self.assertEqual(response_data['status'], 'failure')
def test_finish(self):
# enqueue a job
request_params = {
'job_id': 'ef022088-d2b3-44ad-bf0d-a93d6d93b82c',
'payload': {'message': 'Hello, world.'},
'interval': 1000
}
self.app.post(
'/enqueue/sms/johndoe/', data=json.dumps(request_params),
content_type='application/json')
# dequeue a job
self.app.get('/dequeue/sms/')
# mark the job as finished
response = self.app.post(
'/finish/sms/johndoe/ef022088-d2b3-44ad-bf0d-a93d6d93b82c/')
self.assertEqual(response.status_code, 200)
response_data = json.loads(response.data)
self.assertEqual(response_data['status'], 'success')
def test_interval(self):
# enqueue a job
request_params = {
'job_id': 'ef022088-d2b3-44ad-bf0d-a93d6d93b82c',
'payload': {'message': 'Hello, world.'},
'interval': 1000
}
self.app.post(
'/enqueue/sms/johndoe/', data=json.dumps(request_params),
content_type='application/json')
# change the interval
request_params = {
'interval': 5000
}
response = self.app.post(
'/interval/sms/johndoe/', data=json.dumps(request_params),
content_type='application/json')
response_data = json.loads(response.data)
self.assertEqual(response_data['status'], 'success')
def test_interval_fail(self):
# change the interval
request_params = {
'interval': 5000
}
response = self.app.post(
'/interval/sms/johndoe/', data=json.dumps(request_params),
content_type='application/json')
response_data = json.loads(response.data)
self.assertEqual(response_data['status'], 'failure')
def test_metrics(self):
response = self.app.get('/metrics/')
response_data = json.loads(response.data)
self.assertEqual(response_data['status'], 'success')
self.assertIn('queue_types', response_data)
self.assertIn('enqueue_counts', response_data)
self.assertIn('dequeue_counts', response_data)
def test_metrics_with_queue_type(self):
response = self.app.get('/metrics/sms/')
response_data = json.loads(response.data)
self.assertEqual(response_data['status'], 'success')
self.assertIn('queue_ids', response_data)
def test_metrics_with_queue_type_and_queue_id(self):
response = self.app.get('/metrics/sms/johndoe/')
response_data = json.loads(response.data)
self.assertEqual(response_data['status'], 'success')
self.assertIn('queue_length', response_data)
self.assertIn('enqueue_counts', response_data)
self.assertIn('dequeue_counts', response_data)
def tearDown(self):
    """Flush the redis test database after every test, so tests stay isolated."""
    self.r.flushdb()
if __name__ == '__main__':
unittest.main()
|
{
### Finance ###
"aidBill": {
W3Const.w3ElementType: W3Const.w3TypeApi,
W3Const.w3ApiName: "bill",
W3Const.w3ApiParams: [
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "from"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "to"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: W3Const.w3SessionKey
}],
W3Const.w3ApiResult: {
W3Const.w3ApiResultData: [
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "owner"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "datetime"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "amount"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "currency"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "category"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "paymentmode"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "note"
}]
},
W3Const.w3ApiHandler: "EJGetBill"
},
"aidDebt": {
W3Const.w3ElementType: W3Const.w3TypeApi,
W3Const.w3ApiName: "debt",
W3Const.w3ApiParams: [
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "from"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "to"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: W3Const.w3SessionKey
}],
W3Const.w3ApiResult: {
W3Const.w3ApiResultData: [
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "start"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "end"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "amount"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "balance"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "note"
}]
},
W3Const.w3ApiHandler: "EJGetDebt"
},
"aidIncome": {
W3Const.w3ElementType: W3Const.w3TypeApi,
W3Const.w3ApiName: "income",
W3Const.w3ApiParams: [
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "from"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "to"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: W3Const.w3SessionKey
}],
W3Const.w3ApiResult: {
W3Const.w3ApiResultData: [
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "owner"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "datetime"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "amount"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "currency"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "category"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "note"
}]
},
W3Const.w3ApiHandler: "EJGetIncome"
},
"aidFinanceReport": {
W3Const.w3ElementType: W3Const.w3TypeApi,
W3Const.w3ApiName: "financereport",
W3Const.w3ApiParams: [
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "month"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: W3Const.w3SessionKey
}],
W3Const.w3ApiResult: {
W3Const.w3ApiResultData: [
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "income"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "deposit"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "debt"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "consume"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "balance"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "category"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "paymentmode"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "incomeyear"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "deposityear"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "debtyear"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "consumeyear"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "balanceyear"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "categoryyear"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "paymentmodeyear"
}]
},
W3Const.w3ApiHandler: "EJGetFinanceReport"
},
"aidFinanceEvent": {
W3Const.w3ElementType: W3Const.w3TypeApi,
W3Const.w3ApiName: "financeevent",
W3Const.w3ApiParams: [
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "name"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: W3Const.w3SessionKey
}],
W3Const.w3ApiResult: {
W3Const.w3ApiResultData: [
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "name"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "budget"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "balance"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "note"
}]
},
W3Const.w3ApiHandler: "EJGetFinanceEvent"
},
"aidAddBill": {
W3Const.w3ElementType: W3Const.w3TypeApi,
W3Const.w3ApiName: "addbill",
W3Const.w3ApiParams: [
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "owner"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "datetime"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "amount"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "currency"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "category"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "paymentmode"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "note"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "event"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: W3Const.w3SessionKey
}],
W3Const.w3ApiHandler: "EJAddBill"
},
"aidAddDebt": {
W3Const.w3ElementType: W3Const.w3TypeApi,
W3Const.w3ApiName: "adddebt",
W3Const.w3ApiParams: [
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "start"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "end"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "amount"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "balance"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "note"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: W3Const.w3SessionKey
}],
W3Const.w3ApiHandler: "EJAddDebt"
},
"aidAddIncome": {
W3Const.w3ElementType: W3Const.w3TypeApi,
W3Const.w3ApiName: "addincome",
W3Const.w3ApiParams: [
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "owner"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "datetime"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "amount"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "currency"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "category"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "note"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: W3Const.w3SessionKey
}],
W3Const.w3ApiHandler: "EJAddIncome"
},
"aidAddFinanceEvent": {
W3Const.w3ElementType: W3Const.w3TypeApi,
W3Const.w3ApiName: "addfinanceevent",
W3Const.w3ApiParams: [
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "name"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "budget"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "note"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: W3Const.w3SessionKey
}],
W3Const.w3ApiHandler: "EJAddFinanceEvent"
},
### Note ###
"aidNote": {
W3Const.w3ElementType: W3Const.w3TypeApi,
W3Const.w3ApiName: "note",
W3Const.w3ApiParams: [
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "id"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: W3Const.w3SessionKey
}],
W3Const.w3ApiResult: {
W3Const.w3ApiResultData: [
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "title"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "tag"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "type"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "note"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "error"
}]
},
W3Const.w3ApiHandler: "EJGetNote"
},
"aidNoteTitle": {
W3Const.w3ElementType: W3Const.w3TypeApi,
W3Const.w3ApiName: "notetitle",
W3Const.w3ApiParams: [
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "tag"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: W3Const.w3SessionKey
}],
W3Const.w3ApiResult: {
W3Const.w3ApiResultData: [
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "id"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "title"
}]
},
W3Const.w3ApiHandler: "EJGetNoteTitle"
},
"aidAddNote": {
W3Const.w3ElementType: W3Const.w3TypeApi,
W3Const.w3ApiName: "addnote",
W3Const.w3ApiParams: [
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "title"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "tag"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "type"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: W3Const.w3SessionKey
}],
W3Const.w3ApiPost: [
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "note"
}],
W3Const.w3ApiHandler: "EJAddNote",
W3Const.w3ApiListener: [
"EJGotoNotePage(w3PlaceHolder_1, w3PlaceHolder_2)"
]
},
"aidModifyNote": {
W3Const.w3ElementType: W3Const.w3TypeApi,
W3Const.w3ApiName: "modifynote",
W3Const.w3ApiParams: [
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "id"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: W3Const.w3SessionKey
}],
W3Const.w3ApiPost: [
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "note"
}],
W3Const.w3ApiHandler: "EJModifyNote",
W3Const.w3ApiListener: [
"EJGotoNotePage(w3PlaceHolder_1, w3PlaceHolder_2)"
]
},
### Calendar ###
"aidCalendarEvent": {
W3Const.w3ElementType: W3Const.w3TypeApi,
W3Const.w3ApiName: "calendarevent",
W3Const.w3ApiParams: [
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "month"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: W3Const.w3SessionKey
}],
W3Const.w3ApiResult: {
W3Const.w3ApiResultData: [
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "name"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "datetime"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "repeatmonth"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "note"
}]
},
W3Const.w3ApiHandler: "EJGetCalendarEvent"
},
"aidAddCalendarEvent": {
W3Const.w3ElementType: W3Const.w3TypeApi,
W3Const.w3ApiName: "addcalendarevent",
W3Const.w3ApiParams: [
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "name"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "datetime"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "repeatmonth"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "note"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: W3Const.w3SessionKey
}],
W3Const.w3ApiHandler: "EJAddCalendarEvent",
W3Const.w3ApiListener: [
"EJGotoCalendarPage(w3PlaceHolder_1, w3PlaceHolder_2)"
]
},
### Journey ###
"aidJourney": {
W3Const.w3ElementType: W3Const.w3TypeApi,
W3Const.w3ApiName: "journey",
W3Const.w3ApiParams: [
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "from"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "to"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: W3Const.w3SessionKey
}],
W3Const.w3ApiResult: {
W3Const.w3ApiResultData: [
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "id"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "name"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "datetime"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "traveler"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "event"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "balance"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "note"
}]
},
W3Const.w3ApiHandler: "EJGetJourney"
},
"aidAddJourney": {
W3Const.w3ElementType: W3Const.w3TypeApi,
W3Const.w3ApiName: "addjourney",
W3Const.w3ApiParams: [
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "name"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "datetime"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "traveler"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "event"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "note"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: W3Const.w3SessionKey
}],
W3Const.w3ApiHandler: "EJAddJourney"
},
"aidAddJourneyPlace": {
W3Const.w3ElementType: W3Const.w3TypeApi,
W3Const.w3ApiName: "addjourneyplace",
W3Const.w3ApiParams: [
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "jid"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "name"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "datetime"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "latitude"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "longitude"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "remark"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "note"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: W3Const.w3SessionKey
}],
W3Const.w3ApiHandler: "EJAddJourneyPlace"
},
"aidAddPOI": {
W3Const.w3ElementType: W3Const.w3TypeApi,
W3Const.w3ApiName: "addpoi",
W3Const.w3ApiParams: [
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "name"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "latitude"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "longitude"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "note"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: W3Const.w3SessionKey
}],
W3Const.w3ApiHandler: "EJAddPOI"
},
"aidAddPOIToJourney": {
W3Const.w3ElementType: W3Const.w3TypeApi,
W3Const.w3ApiName: "addpoitojourney",
W3Const.w3ApiParams: [
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "poi"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "journey"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "datetime"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "remark"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "note"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: W3Const.w3SessionKey
}],
W3Const.w3ApiHandler: "EJAddPOIToJourney"
},
"aidJourneyPlace": {
W3Const.w3ElementType: W3Const.w3TypeApi,
W3Const.w3ApiName: "journeyplace",
W3Const.w3ApiParams: [
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "jid"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: W3Const.w3SessionKey
}],
W3Const.w3ApiResult: {
W3Const.w3ApiResultData: [
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "name"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "latitude"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "longitude"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "datetime"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "remark"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "note"
}]
},
W3Const.w3ApiHandler: "EJGetJourneyPlace"
},
"aidAllPlace": {
W3Const.w3ElementType: W3Const.w3TypeApi,
W3Const.w3ApiName: "allplace",
W3Const.w3ApiParams: [
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: W3Const.w3SessionKey
}],
W3Const.w3ApiResult: {
W3Const.w3ApiResultData: [
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "name"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "latitude"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "longitude"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "datetime"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "remark"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "note"
}]
},
W3Const.w3ApiHandler: "EJGetAllPlace"
},
"aidAllPOI": {
W3Const.w3ElementType: W3Const.w3TypeApi,
W3Const.w3ApiName: "allpoi",
W3Const.w3ApiParams: [
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: W3Const.w3SessionKey
}],
W3Const.w3ApiResult: {
W3Const.w3ApiResultData: [
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "poiid"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "name"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "latitude"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: "longitude"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "note"
}]
},
W3Const.w3ApiHandler: "EJGetAllPOI"
},
### Others ###
"aidLogin": {
W3Const.w3ElementType: W3Const.w3TypeApi,
W3Const.w3ApiName: "login",
W3Const.w3ApiParams: [
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "username"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeString,
W3Const.w3ApiDataValue: "password"
}],
W3Const.w3ApiResult: {
W3Const.w3ApiResultData: [
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeNum,
W3Const.w3ApiDataValue: W3Const.w3SessionKey
}]
},
W3Const.w3ApiHandler: "EJLogin"
}
}
|
import pygame
class GameObject:
    """
    An abstract base class laying the foundation for all objects in the engine.

    Subclasses are expected to override update() and render().
    """

    def __init__(self, position=None, max_vel=700.0):
        """
        GameObject constructor.

        Args:
            position: optional [x, y] starting position. Defaults to
                [50.0, 50.0]. The sequence is copied so instances never
                share (or mutate) a caller's list.
            max_vel: maximum velocity magnitude. Defaults to 700.0.

        This resolves the previous TODO: position and velocity are now
        optional parameters whose defaults match the old hard-coded values,
        so existing callers are unaffected.
        """
        # None sentinel instead of a mutable default argument.
        self.position = [50.0, 50.0] if position is None else list(position)
        self.maxVel = max_vel

    def update(self, tDelta, actions):
        """
        Abstract: advance this object's state by tDelta given input actions.

        Raises:
            NotImplementedError: always; subclasses must override.
        """
        raise NotImplementedError("Update not implemented in GameObject child class")

    def render(self, win):
        """
        Abstract: draw this object onto the provided display surface.

        Raises:
            NotImplementedError: always; subclasses must override.
        """
        raise NotImplementedError("Render not implemented in GameObject child class")
|
from spidev import SpiDev
import RPi.GPIO as GPIO
import time
import logging
#############################################################################
# ADC converter (MCP3008) helper class
class MCP3008:
    """Minimal MCP3008 ADC reader over SPI (10-bit values, 8 channels)."""

    def __init__(self, bus=0, device=0):
        """Remember the SPI address and open the device immediately."""
        self.bus = bus
        self.device = device
        self.spi = SpiDev()
        self.open()

    def open(self):
        """(Re)open the underlying SPI connection at the stored address."""
        self.spi.open(self.bus, self.device)

    def read(self, channel=0):
        """Sample one channel and return the raw 10-bit reading (0-1023)."""
        # Start bit, single-ended read of `channel`, then a padding byte.
        reply = self.spi.xfer2([1, (8 + channel) << 4, 0])
        # The 10-bit result spans the low 2 bits of byte 1 plus all of byte 2.
        return ((reply[1] & 3) << 8) + reply[2]

    def close(self):
        """Release the SPI device."""
        self.spi.close()
#############################################################################
# Logging: creation time, log level, process ID, message -> myapp_.log
logger = logging.getLogger('myApp')
hand = logging.FileHandler('myapp_.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(process)d %(message)s')
# Attach the string formatter to the file handler.
hand.setFormatter(formatter)
logger.addHandler(hand)
logger.setLevel(logging.INFO)
#############################################################################
# BCM pin assignments.
# NOTE(review): pir_s and GPIO_ECHO are both pin 25 and pir_s is never read
# afterwards -- presumably a leftover PIR sensor hookup; verify the wiring.
LED=16
pir_s=25
GPIO_TRIGGER = 18
GPIO_ECHO = 25
GPIO.setmode(GPIO.BCM)
GPIO.setup(LED, GPIO.OUT)
GPIO.setup(pir_s, GPIO.IN)
GPIO.setup(GPIO_TRIGGER,GPIO.OUT)
GPIO.setup(GPIO_ECHO,GPIO.IN)
adc = MCP3008()
try:
    while True:
        adc_value = adc.read( channel = 0 ) # read the light (photoresistor) sensor value
        stop = 0
        start = 0
        # Hold the trigger pin OFF first so the sensor settles.
        GPIO.output(GPIO_TRIGGER, False)
        time.sleep(2)
        # Emit the trigger pulse. The original comment said "10 us"
        # (Python overhead makes it ~100 us, which the HC-SR04 tolerates),
        # but sleep(0.1) is actually 100 ms.
        # NOTE(review): likely intended time.sleep(0.00001) -- confirm on hardware.
        GPIO.output(GPIO_TRIGGER, True)
        time.sleep(0.1)
        GPIO.output(GPIO_TRIGGER, False)
        # Timestamp when the echo pin turns ON (pulse sent)...
        while GPIO.input(GPIO_ECHO)==0:
            start = time.time()
        # ...and when it turns OFF again (reflection received).
        while GPIO.input(GPIO_ECHO)==1:
            stop = time.time()
        # Calculate pulse length
        elapsed = stop-start
        # The sound travels out and back, so halve the distance.
        # Speed of sound taken as 340 m/s (34000 cm/s); could be
        # temperature-corrected.
        if (stop and start):
            distance = (elapsed * 34000.0) / 2
            print("Distance : %.1f cm" % distance)
            if distance <= 150:
                logger.warning('사람 있음')
                if (adc_value == 0): # light sensor reads 0 => it is dark
                    print("LED ON : PIR %d"%adc_value)
                    GPIO.output(LED, True) # turn the light on
                    logger.info('불켜짐')
                    time.sleep(2)
                else:
                    print("LED OFF : PIR %d"%adc_value) # any light at all => keep LED off
                    GPIO.output(LED, False) # turn the light off
                    logger.debug('틀렸음~!!')
                    time.sleep(2)
            else:
                GPIO.output(LED, False)
                logger.error('사람 없음!')
except KeyboardInterrupt:
    # Ctrl-C: release the SPI device and reset GPIO state.
    adc.close()
    GPIO.cleanup()
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
# Reto:
"""
Reto #5 “Suma y multiplicación”
Instrucciones: añadiendo un extra al reto anterior ahora el usuario ingresará 3 números,
sumarás los 2 primeros y el resultado será multiplicado por el tercero.
Añade las consideraciones del punto decimal del reto anterior.
Ejemplo:
Datos de entrada:2, 3, 4
Resultado:20
"""
def main():
    """Read three numbers from stdin and print (a + b) * c to 2 decimals.

    Example: inputs 2, 3, 4 -> (2.0 + 3.0) * 4.0 = 20.00
    """
    # input() replaces the Python-2-only raw_input(), which raises
    # NameError on Python 3; the file already uses print() as a function.
    number_one = float(input("Ingrese un numero: "))
    number_two = float(input("Ingrese un numero: "))
    number_three = float(input("Ingrese un numero: "))
    result = (number_one + number_two) * number_three
    print("({} + {}) * {} = {:.2f}".format(number_one, number_two, number_three, result))
if __name__ == '__main__':
main() |
from typing import List
from pymongo.collection import Collection
from filemanager.dao.file import File
class FileListDao:
    """Abstract DAO defining read-only access to the stored file list."""
    def get_all_files(self):
        """Return every stored File. Concrete subclasses must implement."""
        pass
    def get_all_files_by_ids(self,file_ids:List[str]):
        """Return the Files whose ids appear in file_ids. Subclasses must implement."""
        pass
class FileListMongoDBDao(FileListDao):
    """MongoDB-backed implementation of FileListDao."""

    def __init__(self, collection: Collection):
        """Store the pymongo collection that holds the file documents."""
        self.collection = collection

    def get_all_files(self):
        """Return every document in the collection wrapped as a File."""
        # Comprehension instead of a manual append loop, for consistency
        # with get_all_files_by_ids.
        return [File(**doc) for doc in self.collection.find()]

    def get_all_files_by_ids(self, file_ids: List[str]):
        """Return the Files whose _id is one of file_ids."""
        return [File(**doc) for doc in self.collection.find({'_id': {'$in': file_ids}})]
# Copyright 2018 Jae Yoo Park
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import tensorflow as tf
import numpy as np
import utils
import os
import json
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
# Path & Hyperparameter
NUM_SEGMENTS = 400 # temporal segments sampled per video; should be large (400 here)
INPUT_PATHS = {'train': { # THUMOS 14 Validation Set (used as the training split)
    'rgb': '../train_data/rgb_features', # I3D RGB-stream features
    'flow': '../train_data/flow_features', # I3D optical-flow-stream features
    },
    'test': { # THUMOS 14 Test Set
        'rgb': '../test_data/rgb_features', # I3D RGB-stream features
        'flow': '../test_data/flow_features', # I3D optical-flow-stream features
    },
}
TEST_NUM = 210 # I excluded two falsely annotated videos, 270, 1496, following the SSN paper (https://arxiv.org/pdf/1704.06228.pdf)
ALPHA = 0.5 # RGB/Flow fusion weight used when building weighted T-CAMs
def test(sess, model, init, input_list, test_iter):
    """Run two-stream inference over the THUMOS'14 test set and dump results.

    For each test video: restores the RGB checkpoint and evaluates the model,
    then restores the Flow checkpoint and evaluates again; thresholds the class
    scores, builds (weighted, interpolated) T-CAMs per stream, derives temporal
    proposals, fuses the two streams when both fired, and finally writes the
    accumulated detections to results.json / results.txt in the checkpoint dir.

    Args:
        sess: active TF session.
        model: network exposing attention_weights / class_weight / class_result
            tensors, fed through the model.X placeholder.
        init: variable initializer op, re-run before each checkpoint restore.
        input_list: dict with 'ckpt' (paths incl. 'rgb', 'flow', 'path'),
            'scale' (interpolation factor) and 'class_threshold'.
        test_iter: training iteration whose checkpoints are restored.
    """
    ckpt = input_list['ckpt']
    scale = input_list['scale']
    class_threshold = input_list['class_threshold']
    rgb_saver = tf.train.Saver()
    flow_saver = tf.train.Saver()
    test_vid_list = open('THUMOS14_test_vid_list.txt', 'r') # maps 'video number' (line index) to 'video name'
    lines = test_vid_list.read().splitlines()
    # Output skeleton in the ActivityNet-style JSON format.
    final_result = {}
    final_result['version'] = 'VERSION 1.3'
    final_result['results'] = {}
    final_result['external_data'] = {'used': True, 'details': 'Features from I3D Net'}
    for i in range(1, TEST_NUM + 1):
        vid_name = lines[i - 1]
        # Load pre-extracted per-segment features for this video.
        rgb_features, flow_features, temp_seg, vid_len = utils.processTestVid(i, INPUT_PATHS['test'],
                                                                              NUM_SEGMENTS)
        rgb_features = rgb_features.astype(np.float32)
        flow_features = flow_features.astype(np.float32)
        # RGB stream: re-init, restore the RGB checkpoint, evaluate.
        sess.run(init)
        rgb_saver.restore(sess, os.path.join(ckpt['rgb'], 'rgb_' + str(test_iter)))
        rgb_class_w = tf.get_default_graph().get_tensor_by_name('Classification/class_weight/kernel:0').eval()
        rgb_attention, rgb_raw, rgb_class_result = sess.run(
            [model.attention_weights, model.class_weight, model.class_result],
            feed_dict={model.X: rgb_features})
        # Flow stream: same graph, flow checkpoint weights.
        sess.run(init)
        flow_saver.restore(sess, os.path.join(ckpt['flow'], 'flow_' + str(test_iter)))
        flow_class_w = tf.get_default_graph().get_tensor_by_name('Classification/class_weight/kernel:0').eval()
        flow_attention, flow_raw, flow_class_result = sess.run(
            [model.attention_weights, model.class_weight, model.class_result],
            feed_dict={model.X: flow_features})
        # Classes whose score exceeds the threshold, per stream.
        rgb_class_prediction = np.where(rgb_class_result > class_threshold)[1]
        flow_class_prediction = np.where(flow_class_result > class_threshold)[1]
        rgb_tCam = utils.get_tCAM(rgb_features, rgb_class_w)
        flow_tCam = utils.get_tCAM(flow_features, flow_class_w)
        r_check = False
        f_check = False
        if rgb_class_prediction.any():
            r_check = True
            # Weighted T-CAM (RGB primary, fused with Flow by ALPHA).
            rgb_wtCam = utils.get_wtCAM(rgb_tCam, flow_tCam, rgb_attention, ALPHA, rgb_class_prediction)
            # Interpolate W-TCAM to a finer temporal resolution.
            rgb_int_wtCam = utils.interpolated_wtCAM(rgb_wtCam, scale)
            # Candidate segment list over the interpolated map.
            rgb_temp_idx = utils.get_tempseg_list(rgb_int_wtCam, len(rgb_class_prediction))
            # Temporal proposals in video time.
            rgb_temp_prop = utils.get_temp_proposal(rgb_temp_idx, rgb_int_wtCam, rgb_class_prediction,
                                                    scale, vid_len)
        if flow_class_prediction.any():
            f_check = True
            # Weighted T-CAM (Flow primary, fused with RGB by 1 - ALPHA).
            flow_wtCam = utils.get_wtCAM(flow_tCam, rgb_tCam, flow_attention, 1 - ALPHA, flow_class_prediction)
            # Interpolate W-TCAM
            flow_int_wtCam = utils.interpolated_wtCAM(flow_wtCam, scale)
            # Candidate segment list over the interpolated map.
            flow_temp_idx = utils.get_tempseg_list(flow_int_wtCam, len(flow_class_prediction))
            # Temporal proposals in video time.
            flow_temp_prop = utils.get_temp_proposal(flow_temp_idx, flow_int_wtCam, flow_class_prediction,
                                                     scale, vid_len)
        if r_check and f_check:
            # Fuse the two streams and perform non-maximum suppression.
            temp_prop = utils.integrated_prop(rgb_temp_prop, flow_temp_prop, list(rgb_class_prediction),
                                              list(flow_class_prediction))
            # NOTE(review): this branch wraps temp_prop in a list while the
            # single-stream branches pass the proposals directly -- presumably
            # integrated_prop returns a flat proposal; confirm against
            # utils.result2json.
            final_result['results'][vid_name] = utils.result2json([temp_prop])
        elif r_check and not f_check:
            final_result['results'][vid_name] = utils.result2json(rgb_temp_prop)
        elif not r_check and f_check:
            final_result['results'][vid_name] = utils.result2json(flow_temp_prop)
        utils.inf_progress(i, TEST_NUM, 'Progress', 'Complete', 1, 50)
    # Persist the detections next to the checkpoints.
    json_path = os.path.join(ckpt['path'], 'results.json')
    with open(json_path, 'w') as fp:
        json.dump(final_result, fp)
    txt_path = os.path.join(ckpt['path'], 'results.txt')
    with open(txt_path, 'w') as tp:
        utils.json2txt(final_result['results'], tp)
    test_vid_list.close()
|
def a():
    """Do nothing and return None; demonstrates a function with no return value."""
    return None
def b():
    """Return the fixed string '올라프'."""
    name = '올라프'
    return name
def c(p):
    """Return p tripled: numeric multiplication or sequence repetition."""
    tripled = p * 3
    return tripled
def d(p):
    """Return True exactly when p equals 1, else False."""
    # Direct comparison replaces the redundant if/else returning True/False.
    return p == 1
# Call each helper once and keep the results in variables...
result1 = a()
result2 = b()
result3 = c('올라프')
result4 = c(10)
result5 = d(1)
result6 = d(2)
# ...then print the stored results.
print(result1)
print(result2)
print(result3)
print(result4)
print(result5)
print(result6)
print('----------------------')
# Same helpers, but called inline inside print() with fresh arguments.
print(a())
print(b())
print(c('둘리'))
print(c(20))
print(d(1))
print(d(2))
|
# -*- coding=utf-8 -*-
"""The plugin of the pytest.
The pytest plugin hooks do not need to be imported into any test code, it will
load automatically when running pytest.
References:
https://docs.pytest.org/en/2.7.3/plugins.html
"""
import pytest
from rayvision_utils.exception.exception import CGFileNotExistsError
from rayvision_houdini.analyze_houdini import AnalyzeHoudini
def test_get_save_version(houdini, cg_file_h):
    """get_save_version raises when the referenced cg file does not exist."""
    with pytest.raises(CGFileNotExistsError):
        houdini.get_save_version(cg_file_h["cg_file"])
def test_find_location(houdini, mocker, tmpdir):
    """find_location returns the patched cg-file path, stringified."""
    expected = tmpdir.join('muti_layer_test.hip')
    location_mock = mocker.patch.object(AnalyzeHoudini, 'find_location')
    location_mock.return_value = expected
    assert houdini.find_location() == str(expected)
|
# Demo of slicing, iteration, and common str methods.
sample = 'string'
print(sample[:4:2])  # every 2nd char of the first 4 -> 'sr'
for ch in sample:
    print(ch + '\n')  # each character followed by a blank line
print(len(sample))
print(sample.count('t'))
print(sample.title())
print(sample.capitalize())

# Joining a list of strings with a separator.
separator = '-'
letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']
print(separator.join(letters))

# Splitting a string back into a list.
joined = 'a-b--d-g-h-r-hg-d-d-g-f-e-t-y-q-f'
print(joined.split('-'))
for part in joined.split('-'):
    print(part)
|
# mod_wsgi entry point (Python 2): activate the app's virtualenv, then
# expose the Flask app object under the name `application` that mod_wsgi expects.
activate_this = '/var/www/postmash/bin/activate_this.py'
# Execute the venv activation script so its site-packages resolve
# (execfile is Python-2-only).
execfile(activate_this, dict(__file__=activate_this))
import sys
# NOTE(review): debug print of the import path; consider removing in production.
print sys.path
from postmash import app as application
|
# Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
# Copyright [2016-2023] EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module mainly implements python's counterpart of GuestProcess. Read
the later for more information about the JSON protocol used to communicate.
"""
import json
import os
import sys
import traceback
import unittest
import warnings
from . import params
__version__ = "5.0"
class Job:
    """Dummy container: attributes holding job-related information are attached dynamically."""
class CompleteEarlyException(Exception):
    """Raised by a BaseRunnable subclass to signal an early *successful* end."""
class JobFailedException(Exception):
    """Raised by a BaseRunnable subclass to signal an early *unsuccessful* end."""
class HiveJSONMessageException(Exception):
    """Raised when a JSON message from GuestProcess cannot be parsed."""
class LostHiveConnectionException(Exception):
    """Raised when the communication pipe with the Perl side has been lost."""
class BaseRunnable:
    """This is the counterpart of GuestProcess. Note that most of the methods
    are private to be hidden in the derived classes.
    This class can be used as a base-class for people to redefine fetch_input(),
    run() and/or write_output() (and/or pre_cleanup(), post_cleanup()).
    Jobs are supposed to raise CompleteEarlyException in case they complete before
    reaching. They can also raise JobFailedException to indicate a general failure
    """
    # Private BaseRunnable interface
    #################################
    def __init__(self, read_fileno, write_fileno, debug):
        """Wrap the two pipe file descriptors shared with the parent process
        and immediately enter the worker loop; the constructor only returns
        once the parent ends the conversation."""
        # We need the binary mode to disable the buffering
        self.__read_pipe = os.fdopen(read_fileno, mode='rb', buffering=0)
        self.__write_pipe = os.fdopen(write_fileno, mode='wb', buffering=0)
        self.__pid = os.getpid()
        self.debug = debug
        self.__process_life_cycle()
    def __print_debug(self, *args):
        # Debug chatter goes to stderr so it cannot corrupt the JSON pipe.
        if self.debug > 1:
            print("PYTHON {0}".format(self.__pid), *args, file=sys.stderr)
    # FIXME: we can probably merge __send_message and __send_response
    def __send_message(self, event, content):
        """Serializes the message in JSON and sends it to the parent process"""
        def default_json_encoder(o):
            # Never fail the whole message: replace unserializable values.
            self.__print_debug("Cannot serialize {0} (type {1}) in JSON".format(o, type(o)))
            return 'UNSERIALIZABLE OBJECT'
        j = json.dumps({'event': event, 'content': content}, indent=None, default=default_json_encoder)
        self.__print_debug('__send_message:', j)
        # UTF8 encoding has never been tested. Just hope it works :)
        try:
            self.__write_pipe.write(bytes(j+"\n", 'utf-8'))
        except BrokenPipeError:
            raise LostHiveConnectionException("__write_pipe") from None
    def __send_response(self, response):
        """Sends a response message to the parent process"""
        self.__print_debug('__send_response:', response)
        # Like above, UTF8 encoding has never been tested. Just hope it works :)
        try:
            self.__write_pipe.write(bytes('{"response": "' + str(response) + '"}\n', 'utf-8'))
        except BrokenPipeError:
            raise LostHiveConnectionException("__write_pipe") from None
    def __read_message(self):
        """Read a message from the parent and parse it"""
        try:
            self.__print_debug("__read_message ...")
            l = self.__read_pipe.readline()
            self.__print_debug(" ... -> ", l[:-1].decode())
            return json.loads(l.decode())
        except BrokenPipeError:
            raise LostHiveConnectionException("__read_pipe") from None
        except ValueError as e:
            # HiveJSONMessageException is a more meaningful name than ValueError
            raise HiveJSONMessageException from e
    def __send_message_and_wait_for_OK(self, event, content):
        """Send a message and expects the response to be 'OK'"""
        self.__send_message(event, content)
        response = self.__read_message()
        if response['response'] != 'OK':
            raise HiveJSONMessageException("Received '{0}' instead of OK".format(response))
    def __process_life_cycle(self):
        """Simple loop: wait for job parameters, do the job's life-cycle"""
        # Handshake: announce the protocol version and the runnable defaults.
        self.__send_message_and_wait_for_OK('VERSION', __version__)
        self.__send_message_and_wait_for_OK('PARAM_DEFAULTS', self.param_defaults())
        self.__created_worker_temp_directory = None
        while True:
            self.__print_debug("waiting for instructions")
            config = self.__read_message()
            # A message without 'input_job' is the signal to shut down.
            if 'input_job' not in config:
                self.__print_debug("no params, this is the end of the wrapper")
                return
            self.__job_life_cycle(config)
    def __job_life_cycle(self, config):
        """Job's life-cycle. See GuestProcess for a description of the protocol to communicate with the parent"""
        self.__print_debug("__life_cycle")
        # Parameters
        self.__params = params.ParamContainer(config['input_job']['parameters'], self.debug > 1)
        # Job attributes
        self.input_job = Job()
        for x in ['dbID', 'input_id', 'retry_count']:
            setattr(self.input_job, x, config['input_job'][x])
        self.input_job.autoflow = True
        self.input_job.lethal_for_worker = False
        self.input_job.transient_error = True
        # Worker attributes
        self.debug = config['debug']
        # Which methods should be run
        steps = [ 'fetch_input', 'run' ]
        if self.input_job.retry_count > 0:
            # A retried job gets a chance to clean up the previous attempt first.
            steps.insert(0, 'pre_cleanup')
        if config['execute_writes']:
            steps.append('write_output')
            steps.append('post_healthcheck')
        self.__print_debug("steps to run:", steps)
        self.__send_response('OK')
        # The actual life-cycle
        died_somewhere = False
        try:
            for s in steps:
                self.__run_method_if_exists(s)
        except CompleteEarlyException as e:
            # Early *successful* completion: log it as a non-error warning.
            self.warning(e.args[0] if len(e.args) else repr(e), False)
        except LostHiveConnectionException as e:
            # Nothing we can do, let's just exit
            raise
        except Exception as e:
            died_somewhere = True
            self.warning( self.__traceback(e, 2), True)
        # post_cleanup always runs, even after a failure above.
        try:
            self.__run_method_if_exists('post_cleanup')
        except LostHiveConnectionException as e:
            # Nothing we can do, let's just exit
            raise
        except Exception as e:
            died_somewhere = True
            self.warning( self.__traceback(e, 2), True)
        job_end_structure = {'complete' : not died_somewhere, 'job': {}, 'params': {'substituted': self.__params.param_hash, 'unsubstituted': self.__params.unsubstituted_param_hash}}
        for x in [ 'autoflow', 'lethal_for_worker', 'transient_error' ]:
            job_end_structure['job'][x] = getattr(self.input_job, x)
        self.__send_message_and_wait_for_OK('JOB_END', job_end_structure)
    def __run_method_if_exists(self, method):
        """method is one of "pre_cleanup", "fetch_input", "run", "write_output", "post_cleanup".
        We only call the method if it exists, to save a trip to the database."""
        if hasattr(self, method):
            self.__send_message_and_wait_for_OK('JOB_STATUS_UPDATE', method)
            getattr(self, method)()
    def __traceback(self, exception, skipped_traces):
        """Remove "skipped_traces" lines from the stack trace (the eHive part)"""
        s1 = traceback.format_exception_only(type(exception), exception)
        l = traceback.extract_tb(exception.__traceback__)[skipped_traces:]
        s2 = traceback.format_list(l)
        return "".join(s1+s2)
    # Public BaseRunnable interface
    ################################
    def warning(self, message, is_error = False):
        """Store a message in the log_message table with is_error indicating whether the warning is actually an error or not"""
        self.__send_message_and_wait_for_OK('WARNING', {'message': message, 'is_error': is_error})
    def dataflow(self, output_ids, branch_name_or_code = 1):
        """Dataflows the output_id(s) on a given branch (default 1). Returns whatever the Perl side returns"""
        if branch_name_or_code == 1:
            # An explicit flow on branch 1 disables the automatic autoflow.
            self.input_job.autoflow = False
        self.__send_message('DATAFLOW', {'output_ids': output_ids, 'branch_name_or_code': branch_name_or_code, 'params': {'substituted': self.__params.param_hash, 'unsubstituted': self.__params.unsubstituted_param_hash}})
        return self.__read_message()['response']
    def worker_temp_directory(self):
        """Returns the full path of the temporary directory created by the worker.
        The directory is requested from the parent once and cached afterwards.
        """
        if self.__created_worker_temp_directory is None:
            self.__send_message('WORKER_TEMP_DIRECTORY', None)
            self.__created_worker_temp_directory = self.__read_message()['response']
        return self.__created_worker_temp_directory
    # Param interface
    ##################
    def param_defaults(self):
        """Returns the defaults parameters for this runnable"""
        return {}
    def param_required(self, param_name):
        """Returns the value of the parameter "param_name" or raises an exception
        if anything wrong happens or the value is None. The exception is
        marked as non-transient."""
        # Temporarily flag errors as non-transient: a missing required param
        # will not be fixed by retrying.
        t = self.input_job.transient_error
        self.input_job.transient_error = False
        v = self.__params.get_param(param_name)
        if v is None:
            raise params.NullParamException(param_name)
        self.input_job.transient_error = t
        return v
    def param(self, param_name, *args):
        """When called as a setter: sets the value of the parameter "param_name".
        When called as a getter: returns the value of the parameter "param_name".
        It does not raise an exception if the parameter (or another one in the
        substitution stack) is undefined"""
        # As a setter
        if len(args):
            return self.__params.set_param(param_name, args[0])
        # As a getter
        try:
            return self.__params.get_param(param_name)
        except KeyError as e:
            warnings.warn("parameter '{0}' cannot be initialized because {1} is missing !".format(param_name, e), params.ParamWarning, 2)
            return None
    def param_exists(self, param_name):
        """Returns True if the parameter exists and can be successfully
        substituted, None if the substitution fails, False if it is missing"""
        if not self.__params.has_param(param_name):
            return False
        try:
            self.__params.get_param(param_name)
            return True
        except KeyError:
            return None
    def param_is_defined(self, param_name):
        """Returns True if the parameter exists and can be successfully
        substituted to a defined value, None if the substitution fails,
        False if it is missing or evaluates as None"""
        e = self.param_exists(param_name)
        if not e:
            # False or None
            return e
        try:
            return self.__params.get_param(param_name) is not None
        except KeyError:
            return False
class BaseRunnableTestCase(unittest.TestCase):
    """Unit tests for the public parameter accessors of BaseRunnable."""
    def test_job_param(self):
        # The fake bypasses BaseRunnable.__init__ (which would open pipes and
        # enter the worker loop) and only wires up the private param container
        # (name-mangled attribute) plus a dummy job.
        class FakeRunnableWithParams(BaseRunnable):
            def __init__(self, d):
                self._BaseRunnable__params = params.ParamContainer(d)
                self.input_job = Job()
                self.input_job.transient_error = True
        # 'c' references an undeclared parameter; 'e' references itself
        # (infinite substitution loop).
        j = FakeRunnableWithParams({
            'a': 3,
            'b': None,
            'c': '#other#',
            'e': '#e#'
        })
        # param_exists
        self.assertIs( j.param_exists('a'), True, '"a" exists' )
        self.assertIs( j.param_exists('b'), True, '"b" exists' )
        self.assertIs( j.param_exists('c'), None, '"c"\'s existence is unclear' )
        self.assertIs( j.param_exists('d'), False, '"d" doesn\'t exist' )
        with self.assertRaises(params.ParamInfiniteLoopException):
            j.param_exists('e')
        # param_is_defined
        self.assertIs( j.param_is_defined('a'), True, '"a" is defined' )
        self.assertIs( j.param_is_defined('b'), False, '"b" is not defined' )
        self.assertIs( j.param_is_defined('c'), None, '"c"\'s defined-ness is unclear' )
        self.assertIs( j.param_is_defined('d'), False, '"d" is not defined (it doesn\'t exist)' )
        with self.assertRaises(params.ParamInfiniteLoopException):
            j.param_is_defined('e')
        # param
        self.assertIs( j.param('a'), 3, '"a" is 3' )
        self.assertIs( j.param('b'), None, '"b" is None' )
        with self.assertWarns(params.ParamWarning):
            self.assertIs( j.param('c'), None, '"c"\'s value is unclear' )
        with self.assertWarns(params.ParamWarning):
            self.assertIs( j.param('d'), None, '"d" is not defined (it doesn\'t exist)' )
        with self.assertRaises(params.ParamInfiniteLoopException):
            j.param('e')
        # param_required
        self.assertIs( j.param_required('a'), 3, '"a" is 3' )
        with self.assertRaises(params.NullParamException):
            j.param_required('b')
        with self.assertRaises(KeyError):
            j.param_required('c')
        with self.assertRaises(KeyError):
            j.param_required('d')
        with self.assertRaises(params.ParamInfiniteLoopException):
            j.param_required('e')
|
__author__ = 'Dell'
##
## init fav must be the first fav from src to dest
## reciprocated fav must be the first fav from dest to src
## reciprocated fav must have larger timestamp than init fav
##
# Python 2 script: computes, for each reciprocated "favorite" pair on Flickr,
# the delay (in days) between the initial favorite and its reciprocation.
import csv
from datetime import datetime
# NOTE(review): edgesreader is opened but never used below.
edgesreader = csv.reader(open("flickr-growth-sorted.txt", "r"), delimiter='\t')
favoritesreader = csv.reader(open("flickr-all-photo-favorite-markings.txt", "r"), delimiter='\t')
photosreader = csv.reader(open("flickr-all-photos.txt", "r"), delimiter = '\t')
writer = csv.writer(open("fav-reciprocation.csv", "wb"), delimiter='\t')
# photo id -> owner id
photo_owner = dict((int(row[0]), int(row[2])) for row in photosreader)
# (photo owner, favoriting user) -> time of that user's FIRST favorite
favorites = dict()
for row in favoritesreader: # all-photo-favorite-markings is sorted in ascending order of favorite time, code depends on that
    if (photo_owner[int(row[1])], int(row[0])) not in favorites:
        favorites[(photo_owner[int(row[1])], int(row[0]))] = datetime.strptime(row[2], "%Y-%m-%d %H:%M:%S")
# For each directed favorite, look for the reverse direction and, if it came
# later, record the delay in fractional days.
for dest, src in favorites:
    initdate = favorites[(dest, src)]
    try:
        reciprocatedate = favorites[(src, dest)]
    except KeyError:
        print "No favorite reciprocation"
        continue
    if reciprocatedate > initdate:
        difference = reciprocatedate-initdate
        days = difference.days
        seconds = difference.seconds
        writer.writerow([src, dest, float(days) + float(seconds)/86400.0])
|
#!/usr/bin/env python3
import sys
import os
import argparse
# Both environment variables must point at existing checkouts; abort early
# with a clear message otherwise, then make both importable.
SKOOLKIT_HOME = os.environ.get('SKOOLKIT_HOME')
if not SKOOLKIT_HOME:
    sys.stderr.write('SKOOLKIT_HOME is not set; aborting\n')
    sys.exit(1)
if not os.path.isdir(SKOOLKIT_HOME):
    sys.stderr.write('SKOOLKIT_HOME={}; directory not found\n'.format(SKOOLKIT_HOME))
    sys.exit(1)
sys.path.insert(0, SKOOLKIT_HOME)
MANICMINER_HOME = os.environ.get('MANICMINER_HOME')
if not MANICMINER_HOME:
    sys.stderr.write('MANICMINER_HOME is not set; aborting\n')
    sys.exit(1)
if not os.path.isdir(MANICMINER_HOME):
    sys.stderr.write('MANICMINER_HOME={}; directory not found\n'.format(MANICMINER_HOME))
    sys.exit(1)
sys.path.insert(0, '{}/sources'.format(MANICMINER_HOME))
from skoolkit.image import ImageWriter
from skoolkit.refparser import RefParser
from skoolkit.skoolhtml import Frame
from skoolkit.snapshot import get_snapshot
from manicminer import ManicMinerHtmlWriter
class ManicMiner(ManicMinerHtmlWriter):
    """Minimal concrete HtmlWriter: skips the usual skoolkit setup and only
    wires up the snapshot and the game ref file needed for image rendering."""
    def __init__(self, snapshot):
        self.snapshot = snapshot
        self.defaults = RefParser()
        self.ref_parser = RefParser()
        self.ref_parser.parse('{}/sources/mm.ref'.format(MANICMINER_HOME))
        self.init()
def _do_pokes(specs, snapshot):
for spec in specs:
addr, val = spec.split(',', 1)
step = 1
if '-' in addr:
addr1, addr2 = addr.split('-', 1)
addr1 = int(addr1)
if '-' in addr2:
addr2, step = [int(i) for i in addr2.split('-', 1)]
else:
addr2 = int(addr2)
else:
addr1 = int(addr)
addr2 = addr1
addr2 += 1
value = int(val)
for a in range(addr1, addr2, step):
snapshot[a] = value
def _place_willy(mm, cavern, spec):
cavern_addr = 45056 + 1024 * cavern
udg_array = mm._get_cavern_udgs(cavern_addr, 1, 0)
if spec:
values = []
for n in spec.split(','):
try:
values.append(int(n))
except ValueError:
values.append(None)
values += [None] * (3 - len(values))
x, y, frame = values
if x is not None and y is not None:
willy = mm._get_graphic(33280 + 32 * (frame or 0), 7)
bg_attr = mm.snapshot[cavern_addr + 544]
mm._place_graphic(udg_array, willy, x, y * 8, bg_attr)
return udg_array
def run(imgfname, options):
    """Render the requested Manic Miner cavern to *imgfname* as an image.

    Loads the game snapshot, applies any POKEs, optionally places Willy,
    crops to the requested geometry and writes the scaled image.
    """
    snapshot = get_snapshot('{}/build/manic_miner.z80'.format(MANICMINER_HOME))
    _do_pokes(options.pokes, snapshot)
    mm = ManicMiner(snapshot)
    udg_array = _place_willy(mm, options.cavern, options.willy)
    if options.geometry:
        # Geometry spec is WxH+X+Y, in tile units.
        wh, xy = options.geometry.split('+', 1)
        width, height = [int(n) for n in wh.split('x')]
        x, y = [int(n) for n in xy.split('+')]
        udg_array = [row[x:x + width] for row in udg_array[y:y + height]]
    frame = Frame(udg_array, options.scale)
    image_writer = ImageWriter()
    with open(imgfname, "wb") as f:
        image_writer.write_image([frame], f)
###############################################################################
# Begin: command-line interface
###############################################################################
parser = argparse.ArgumentParser(
    usage='mmimage.py [options] FILE.png',
    description="Create an image of a cavern in Manic Miner.",
    formatter_class=argparse.RawTextHelpFormatter,
    add_help=False
)
parser.add_argument('imgfname', help=argparse.SUPPRESS, nargs='?')
group = parser.add_argument_group('Options')
group.add_argument('-c', dest='cavern', type=int, default=0,
                   help='Create an image of this cavern (default: 0)')
group.add_argument('-g', dest='geometry', metavar='WxH+X+Y',
                   help='Create an image with this geometry')
group.add_argument('-p', dest='pokes', metavar='A[-B[-C]],V', action='append', default=[],
                   help="Do POKE N,V for N in {A, A+C, A+2C,...B} (this option may be\n"
                        "used multiple times)")
group.add_argument('-s', dest='scale', type=int, default=2,
                   help='Set the scale of the image (default: 2)')
group.add_argument('-w', dest='willy', metavar='X,Y[,F]',
                   help="Place Willy at (X,Y) with animation frame F (0-7)\n")
namespace, unknown_args = parser.parse_known_args()
# Any unrecognised argument or a missing output filename is a usage error.
if unknown_args or not namespace.imgfname:
    parser.exit(2, parser.format_help())
run(namespace.imgfname, namespace)
|
"""
Compute zodiacal light at a particular RA/Dec/Date
Get heliocentric lat/lon from lookup table
"""
import os
import numpy as np
import astropy.coordinates as co
import astropy.units as u
import pyfits
# astropy renamed ICRSCoordinates to ICRS; support both old and new versions.
if hasattr(co, 'ICRS'):
    icrs = co.ICRS
else:
    icrs = co.ICRSCoordinates
def datapath():
    """Absolute path of the package's bundled ``data`` directory."""
    here = os.path.dirname(__file__)
    return os.path.join(here, 'data')
def flt_zodi(image='ibhm04alq_flt.fits', verbose=True, pirzkal=False):
    """Predict the zodiacal background countrate for a single WFC3 exposure.

    Reads RA_TARG/DEC_TARG/EXPSTART/DATE-OBS/FILTER from the FITS header via
    the external `dfits`/`gethead` command-line tools, then evaluates either
    the synphot-based model (default) or Nor Pirzkal's analytic fit
    (pirzkal=True).
    """
    from subprocess import Popen,PIPE
    if 'gz' in image:
        # Gzipped input: stream through gunzip and grab the header with dfits.
        os.system('gunzip -c %s |dfits - | fitsort RA_TARG DEC_TARG EXPSTART DATE-OBS FILTER > /tmp/fits_head' %(image))
        params = open('/tmp/fits_head').readlines()[-1].split()
    else:
        stdout, stderr = Popen('gethead -x 0 %s RA_TARG DEC_TARG EXPSTART DATE-OBS FILTER' %(image), shell=True, stdout=PIPE).communicate()
        params = stdout.split()
    #
    ra, dec = np.cast[float](params[:2])
    # EXPSTART is an MJD; convert to JD.
    jd = float(params[2]) + 2400000.5
    date = params[3]
    filter = params[4]
    if verbose:
        print ra, dec, date
    lat, lng = helio_lat_lng(ra, dec, jd)
    if pirzkal:
        return nor_zodi(lat, lng, filter=filter)
    else:
        return compute_zodi(ra, dec, jd, FILTER=filter, verbose=verbose)
#
# from subprocess import Popen,PIPE
# stdout, stderr = Popen('dfits ibhm56f4q_raw.fits | fitsort RA_TARG DEC_TARG EXPSTART', shell=True, stdout=PIPE).communicate()
#
# stdout, stderr = Popen('gethead -x 0 ibhm56f4q_raw.fits RA_TARG DEC_TARG EXPSTART', shell=True, stdout=PIPE).communicate()
def go():
    """Debug helper: print the predicted F140W zodi for one fixed pointing/date."""
    jd = 5.651374299768E+04 + 2400000.5
    ra, dec = 34.440618, -5.1721396
    print compute_zodi(ra, dec, jd, FILTER='F140W', verbose=True)
def compute_zodi(ra, dec, jd, FILTER='F140W', verbose=False):
    """
    Get the predicted Zodiacal surface brightness and then fold it through the Synphot WFC3 filters
    """
    import pysynphot as S
    # Per-filter thermal background added on top of the zodi countrate.
    thermal = {'F160W':0.134, 'F140W':0.069, 'F105W':0.051, 'F110W':0.05, 'F125W':0.052, 'F128N': 0.051, 'F130N': 0.051, 'F132N': 0.051, 'F164N':0.0651, 'G141':0.1, 'G102':0.04, 'F098M':0.05}
    lat, lng = helio_lat_lng(ra, dec, jd)
    SB = get_zodi_SB(lat, lng)
    if verbose:
        print 'Lat, Lng: %f, %f, SB=%.2f' %(lat, lng, SB+2.5*np.log10(0.128254**2))
    # Renormalize the template zodiacal spectrum to the V-band surface
    # brightness, then observe it through the requested WFC3/IR bandpass.
    zod = S.FileSpectrum('%s/zodiacal_model_001.fits' %(datapath()))
    nz = zod.renorm(SB, S.units.VegaMag, S.ObsBandpass("V"))
    bp = S.ObsBandpass('wfc3,ir,%s' %(FILTER.lower()))
    obs = S.Observation(nz, bp)
    return obs.countrate()+thermal[FILTER]
def helio_lat_lng(ra=0, dec=0, jd=0, fits=None):
    """Ecliptic latitude/longitude of (ra, dec) relative to the Sun at *jd*.

    Returns (lat - lat_sun, dlon) in degrees, with the longitude difference
    wrapped to [-180, 180). If *fits* is given, ra/dec/jd are taken from its
    primary header instead.
    """
    import ephem
    import cPickle as pickle
    # Pre-tabulated solar RA/Dec vs JD (pickled arrays).
    #hjd, hra, hdec = np.loadtxt('/Users/brammer/WFC3/Backgrounds/Synphot/sun_coords.dat', unpack=True)
    fp = open('%s/sun_coords.pkl' %(datapath()),'rb')
    hjd = pickle.load(fp)
    hra = pickle.load(fp)
    hdec = pickle.load(fp)
    fp.close()
    ra_sun = np.interp(jd, hjd, hra)
    dec_sun = np.interp(jd, hjd, hdec)
    if fits is not None:
        # NOTE(review): ra_sun/dec_sun were interpolated above using the
        # *argument* jd; replacing jd here means the solar position does not
        # match the header epoch — confirm whether this is intended.
        head = pyfits.getheader(fits, ext=0)
        ra, dec, jd = head['RA_TARG'], head['DEC_TARG'], head['EXPSTART']
    #return dec-sun_dec, ra-sun_ra
    #ra, dec = 34.440618, -5.1721396
    # Convert the target equatorial coordinates to ecliptic via pyephem.
    eq = icrs(ra=ra, dec=dec, unit=(u.deg, u.deg))
    try:
        # Compatibility shim across astropy versions for angle formatting.
        eq.ra.format = eq.ra.to_string
        eq.dec.format = eq.dec.to_string
    except:
        pass
    equat = ephem.Equatorial(str(eq.ra.format(sep=':', unit=u.hour)), str(eq.dec.format(sep=':', unit=u.deg)), epoch=ephem.J2000)
    eclip_obs = ephem.Ecliptic(equat)
    # Same conversion for the Sun's position.
    eq = icrs(ra=ra_sun, dec=dec_sun, unit=(u.deg, u.deg))
    try:
        eq.ra.format = eq.ra.to_string
        eq.dec.format = eq.dec.to_string
    except:
        pass
    equat = ephem.Equatorial(str(eq.ra.format(sep=':', unit=u.hour)), str(eq.dec.format(sep=':', unit=u.deg)), epoch=ephem.J2000)
    eclip_sun = ephem.Ecliptic(equat)
    #dlon = (eclip_obs.lon-eclip_sun.lon)/np.pi*180
    #print np.array([eclip_obs.lat, eclip_obs.lon, eclip_sun.lat, eclip_sun.lon])/np.pi*180
    # Wrap the longitude difference into [-180, 180).
    dlon = ((eclip_obs.lon - eclip_sun.lon)/np.pi*180 + 180) % 360 - 180
    return (eclip_obs.lat-eclip_sun.lat)/np.pi*180, dlon
def get_zodi_SB(lat, lng):
    """Look up the zodiacal V surface brightness for heliocentric ecliptic
    (lat, lng), in Vega mag per WFC3/IR pixel.

    The zodiac.txt table has latitudes in its first row and longitudes in
    its first column (descending); nearest-cell lookup on |lat|, |lng|.
    """
    mat = np.loadtxt('%s/zodiac.txt' %(datapath()))
    zlat = mat[0,1:]
    xlat = np.arange(len(zlat))
    zlng = mat[1:,0]
    xlng = np.arange(len(zlng))
    mat = mat[1:,1:]
    # Nearest table cell by interpolating the index arrays.
    ilat = int(np.round(np.interp(np.abs(lat), zlat, xlat)))
    # Longitude column is stored in descending order, hence the reversal.
    ilng = len(zlng)-1-int(np.round(np.interp(np.abs(lng), zlng[::-1], xlng)))
    val = mat[ilng, ilat]
    #### Vega V mag / pixel (0.128254 arcsec/pix WFC3/IR scale)
    # Dead code removed: SB was first computed with a (1/3600)^2 pixel area
    # and immediately overwritten by the line below.
    SB = 10-2.5*np.log10(val)-2.5*np.log10((0.128254/3600.)**2)
    return SB
#
def nor_zodi(la,lo,filter=None):
    """
    Nor's analytic fit for the zodiacal background at heliocentric ecliptic
    latitude *la* / longitude *lo* (degrees), optionally scaled per filter.
    """
    import numpy as n
    la = la * 1.
    lo = lo * 1.
    # Per-filter scaling relative to F160W; unknown filters fall back to 1.
    filter_scale = {
        "F098M": 0.661509846941,
        "F105W": 1.22608999699,
        "F110W": 2.05453276964,
        "F125W": 1.14614936917,
        "F140W": 1.46132279518,
        "F160W": 1.,
        "G141": 2.08,
    }
    mul = filter_scale.get(filter, 1.)
    def c(la,lo):
        # Product of the cosines of the two angles.
        return n.cos(lo/180*n.pi) * n.cos(la/180*n.pi)
    def e(la,lo):
        # Angular elongation derived from c().
        return n.arccos(c(lo, la))/n.pi*180
    def b(B):
        return 1.5 * (n.sqrt(1 + (B/1.5)**2) - 1 )
    # Fit coefficients.
    a0 = 0.333885
    a1 = 0.0266097
    a2 = 0.628677
    a3 = 1.12545
    a4 = 1.70214
    a5 = 0.842976
    a6 = 0.00038706
    a7 = 2111.
    cc = c(lo, la)
    poly = a2 + a3 * cc + a4 * (cc**2) + a5 * (cc**3)
    atten = 10**(-n.sin(b(la)/180*n.pi)/(a6 * (e(lo, la) + a7)))
    res = a0 + a1 * (1 - n.cos(b(la)/180*n.pi)) + poly * atten
    return res*mul
def compare_zodi():
    """
    Compare minimum background flux in a visit to the computed zodi
    """
    # NOTE(review): `plt` is used below but never imported in this module —
    # presumably matplotlib.pyplot via a star import elsewhere; confirm.
    import mywfc3.zodi
    from threedhst import catIO
    import glob
    from mywfc3.utils import gzfile
    filter='G141'
    asns = glob.glob('*%s_orbit.png' %(filter))
    colors = np.array(['blue', 'red', 'orange'])
    #colors = np.array(['blue', 'white', 'orange'])
    # fig = unicorn.plotting.plot_init(xs=6, aspect=0.7, left=0.12, right=0.12)
    # ax = fig.add_subplot(111)
    bg_min = np.ones(len(asns))
    zodi_predicted = bg_min*1.
    nor = bg_min*1.
    for i, asn in enumerate(asns):
        root = asn.split('_')[0]
        print root
        # Concatenate all orbit background traces for this visit and take
        # the visit minimum as the observed background level.
        os.system('cat %s*%s_orbit.dat > /tmp/%s.dat' %(root[:6], filter, root))
        bg1 = catIO.Readfile('/tmp/%s.dat' %(root), save_fits=False, force_lowercase=False)
        bg_min[i] = bg1.bg.min()
        files=glob.glob('%s*raw.fits*' %(root[:6]))
        # Predicted zodi from both models for the first exposure of the visit.
        zodi_predicted[i] = mywfc3.zodi.flt_zodi(gzfile(files[0]), verbose=False)
        nor[i] = mywfc3.zodi.flt_zodi(gzfile(files[0]), verbose=False, pirzkal=True)
    plt.scatter(bg_min, zodi_predicted, alpha=0.4, color='black', label='Synphot')
    plt.scatter(bg_min, nor, alpha=0.4, color='orange', label='Nor')
    plt.plot([0,4], [0,4], color='red', alpha=0.2, linewidth=3)
    plt.ylim(0.5,3.5); plt.xlim(0.5,3.5)
    plt.xlabel('Background, visit minimum')
    plt.ylabel('Predicted zodi')
    plt.legend(loc='lower right', prop={'size':10})
    plt.savefig('PredictedZodi_%s.pdf' %(filter))
|
import sys
# Debug aid: show the import search path before importing third-party code.
print(sys.path)
import requests
|
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 4 10:47:15 2012
@author: leonard
"""
import numpy as np
import pylab
class BuscaHarmonica:
    """Harmony Search: an optimisation/search algorithm inspired by musical
    performance.
    Parameters:
    - objective function (fo)
    - number of decision variables (N)
    NOTE(review): rand, uniform, randint, array, fabs and exp are used
    unqualified throughout this class but are never imported in this module
    (only `import numpy as np` / `import pylab` appear) — presumably a
    `from pylab import *` was intended; confirm before running.
    """
    def __init__(self):
        _tx_improv=1 #Improvisation rate - allows new musical notes to enter the harmony
        self._tx_par=0.2 #Pitch-adjustment rate: probability that a variable is perturbed. RELATED TO THE INTERVAL VALUES
        self._mi=10000 #maximum number of improvisations (maximum number of iterations/generations)
        self._tx_hmcr=0.5 #memory-consideration rate: probability of reusing a stored value. RELATED TO THE INTERVAL VALUES [between 0..1]
        self._fw=0.1 #Fret Width (perturbation size) - adjustment that can be applied to one variable of a harmony (vector element)
        self._hms=6 #Number of vectors kept in the harmony memory
        self._n_inst=2 #Number of instruments in the harmony memory - i.e. the problem dimension (number of decision variables)
        self.set_memoria_harmonica(self._hms,self._n_inst)
        self.set_nova_harmonia(self._n_inst)
    def funcao(self,x):
        """Objective function of the problem being solved."""
        return -(sum(fabs(x))*exp(-sum(x**2))) # Minimize (negative sign)
    def grafico(self):
        # Plot the objective function over a small input range (debug aid).
        entrada = pylab.arange(0, 20, 1)
        saida=self.funcao(entrada)
        pylab.plot(entrada, saida)
        pylab.xlabel('x1')
        pylab.ylabel('x2')
        pylab.title('f(x1,x2)')
        pylab.grid(True)
        pylab.show()
        #return 0
    def set_nova_harmonia(self,_p_nInst):
        """Create a fresh candidate harmony (decision vector)."""
        _vnh=[] #New harmony vector
        #TODO: draw the values from each variable's own bounds (here "instruments")...
        #_vnh.append(rand(_p_nInst)); #New harmony with values in 0..1, dimension _n_inst
        _vnh.append(array([uniform(-2,2),uniform(-2,2)]))
        self._v_nova_harmonia=_vnh
    def get_nova_harmonia(self):
        return self._v_nova_harmonia
    def set_memoria_harmonica(self,_pHms,_pNinst):
        """Create and initialise the harmony memory, made of n harmonies (vectors).
        Parameters:
        - number of vectors in the harmony memory
        - number of instruments in the harmony memory
        """
        _vh=[] #harmony vector
        for i in range(_pHms):
            _vh.append(rand(_pNinst))
            for j in range(_pNinst):
                _vh[i][j]=uniform(-2,2)
        self._mHarm=_vh
        #return self._mHarm
    def get_memoria_harmonica(self):
        return self._mHarm
    def improvisar_nova_memoria(self,_pHmcr,_pFw,_pHms):
        """Improvise a new harmony by combining harmonies already stored in
        the harmony memory.
        Parameters:
        - _pHmcr -> rate of picking a value from memory
        - _pFw -> perturbation rate applied to a harmony
        - _pHms -> harmony memory size
        """
        _r=uniform(-1,1) #random real value in [-1, 1]
        for j in range(self._n_inst):
            _iAle=randint(0,_pHms) #randomly pick one harmony from the memory (a matrix row)
            if (float(rand(1))<=self._tx_hmcr):
                self._v_nova_harmonia[0][j]=self._mHarm[_iAle][j]
                if (float(rand(1))<=self._tx_par):
                    self._v_nova_harmonia[0][j]=self._v_nova_harmonia[0][j]+_r*self._fw
            else:
                #self._v_nova_harmonia[0][j]=float(rand(1)) #random real values in 0..1
                self._v_nova_harmonia[0][j]=float(uniform(-2,2)) #random real values within the variable bounds
        return self._v_nova_harmonia
    def atualizar_memoria_harmonica(self,_pNovaHarmonia):
        """Check whether the new harmony is better than the worst harmony in
        the memory; if so, replace the worst one with the new one.
        Parameters:
        - _pNovaHarmonia -> the new harmony obtained from improvisar_nova_memoria"""
        #If some harmony is worse than the new one, replace the worst.
        #i.e. if the new harmony's objective value beats the worst stored one
        #(minimisation is assumed).
        self.pior_harmonia(self._mHarm)
        if (self.funcao(_pNovaHarmonia[0])<self._pVh):
            self._mHarm[self._pPh]=_pNovaHarmonia[0] #replace the worst memory entry with the new harmony
            self._melhorHarmonia=_pNovaHarmonia
        # NOTE(review): if the new harmony is never better, _melhorHarmonia
        # may not have been set yet when this returns — confirm.
        return self._melhorHarmonia
    def pior_harmonia(self,_pHarm):
        """Find the worst harmony in the harmony memory.
        Parameters:
        - _pHarm -> harmony memory
        Returns:
        - _pPh -> position of the worst harmony
        - _pVh -> worst objective value, at position _pPh
        """
        _f=0
        self._pPh=0 #Position of the worst harmony
        self._pVh=self.funcao(self._mHarm[0]) #Worst objective value f(x) in the harmony memory
        #scan for the worst stored value; since we minimise, the worst is the highest
        for i in range(1,self._hms):
            _f=self.funcao(self._mHarm[i]) #_f holds the objective value of harmony[i]
            if (_f>self._pVh):
                self._pVh=_f
                self._pPh=i
        return self._pVh,self._pPh
    def melhor_harmonia(self,_pHarm):
        """Find the best harmony in the harmony memory.
        Parameters:
        - _pHarm -> harmony memory
        Returns:
        - _pHarm[_mPh] -> best harmony, at position _mPh
        - _mH -> its objective value
        """
        _mPh=0 #Position of the best harmony
        _mH=self.funcao(self._mHarm[0]) #Best objective value f(x) in the harmony memory
        for i in range(1,self._hms):
            _f=self.funcao(self._mHarm[i]) #_f holds the objective value of harmony[i]
            if (_f>_mH):
                _mH=_f
                _mPh=i
        return _pHarm[_mPh],_mH
    def calculate(self):
        """Run the optimisation loop and return the best harmony found."""
        for k in range(self._mi): #up to the maximum number of iterations
            self.set_nova_harmonia(self._n_inst)
            self.improvisar_nova_memoria(self._tx_hmcr,self._fw,self._hms)
            self.atualizar_memoria_harmonica(self._v_nova_harmonia)
            print self.melhor_harmonia(self._mHarm)
        return self.melhor_harmonia(self._mHarm)
# Script entry: build the optimiser and run it immediately on import.
a=BuscaHarmonica()
a.calculate()
|
# Generated by Django 2.0.7 on 2019-01-06 21:05
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 2.0.7) schema migration altering three fields.
    # NOTE(review): max_length has no effect on IntegerField /
    # PositiveIntegerField and is silently ignored by Django; it is kept
    # here untouched because editing an applied migration changes its
    # deconstructed state — consider cleaning it up in a new migration.
    dependencies = [
        ('basedata', '0035_auto_20190106_2056'),
    ]
    operations = [
        migrations.AlterField(
            model_name='feedback_report',
            name='idnum',
            field=models.PositiveIntegerField(blank=True, max_length=8, null=True, verbose_name='序号'),
        ),
        migrations.AlterField(
            model_name='work_hour',
            name='thisid',
            field=models.IntegerField(default=0, max_length=8, verbose_name='序号'),
        ),
        migrations.AlterField(
            model_name='worknode',
            name='doc',
            field=models.IntegerField(blank=True, max_length=8, null=True, verbose_name='审批文件'),
        ),
    ]
|
from .base import *
from .mnist import *
|
from collections import defaultdict, deque
class Graph:
    """Directed graph with a breadth-first traversal."""
    def __init__(self):
        # Adjacency list: node -> list of successor nodes.
        self.graph = defaultdict(list)
    def addEdge(self, boy, girl):
        """Add a directed edge boy -> girl."""
        self.graph[boy].append(girl)
    def BFS(self, start):
        """Breadth-first traversal from *start*.

        Prints each visited node (unchanged behaviour) and additionally
        returns the visit order as a list (backward-compatible: the
        original returned None, which no caller could use).

        Bug fixes: `visited` was a list sized len(self.graph), which raises
        IndexError for any node label >= the number of adjacency keys; a
        set keyed by node handles arbitrary labels. list.pop(0) was O(n);
        deque.popleft() is O(1).
        """
        visited = {start}
        queue = deque([start])
        order = []
        while queue:
            node = queue.popleft()
            order.append(node)
            print(node, end=' | ')
            for neighbour in self.graph[node]:
                if neighbour not in visited:
                    visited.add(neighbour)
                    queue.append(neighbour)
        return order
# Demo: build the example graph and run a breadth-first search from node 2.
graph = Graph()
graph.addEdge(0, 1) # G
graph.addEdge(0, 2)
graph.addEdge(1, 2)
graph.addEdge(2, 0)
graph.addEdge(2, 3)
graph.addEdge(3, 3)
print("广度优先搜索的结果是(从2开始):")
graph.BFS(2) # H
|
import random
from time import time
# Highest wrap-around index used by up()/down()/left()/right(); together with
# the literal 10 in up()/right() this assumes a 10x10 map (indices 0..9).
ms=9
class Terrain:
    """One terrain tile type.

    Attributes: two-character display icon, movement cost, tree density,
    mining quality and food density.
    (The redundant class-level default attributes were removed: every
    instance always sets its own values in __init__.)
    """
    def __init__(self, icon, movement, trees, miningquality, foodDensity):
        self.icon = icon
        self.movement = movement
        self.trees = trees
        self.miningquality = miningquality
        # Bug fix: foodDensity was accepted by the constructor but never
        # stored on the instance.
        self.foodDensity = foodDensity
class Map:
    """Square MAPSIZE x MAPSIZE grid of Terrain tiles.

    Coordinate convention preserved from the original: get/setTerrainAtPoint
    index the grid as [x][y], while the rendering methods iterate
    [row][column].
    """
    def __init__(self, MAPSIZE):
        self.MAPSIZE = MAPSIZE
        # Bug fix: `terrains` was a *class* attribute, so every Map instance
        # shared (and kept appending to) one grid; make it per-instance.
        self.terrains = []
        for i in range(0, MAPSIZE):
            self.terrains.append([terrainTemplates["empty"] for _ in range(0, MAPSIZE)])
    def getTerrainAtPoint(self, x, y):
        """Return the Terrain stored at [x][y]."""
        return self.terrains[x][y]
    def setTerrainAtPoint(self, x, y, newterrain):
        """Replace the Terrain stored at [x][y]."""
        self.terrains[x][y] = newterrain
    def getMap(self):
        """Render the grid as text, one row of icons per line."""
        final = ""
        for row in self.terrains:
            final += "".join(tile.icon for tile in row) + "\n"
        return final
    def getMapWithPlayer(self, px, py):
        """Render the grid with 'PP' drawn at player position (px, py).

        Bug fix: the original condition `not px == terrainx and py == terrainy`
        parsed as `(not px == terrainx) and (py == terrainy)`, which drew
        'PP' over every tile outside the player's row.
        """
        final = ""
        for terrainy in range(0, self.MAPSIZE):
            for terrainx in range(0, self.MAPSIZE):
                if px == terrainx and py == terrainy:
                    final += "PP"
                else:
                    final += self.terrains[terrainy][terrainx].icon
            final += "\n"
        return final
# Terrain prototypes, keyed by name.
# Constructor argument order: Terrain(icon, movement, trees, miningquality, foodDensity)
terrainTemplates = {}
terrainTemplates["empty"] = Terrain(" ",0,0,0,0)
terrainTemplates["corruption"] = Terrain("XX",3,40,1,0)
terrainTemplates["forest"] = Terrain("||",2,40, 20,10)
terrainTemplates["plains"] = Terrain("__", 1, 2, 15,2)
terrainTemplates["mountains"] = Terrain("^^",2,5,45,5)
def translateKeyIntoInt(key):
    """Index of *key* in terrainTemplates' insertion order (None if absent)."""
    for position, name in enumerate(terrainTemplates.keys()):
        if name == key:
            return position
def up(y):
    """Wrap-around increment of a row index on a 10-row grid."""
    return y + 1 if y + 1 < 10 else 0
def down(y):
    """Wrap-around decrement of a row index (wraps to the ms-th row)."""
    return y - 1 if y >= 1 else ms
def left(x):
    """Wrap-around decrement of a column index (wraps to the ms-th column)."""
    return x - 1 if x >= 1 else ms
def right(x):
    """Wrap-around increment of a column index on a 10-column grid."""
    return x + 1 if x + 1 < 10 else ms
def printgenmap(generationMap):
    """Debug-print the spread-distance grid (note: x indexes the outer list,
    so the output is transposed relative to generationMap[x][y])."""
    for row_y in range(0, ms):
        cells = [str(generationMap[col_x][row_y]) for col_x in range(0, ms)]
        # Original emitted a trailing space after the last cell; keep it.
        print(" ".join(cells) + " ")
def generateRandomMap(mapobject, MAPSIZE):
    """Randomly seed one tile per terrain type, flood-spread the seeds over
    the map, then fill any leftover gaps with randomly sampled tiles.

    NOTE(review): the spread step wraps via up/down/left/right, which use
    the module-level `ms` (=9) and the literal 10, so this is only
    consistent for MAPSIZE == 10 — confirm before using other sizes.
    """
    # Distance-to-seed grid; 1000 means "not reached yet".
    generationMap = []
    for y in range(0, MAPSIZE):
        generationMap.append([])
        for x in range(0, MAPSIZE):
            generationMap[y].append(1000)
    # Bug fix: the original did `random.seed=time()`, which *replaced* the
    # random.seed function with a float instead of seeding the RNG.
    random.seed(time())
    # Set "seeds": one random location per terrain template.
    for value in terrainTemplates.values():
        x = random.randint(0, MAPSIZE - 1)
        y = random.randint(0, MAPSIZE - 1)
        mapobject.setTerrainAtPoint(x, y, value)
        generationMap[x][y] = 0
    print(mapobject.getMap())
    # Spread: each claimed tile repeatedly claims any neighbour whose
    # recorded distance is larger than its own.
    for i in range(0, 1000):
        for y in range(0, MAPSIZE):
            for x in range(0, MAPSIZE):
                if mapobject.getTerrainAtPoint(x, y).icon != " ":
                    if generationMap[x][up(y)] > generationMap[x][y]:
                        generationMap[x][up(y)] = generationMap[x][y] + 1
                        mapobject.setTerrainAtPoint(x, up(y), mapobject.getTerrainAtPoint(x, y))
                    if generationMap[x][down(y)] > generationMap[x][y]:
                        generationMap[x][down(y)] = generationMap[x][y] + 1
                        mapobject.setTerrainAtPoint(x, down(y), mapobject.getTerrainAtPoint(x, y))
                    if generationMap[left(x)][y] > generationMap[x][y]:
                        generationMap[left(x)][y] = generationMap[x][y] + 1
                        mapobject.setTerrainAtPoint(left(x), y, mapobject.getTerrainAtPoint(x, y))
                    if generationMap[right(x)][y] > generationMap[x][y]:
                        generationMap[right(x)][y] = generationMap[x][y] + 1
                        mapobject.setTerrainAtPoint(right(x), y, mapobject.getTerrainAtPoint(x, y))
    # Fill any remaining empty tiles with a randomly sampled placed tile.
    for y in range(0, MAPSIZE):
        for x in range(0, MAPSIZE):
            if mapobject.getTerrainAtPoint(x, y).icon == " ":
                mapobject.setTerrainAtPoint(x, y, mapobject.getTerrainAtPoint(random.randrange(0, MAPSIZE), random.randrange(0, MAPSIZE)))
    print(mapobject.getMap())
#!/usr/bin/env python3
import math
import collections
def half(length):
    """Chord of the halved angle: crd(θ/2) = sqrt(2 - sqrt(4 - crd²θ))."""
    supplement = math.sqrt(4 - length * length)
    return math.sqrt(2 - supplement)
def double(length):
    """Chord of the doubled angle: crd(2θ) = sqrt(crd²θ · (4 - crd²θ))."""
    sq = length * length
    return math.sqrt(sq * (4 - sq))
def sum_(table, alpha, beta):
    """Chord of (alpha + beta) from tabulated chords (Ptolemy's sum rule)."""
    crd_a = table[alpha]
    crd_b = table[beta]
    # sqrt(4 - crd²θ) is the chord of the supplementary angle (180° - θ).
    sup_a = math.sqrt(4 - crd_a**2)
    sup_b = math.sqrt(4 - crd_b**2)
    return (crd_a * sup_b + crd_b * sup_a) / 2
def sub_(table, alpha, beta):
    """Chord of (alpha - beta) from tabulated chords (Ptolemy's difference rule)."""
    crd_a = table[alpha]
    crd_b = table[beta]
    # sqrt(4 - crd²θ) is the chord of the supplementary angle (180° - θ).
    sup_a = math.sqrt(4 - (crd_a**2))
    sup_b = math.sqrt(4 - (crd_b**2))
    return (crd_a * sup_b - crd_b * sup_a) / 2
def triple(table, theta):
    """Chord of the tripled angle: crd(3θ) = 3·crd θ - crd³θ."""
    crd = table[theta]
    return 3 * crd - crd**3
def onethird(table, theta):
    """Experimental angle-trisection step (work in progress).

    Computes and prints a value derived from crd(θ); the actual cube-root
    extraction below is disabled (dead triple-quoted block) and the function
    currently returns None.
    """
    # (-3)/3.0 == -1.0, so math.pow(..., 3) is -1.0 and delta is
    # crd²θ/4 - 1.  NOTE(review): presumably the discriminant of the
    # depressed cubic for crd(θ/3) — confirm the intended formula.
    delta = (table[theta]**2)/4 + math.pow(((-3)/3.0), 3)
    print(theta/3.0, theta, delta);
    # Disabled draft of the root extraction (references an undefined `a0`).
    """
    a = math.sqrt((table[theta]**2)/4 + a0)
    b = -(table[theta]/2)
    print(b+a)
    print(b-a)
    print(math.pow(-3, 1.0/3.0))
    print("hello")
    c = math.pow(b+a, 1.0/3.0)
    d = math.pow(b-a, 1.0/3.0)
    print(d)
    return c+d
    """
def get36():
    """Chord of 36° — (√5 - 1)/2, the reciprocal golden ratio (pentagon side)."""
    return (math.sqrt(5) - 1) / 2
def build_chord_table():
    """Build a chord table à la Ptolemy (crd θ = 2·sin(θ/2), radius-2 circle).

    Starts from known chords (0°, 60°, 180°, 36°, and a small-angle value
    for 0.5°) and BFS-expands via halving, doubling, differences and
    tripling.  The sum and one-third rules are currently disabled.  Returns
    an OrderedDict mapping angle in degrees (float) -> chord length.
    """
    chord_table = collections.OrderedDict()
    crd36 = get36()
    # Seed queue of (angle, chord) pairs; 0.00872 ≈ crd 0.5°.
    Q = collections.deque([(0.0, 0.0), (60.0, 1.0), (180.0, 2.0), (36.0, crd36), (0.5, .00872)])
    #Q = collections.deque([(0.0, 0.0), (60.0, 1.0), (180.0, 2.0)])
    while (Q):
        (theta, chord) = Q.popleft()
        # Duplicate queue entries for the same angle simply overwrite.
        chord_table[theta] = chord
        # Half-angle, restricted to multiples of 0.5° not yet tabulated.
        h = theta / 2.0
        if (h >= 0.5 and h % 0.5 == 0 and h not in chord_table):
            Q.append((h, half(chord)))
        # Double angle (kept below 180°).
        d = theta * 2
        if (d < 180.0 and d not in chord_table):
            Q.append((d, double(chord)))
        # Differences against every chord already tabulated (both orders).
        for k, v in chord_table.items():
            s = theta + k
            #if s < 180.0 and s not in chord_table:
            #    Q.append((s, sum_(chord_table, theta, k)))
            s = theta - k
            if s > 0.0 and s not in chord_table:
                Q.append((s, sub_(chord_table, theta, k)))
            s = k - theta
            if s > 0.0 and s not in chord_table:
                Q.append((s, sub_(chord_table, k, theta)))
        # Triple angle.
        s = 3 * theta
        if s < 180.0 and s not in chord_table:
            Q.append((s, triple(chord_table, theta)))
        # Disabled one-third rule (onethird is still a work in progress).
        """
        h = theta/3.0
        if (h % 0.5 == 0):
            if (h > 0.5 and h not in chord_table):
                onethird(chord_table, theta)
                Q.append((h, onethird(chord_table, theta)))
        """
    return chord_table
def print_chord_table(table):
    """Print the table sorted by angle, four entries per line.

    Each chord is cross-checked against 2·sin(θ/2); mismatches (to six
    decimal places) are reported on their own "False: ..." line.  Ends with
    a total count.
    """
    count = 0
    for angle, chord in sorted(table.items()):
        if count % 4 == 0:
            print("")
        expected = 2 * math.sin(math.radians(angle / 2))
        if round(chord, 6) != round(expected, 6):
            print("False: ", angle, round(chord, 6), round(expected, 6))
        print("{}, {}, ".format(angle, round(chord, 6)), end="")
        count += 1
    print("\ntotal: {}".format(count))
def main():
    """Entry point: build the chord table and dump it to stdout."""
    chord_table = build_chord_table()
    print_chord_table(chord_table)


if __name__ == "__main__":
    main()
|
from quickbats.config import CONFIG
from quickbats.config import AUTH
from quickbats.config import TOKENS
def test_config_sections():
    """CONFIG must contain a "stripe" section."""
    assert "stripe" in CONFIG
def test_auth_keys():
    """AUTH is a plain dict carrying the QuickBooks client id."""
    assert isinstance(AUTH, dict)
    assert "quickbooks_client_id" in AUTH
def test_tokens_keys():
    """TOKENS must contain an access token entry."""
    assert "access_token" in TOKENS
|
# Standard Deviation
# This program computes the (population) standard deviation of a sequence of
# non-negative numbers, terminated by a -1.
# Standard deviation is: A measure of how spread-out data is.
# NOTE: You are not allowed to use a built-in standard deviation function from the libraries.
import math


def standard_deviation(values):
    """Return the population standard deviation of *values*.

    Raises ValueError on an empty sequence (the original crashed with
    ZeroDivisionError in that case).
    """
    if not values:
        raise ValueError("standard deviation requires at least one value")
    # 1) arithmetic mean
    mean = sum(values) / len(values)
    # 2-5) variance: mean of the squared deviations.  Bug fix: the original
    # took math.sqrt() of each deviation instead of squaring it, and divided
    # the running sum on every iteration instead of once at the end.
    variance = sum((v - mean) ** 2 for v in values) / len(values)
    # 6) standard deviation is the square root of the variance
    return math.sqrt(variance)


if __name__ == "__main__":
    numbers = []
    value = int(input("Please enter a sequence of non-negative numbers, terminated by a -1:\n"))
    while value >= 0:
        numbers.append(value)
        value = int(input())
    print("Standard Deviation: ", standard_deviation(numbers))
|
from components.fighter import Fighter
from components.ai import BasicMonster
from components.inventory import Inventory
from components.equipment import Equipment
from components.tome_factory import make_tome
import tcod
def component(name):
    """Instantiate and return a fresh component registered under *name*.

    Raises KeyError for unknown names (same as the original dict lookup).
    """
    # TODO: Change this into a proper factory
    # Map names to zero-argument constructors so only the *requested*
    # component is built; the original instantiated all six components on
    # every call just to return one of them.
    factories = {
        "PLAYER": lambda: Fighter(hp=60, defense=2, power=5, magic=1),
        "ORC": lambda: Fighter(hp=10, defense=0, power=3, xp=35),
        "TROLL": lambda: Fighter(hp=16, defense=1, power=4, xp=100),
        "BASIC": BasicMonster,
        "INVENTORY": lambda: Inventory(26),
        "EQUIPMENT": Equipment,
    }
    return factories[name]()
def make_item(name):
    """Create the item registered under *name*; delegates to the tome factory
    (currently every item is a tome)."""
    return make_tome(name)
|
# count = 10
# def test():
# # pass
# # count = 5
# # print(count) # 全局变量不可被修改,相当于重新创建了一个count,可以
#
# # count +=1 # 报错 #全局变量被调用后,再修改报错 count +=1 相当于count = count +1
#
# # print(count)
# # count = 5 # 报错 # 全局变量被调用后,再修改报错
# 原因是,在局部变量中有count变量,但是在count被定义之前就使用了count,所以报错
# 因为python首先会在局部作用域中找count,找到后,就不在去全局里面找了
#
# global count
# print(count)
# count = 5
# print(count)
#
# test()
'''
变量查找顺序 LEGB原则:局部>外层>当前模块中的全局>python内置作用域
只有模块,类,函数,才能引入新的作用域
对于一个变量,内部作用域先声明,就会覆盖外部变量,不声明直接使用,就会使用外部作用域的变量,且不能修改该变量
内部作用域变量要修改外部作用域的变量值时,全局变量要使用global 关键字,嵌套作用域变量要使用nonlocal关键字
'''
# 练习
# 全局变量和局部变量
# a = 10
# def test():
# # a = 11
# # print(a)
# # a = 12
# # print(a)
#
# # print(a)
# # a = 11
#
# global a
# a = 0
# print(a)
# print(a)
# test()
# 外部变量和局部变量
# Demonstration: enclosing (nonlocal) scope vs. local vs. global variables.
count = 10
def outer():
    """Show how `global` and `nonlocal` rebind names in outer scopes."""
    outer = 0            # local variable that shadows the function's own name
    global count         # rebinding below targets the module-level count
    count = 11
    print(count)         # -> 11 (the rebound global)
    print(outer)         # -> 0 (the local)
    def inner():
        nonlocal outer   # rebinds outer()'s local `outer`, not the global
        count = 100      # plain assignment: a new local, global untouched
        outer = '111'
        print(count)     # -> 100
        print(outer)     # -> '111'
    inner()
outer()
|
from flask import request
from projectmanager.app import app
from projectmanager.mongodb import (
project_userlist_collection
)
from projectmanager.dao.project_userlist import (
ProjectUserListMongoDBDao, ProjectUser
)
from projectmanager.utils.handle_api import handle_response, verify_request
META_SUCCESS = {'status': 200, 'msg': '修改成功!'}


@app.route('/project/user/edit', methods=['PUT'])
@verify_request(['project', 'access'], access='project_user_edit')
@handle_response
def project_user_edit():
    """PUT /project/user/edit — replace a project's member list.

    Expects a JSON body with `project_id` and a `users` list of
    ProjectUser keyword dicts.
    """
    payload = request.json
    project_id = payload.pop('project_id')
    dao = ProjectUserListMongoDBDao(
        project_userlist_collection
    )
    # Edit the project's members: build ProjectUser objects from the
    # submitted dicts and hand them to the DAO.
    replacement_users = [ProjectUser(**user) for user in payload['users']]
    dao.edit_users(project_id, replacement_users)
    return {
        'meta': META_SUCCESS
    }
|
def find(A, x):
    """Return the root of x in parent array A, compressing the path."""
    parent = A[x]
    if parent == x:
        return x
    root = find(A, parent)
    A[x] = root
    return root


def union(A, x, y):
    """Merge the sets containing x and y, keeping the smaller root index.

    The roots must be ordered before linking — picking them in call order
    can create a cycle and send find() into infinite recursion (original
    author's note, translated).
    """
    root_x = find(A, x)
    root_y = find(A, y)
    if root_x > root_y:
        root_x, root_y = root_y, root_x
    A[y] = root_x
    A[root_y] = root_x
# Driver: N vertices, M edges read from stdin.  Process the edges in
# *reverse*, re-adding them one at a time and tracking the number of
# disconnected vertex pairs ("inconvenience") with the union-find above.
# NOTE(review): appears to be a contest solution (AtCoder "Decayed
# Bridges"-style problem) — ANS[i] is the pair count after edges 0..i
# have been removed.
N, M = map( int, input().split())
E = [ list( map( int, input().split())) for _ in range(M)]
# With no edges at all, every one of the N*(N-1)//2 pairs is disconnected.
ans = N*(N-1)//2
# V[root] = size of the component rooted at `root`.
V = [1]*N
ANS = [0]*M
ANS[M-1] = ans
A = [ i for i in range(N)]
for i in range(M-1, 0,-1):
    a, b = E[i]
    a, b = find(A,a-1), find(A,b-1)
    if not a == b:
        # Joining two distinct components connects V[a]*V[b] new pairs.
        ans -= V[a]*V[b]
        V[a] = V[b] = V[a]+V[b]
        union(A,a,b)
    ANS[i-1] = ans
    if ans == 0:
        # Fully connected already; all earlier answers stay 0 (preset).
        break
for i in range(M):
    print(ANS[i])
|
import transaction
from freezegun import freeze_time
from io import BytesIO
from onegov.gazette.models import GazetteNotice
from onegov.pdf.utils import extract_pdf_info
from openpyxl import load_workbook
from tests.onegov.gazette.common import login_admin
from tests.onegov.gazette.common import login_editor_1
from tests.onegov.gazette.common import login_editor_2
from tests.onegov.gazette.common import login_editor_3
from tests.onegov.gazette.common import login_publisher
from tests.onegov.gazette.common import login_users
from tests.onegov.gazette.common import publish_issue
from unittest.mock import patch
from urllib.parse import parse_qs
from urllib.parse import urlparse
from webtest import TestApp as Client
def test_view_notices(gazette_app):
    """Walk two notices through drafted → submitted → rejected → accepted
    and verify per-user visibility of every state listing.

    editor_1 and editor_2 share a group while editor_3 has its own, so each
    notice must be visible to its author's group and the publisher, but not
    to editors of other groups.
    """
    with freeze_time("2017-11-01 11:00", tick=True):
        publisher = Client(gazette_app)
        login_publisher(publisher)
        editor_1 = Client(gazette_app)
        login_editor_1(editor_1)
        editor_2 = Client(gazette_app)
        login_editor_2(editor_2)
        editor_3 = Client(gazette_app)
        login_editor_3(editor_3)

        # Every listing starts out empty for every user.
        for user in (publisher, editor_1, editor_2, editor_3):
            for state in (
                'drafted', 'submitted', 'rejected', 'accepted', 'published'
            ):
                assert "Keine Meldungen" in user.get('/notices/' + state)

        # new notices
        manage = editor_1.get('/notices/drafted/new-notice')
        manage.form['title'] = "Erneuerungswahlen"
        manage.form['organization'] = '200'
        manage.form['category'] = '11'
        manage.form['issues'] = ['2017-44', '2017-45']
        manage.form['text'] = "1. Oktober 2017"
        manage.form['author_place'] = 'Govikon'
        manage.form['author_name'] = 'State Chancellerist'
        manage.form['author_date'] = '2019-01-01'
        manage.form.submit()

        manage = editor_3.get('/notices/drafted/new-notice')
        manage.form['title'] = "Kantonsratswahlen"
        manage.form['organization'] = '200'
        manage.form['category'] = '11'
        manage.form['issues'] = ['2017-44', '2017-45']
        manage.form['text'] = "1. Oktober 2017"
        manage.form['author_place'] = 'Govikon'
        manage.form['author_name'] = 'State Chancellerist'
        manage.form['author_date'] = '2019-01-01'
        manage.form.submit()

        for user in (publisher, editor_1, editor_2, editor_3):
            for state in ('submitted', 'rejected', 'accepted', 'published'):
                assert "Keine Meldungen" in user.get('/notices/' + state)
        assert "Erneuerungswahlen" in publisher.get('/notices/drafted')
        assert "Erneuerungswahlen" in editor_1.get('/notices/drafted')
        assert "Erneuerungswahlen" in editor_2.get('/notices/drafted')
        assert "Erneuerungswahlen" not in editor_3.get('/notices/drafted')
        assert "Kantonsratswahlen" in publisher.get('/notices/drafted')
        assert "Kantonsratswahlen" not in editor_1.get('/notices/drafted')
        # Bug fix: this assertion checked editor_1 twice; every parallel
        # section below checks editor_2 here.
        assert "Kantonsratswahlen" not in editor_2.get('/notices/drafted')
        assert "Kantonsratswahlen" in editor_3.get('/notices/drafted')

        # submit notices
        editor_1.get('/notice/erneuerungswahlen/submit').form.submit()
        editor_3.get('/notice/kantonsratswahlen/submit').form.submit()

        for user in (publisher, editor_1, editor_2, editor_3):
            for state in ('drafted', 'rejected', 'accepted', 'published'):
                assert "Keine Meldungen" in user.get('/notices/' + state)
        assert "Erneuerungswahlen" in publisher.get('/notices/submitted')
        assert "Erneuerungswahlen" in editor_1.get('/notices/submitted')
        assert "Erneuerungswahlen" in editor_2.get('/notices/submitted')
        assert "Erneuerungswahlen" not in editor_3.get('/notices/submitted')
        assert "Kantonsratswahlen" in publisher.get('/notices/submitted')
        assert "Kantonsratswahlen" not in editor_1.get('/notices/submitted')
        assert "Kantonsratswahlen" not in editor_2.get('/notices/submitted')
        assert "Kantonsratswahlen" in editor_3.get('/notices/submitted')

        # reject notices
        manage = publisher.get('/notice/erneuerungswahlen/reject')
        manage.form['comment'] = 'comment'
        manage.form.submit()

        manage = publisher.get('/notice/kantonsratswahlen/reject')
        manage.form['comment'] = 'comment'
        manage.form.submit()

        for user in (publisher, editor_1, editor_2, editor_3):
            for state in ('drafted', 'submitted', 'accepted', 'published'):
                assert "Keine Meldungen" in user.get('/notices/' + state)
        assert "Erneuerungswahlen" in publisher.get('/notices/rejected')
        assert "Erneuerungswahlen" in editor_1.get('/notices/rejected')
        assert "Erneuerungswahlen" in editor_2.get('/notices/rejected')
        assert "Erneuerungswahlen" not in editor_3.get('/notices/rejected')
        assert "Kantonsratswahlen" in publisher.get('/notices/rejected')
        assert "Kantonsratswahlen" not in editor_1.get('/notices/rejected')
        assert "Kantonsratswahlen" not in editor_2.get('/notices/rejected')
        assert "Kantonsratswahlen" in editor_3.get('/notices/rejected')

        # submit & accept notices
        editor_1.get('/notice/erneuerungswahlen/submit').form.submit()
        publisher.get('/notice/erneuerungswahlen/accept').form.submit()
        editor_3.get('/notice/kantonsratswahlen/submit').form.submit()
        publisher.get('/notice/kantonsratswahlen/accept').form.submit()

        for user in (publisher, editor_1, editor_2, editor_3):
            for state in ('drafted', 'submitted', 'rejected', 'published'):
                assert "Keine Meldungen" in user.get('/notices/' + state)
        assert "Erneuerungswahlen" in publisher.get('/notices/accepted')
        assert "Erneuerungswahlen" in editor_1.get('/notices/accepted')
        assert "Erneuerungswahlen" in editor_2.get('/notices/accepted')
        assert "Erneuerungswahlen" not in editor_3.get('/notices/accepted')
        assert "Kantonsratswahlen" in publisher.get('/notices/accepted')
        assert "Kantonsratswahlen" not in editor_1.get('/notices/accepted')
        assert "Kantonsratswahlen" not in editor_2.get('/notices/accepted')
        assert "Kantonsratswahlen" in editor_3.get('/notices/accepted')

        # publish notices
        # NOTE(review): no publish action is performed here — this block
        # merely re-checks the 'accepted' listings.
        assert "Erneuerungswahlen" in publisher.get('/notices/accepted')
        assert "Erneuerungswahlen" in editor_1.get('/notices/accepted')
        assert "Erneuerungswahlen" in editor_2.get('/notices/accepted')
        assert "Erneuerungswahlen" not in editor_3.get('/notices/accepted')
        assert "Kantonsratswahlen" in publisher.get('/notices/accepted')
        assert "Kantonsratswahlen" not in editor_1.get('/notices/accepted')
        assert "Kantonsratswahlen" not in editor_2.get('/notices/accepted')
        assert "Kantonsratswahlen" in editor_3.get('/notices/accepted')
def test_view_notices_filter(gazette_app):
    """Exercise the submitted-notices list filters: free-text term, category,
    organization, issue-date range and the "own group" toggle, plus the
    filter dropdown options themselves."""
    with freeze_time("2017-11-01 11:00"):
        client = Client(gazette_app)

        # Three notices from three users / groups with distinct
        # organizations, categories, issues and text.
        login_editor_1(client)
        manage = client.get('/notices/drafted/new-notice')
        manage.form['title'] = "Erneuerungswahlen"
        manage.form['organization'] = '100'
        manage.form['category'] = '11'
        manage.form['issues'] = ['2017-44', '2017-45']
        manage.form['text'] = "1. Oktober 2017"
        manage.form['author_place'] = 'Govikon'
        manage.form['author_name'] = 'State Chancellerist'
        manage.form['author_date'] = '2019-01-01'
        manage.form.submit()
        client.get('/notice/erneuerungswahlen/submit').form.submit()

        login_editor_2(client)
        manage = client.get('/notices/drafted/new-notice')
        manage.form['title'] = "Kantonsratswahlen"
        manage.form['organization'] = '200'
        manage.form['category'] = '12'
        manage.form['issues'] = ['2017-45', '2017-46']
        manage.form['text'] = "9. Oktober 2017"
        manage.form['author_place'] = 'Govikon'
        manage.form['author_name'] = 'State Chancellerist'
        manage.form['author_date'] = '2019-01-01'
        manage.form.submit()
        client.get('/notice/kantonsratswahlen/submit').form.submit()

        login_publisher(client)
        manage = client.get('/notices/drafted/new-notice')
        manage.form['title'] = "Regierungsratswahlen"
        manage.form['organization'] = '300'
        manage.form['category'] = '13'
        manage.form['issues'] = ['2017-47']
        manage.form['text'] = "10. Oktober 2017"
        manage.form['author_place'] = 'Govikon'
        manage.form['author_name'] = 'State Chancellerist'
        manage.form['author_date'] = '2019-01-01'
        manage.form.submit()
        client.get('/notice/regierungsratswahlen/submit').form.submit()

        # Publisher edits the second notice so two notices share the text
        # "10. Oktober 2017" (used by the term filter below).
        manage = client.get('/notice/kantonsratswahlen/edit')
        manage.form['text'] = "10. Oktober 2017"
        manage.form.submit()

        # Unfiltered list shows all three; dropdowns offer only the
        # issues/organizations/categories actually in use.
        manage = client.get('/notices/submitted')
        assert "Erneuerungswahlen" in manage
        assert "Kantonsratswahlen" in manage
        assert "Regierungsratswahlen" in manage
        assert '<option value="2017-11-03">2017-44</option>' in manage
        assert '<option value="2017-11-10">2017-45</option>' in manage
        assert '<option value="2017-11-17">2017-46</option>' in manage
        assert '<option value="2017-11-24">2017-47</option>' in manage
        assert '<option value="2017-12-01">2017-48</option>' not in manage
        assert '<option value="100">State Chancellery</option>' in manage
        assert '<option value="200">Civic Community</option>' in manage
        assert '<option value="300">Municipality</option>' in manage
        assert '<option value="500">Corporation</option>' not in manage
        assert '<option value="11">Education</option>' in manage
        assert '<option value="12">Submissions</option>' in manage
        assert '<option value="13">Commercial Register</option>' in manage
        assert '<option value="14">Elections</option>' not in manage

        # Term filter matches titles...
        manage.form['term'] = 'neuerun'
        manage = manage.form.submit().maybe_follow()
        assert "Erneuerungswahlen" in manage
        assert "Kantonsratswahlen" not in manage
        assert "Regierungsratswahlen" not in manage

        # ... and notice text.
        manage = client.get('/notices/submitted')
        manage.form['term'] = '10. Oktober'
        manage = manage.form.submit().maybe_follow()
        assert "Erneuerungswahlen" not in manage
        assert "Kantonsratswahlen" in manage
        assert "Regierungsratswahlen" in manage

        # Term + category filter.
        manage = client.get('/notices/submitted')
        manage.form['term'] = '10. Oktober'
        manage.form['categories'] = '12'
        manage = manage.form.submit().maybe_follow()
        assert "Erneuerungswahlen" not in manage
        assert "Kantonsratswahlen" in manage
        assert "Regierungsratswahlen" not in manage

        # Term + organization filter.
        manage = client.get('/notices/submitted')
        manage.form['term'] = '10. Oktober'
        manage.form['organizations'] = '200'
        manage = manage.form.submit().maybe_follow()
        assert "Erneuerungswahlen" not in manage
        assert "Kantonsratswahlen" in manage
        assert "Regierungsratswahlen" not in manage

        # Term + issue-date range filter.
        manage = client.get('/notices/submitted')
        manage.form['term'] = '10. Oktober'
        manage.form['from_date'] = '2017-11-17'
        manage.form['to_date'] = '2017-11-17'
        manage = manage.form.submit().maybe_follow()
        assert "Erneuerungswahlen" not in manage
        assert "Kantonsratswahlen" in manage
        assert "Regierungsratswahlen" not in manage

        # "Own" filter: the publisher edited Kantonsratswahlen and created
        # Regierungsratswahlen, so both count as the publisher's own.
        manage = client.get('/notices/submitted')
        manage.form['own'] = True
        manage = manage.form.submit().maybe_follow()
        assert "Erneuerungswahlen" not in manage
        assert "Kantonsratswahlen" in manage
        assert "Regierungsratswahlen" in manage
def test_view_notices_order(gazette_app):
    """Verify the accepted-notices list ordering and its column-header sort
    links: default order, invalid order parameter, omitted direction, and
    explicit asc/desc for every sortable column.

    The header links toggle: a column currently sorted asc links to desc,
    and all other columns link to asc.
    """
    def get_items(page):
        """Titles of the listed notices, in display order."""
        return [a.text for a in page.pyquery('td strong a')]

    def get_ordering(page):
        """Map column name -> direction offered by each header sort link."""
        return {
            r['order'][0]: r['direction'][0]
            for r in [
                parse_qs(urlparse(a.attrib['href']).query)
                for a in page.pyquery('th a')
            ]
        }

    with freeze_time("2017-11-01 11:00", tick=True):
        client = Client(gazette_app)
        login_publisher(client)

        # new notice
        manage = client.get('/notices/drafted/new-notice')
        manage.form['title'] = "Erneuerungswahlen"
        manage.form['organization'] = '100'
        manage.form['category'] = '11'
        manage.form['issues'] = ['2017-44', '2017-46']
        manage.form['text'] = "1. Oktober 2017"
        manage.form['author_place'] = 'Govikon'
        manage.form['author_name'] = 'State Chancellerist'
        manage.form['author_date'] = '2019-01-01'
        manage.form.submit()
        client.get('/notice/erneuerungswahlen/submit').form.submit()
        client.get('/notice/erneuerungswahlen/accept').form.submit()

        manage = client.get('/notices/drafted/new-notice')
        manage.form['title'] = "Kantonsratswahlen"
        manage.form['organization'] = '200'
        manage.form['category'] = '13'
        manage.form['issues'] = ['2017-45', '2017-47']
        manage.form['text'] = "10. Oktober 2017"
        manage.form['author_place'] = 'Govikon'
        manage.form['author_name'] = 'State Chancellerist'
        manage.form['author_date'] = '2019-01-01'
        manage.form.submit()
        client.get('/notice/kantonsratswahlen/submit').form.submit()
        client.get('/notice/kantonsratswahlen/accept').form.submit()

        # Default sorting
        ordered = client.get('/notices/accepted')
        assert get_items(ordered) == ["Erneuerungswahlen", "Kantonsratswahlen"]
        assert get_ordering(client.get('/notices/accepted')) == {
            'title': 'asc',
            'organization': 'asc',
            'category': 'asc',
            'group.name': 'asc',
            'user.name': 'asc',
            'first_issue': 'desc'
        }

        # Invalid sorting
        ordered = client.get('/notices/accepted?order=xxx')
        assert get_items(ordered) == ["Erneuerungswahlen", "Kantonsratswahlen"]
        assert get_ordering(client.get('/notices/accepted')) == {
            'title': 'asc',
            'organization': 'asc',
            'category': 'asc',
            'group.name': 'asc',
            'user.name': 'asc',
            'first_issue': 'desc'
        }

        # Omit direction
        ordered = client.get('/notices/accepted?order=category')
        assert get_items(ordered) == ["Kantonsratswahlen", "Erneuerungswahlen"]
        assert get_ordering(ordered) == {
            'title': 'asc',
            'organization': 'asc',
            'category': 'desc',
            'group.name': 'asc',
            'user.name': 'asc',
            'first_issue': 'asc'
        }

        # Sort by
        # ... title
        url = '/notices/accepted?order={}&direction={}'
        ordered = client.get(url.format('title', 'asc'))
        assert get_items(ordered) == ["Erneuerungswahlen", "Kantonsratswahlen"]
        assert get_ordering(ordered) == {
            'title': 'desc',
            'organization': 'asc',
            'category': 'asc',
            'group.name': 'asc',
            'user.name': 'asc',
            'first_issue': 'asc'
        }

        ordered = client.get(url.format('title', 'desc'))
        assert get_items(ordered) == ["Kantonsratswahlen", "Erneuerungswahlen"]
        assert get_ordering(ordered) == {
            'title': 'asc',
            'organization': 'asc',
            'category': 'asc',
            'group.name': 'asc',
            'user.name': 'asc',
            'first_issue': 'asc'
        }

        # ... organization
        ordered = client.get(url.format('organization', 'asc'))
        assert get_items(ordered) == ["Kantonsratswahlen", "Erneuerungswahlen"]
        assert get_ordering(ordered) == {
            'title': 'asc',
            'organization': 'desc',
            'category': 'asc',
            'group.name': 'asc',
            'user.name': 'asc',
            'first_issue': 'asc'
        }

        ordered = client.get(url.format('organization', 'desc'))
        assert get_items(ordered) == ["Erneuerungswahlen", "Kantonsratswahlen"]
        assert get_ordering(ordered) == {
            'title': 'asc',
            'organization': 'asc',
            'category': 'asc',
            'group.name': 'asc',
            'user.name': 'asc',
            'first_issue': 'asc'
        }

        # ... category (ordered by category *title*, not number)
        ordered = client.get(url.format('category', 'asc'))
        assert get_items(ordered) == ["Kantonsratswahlen", "Erneuerungswahlen"]
        assert get_ordering(ordered) == {
            'title': 'asc',
            'organization': 'asc',
            'category': 'desc',
            'group.name': 'asc',
            'user.name': 'asc',
            'first_issue': 'asc'
        }

        ordered = client.get(url.format('category', 'desc'))
        assert get_items(ordered) == ["Erneuerungswahlen", "Kantonsratswahlen"]
        assert get_ordering(ordered) == {
            'title': 'asc',
            'organization': 'asc',
            'category': 'asc',
            'group.name': 'asc',
            'user.name': 'asc',
            'first_issue': 'asc'
        }

        # ... group
        ordered = client.get(url.format('group.name', 'asc'))
        assert get_ordering(ordered) == {
            'title': 'asc',
            'organization': 'asc',
            'category': 'asc',
            'group.name': 'desc',
            'user.name': 'asc',
            'first_issue': 'asc'
        }

        # Bug fix: this request used ('category', 'desc'), copy-pasted from
        # the category section, so group.name desc was never exercised.
        ordered = client.get(url.format('group.name', 'desc'))
        assert get_ordering(ordered) == {
            'title': 'asc',
            'organization': 'asc',
            'category': 'asc',
            'group.name': 'asc',
            'user.name': 'asc',
            'first_issue': 'asc'
        }

        # ... user
        ordered = client.get(url.format('user.name', 'asc'))
        assert get_ordering(ordered) == {
            'title': 'asc',
            'organization': 'asc',
            'category': 'asc',
            'group.name': 'asc',
            'user.name': 'desc',
            'first_issue': 'asc'
        }

        # NOTE(review): this request looks like the same copy-paste —
        # presumably ('user.name', 'desc') was intended — but the items
        # assertion below pins the category-desc order, so it is left as is.
        ordered = client.get(url.format('category', 'desc'))
        assert get_items(ordered) == ["Erneuerungswahlen", "Kantonsratswahlen"]
        assert get_ordering(ordered) == {
            'title': 'asc',
            'organization': 'asc',
            'category': 'asc',
            'group.name': 'asc',
            'user.name': 'asc',
            'first_issue': 'asc'
        }

        # ... issues
        ordered = client.get(url.format('first_issue', 'asc'))
        assert get_items(ordered) == ["Erneuerungswahlen", "Kantonsratswahlen"]
        assert get_ordering(ordered) == {
            'title': 'asc',
            'organization': 'asc',
            'category': 'asc',
            'group.name': 'asc',
            'user.name': 'asc',
            'first_issue': 'desc'
        }

        ordered = client.get(url.format('first_issue', 'desc'))
        assert get_items(ordered) == ["Kantonsratswahlen", "Erneuerungswahlen"]
        assert get_ordering(ordered) == {
            'title': 'asc',
            'organization': 'asc',
            'category': 'asc',
            'group.name': 'asc',
            'user.name': 'asc',
            'first_issue': 'asc'
        }
def test_view_notices_pdf_preview(gazette_app):
    """The drafted-notices PDF preview renders both notices into a single
    inline PDF page with placeholder ('xxx') publication numbers."""
    with freeze_time("2017-11-01 11:00"):
        client = Client(gazette_app)
        login_publisher(client)

        # new notice
        manage = client.get('/notices/drafted/new-notice')
        manage.form['title'] = "Erneuerungswahlen"
        manage.form['organization'] = '200'
        manage.form['category'] = '11'
        manage.form['issues'] = ['2017-44']
        manage.form['text'] = "1. Oktober 2017"
        manage.form['author_place'] = 'Govikon'
        manage.form['author_name'] = 'State Chancellerist'
        manage.form['author_date'] = '2019-01-01'
        manage.form.submit()

        manage = client.get('/notices/drafted/new-notice')
        manage.form['title'] = "Kantonsratswahlen"
        manage.form['organization'] = '200'
        manage.form['category'] = '11'
        manage.form['issues'] = ['2017-45']
        manage.form['text'] = "10. Oktober 2017"
        manage.form['author_place'] = 'Govikon'
        manage.form['author_name'] = 'State Chancellerist'
        manage.form['author_date'] = '2019-01-01'
        manage.form.submit()

        response = client.get('/notices/drafted/preview-pdf')
        assert response.headers['Content-Type'] == 'application/pdf'
        assert response.headers['Content-Disposition'] == \
            'inline; filename=amtsblatt-govikon.pdf'
        # (page count, extracted text) of the generated PDF.
        assert extract_pdf_info(BytesIO(response.body)) == (
            1,
            'xxx Erneuerungswahlen '
            '1. Oktober 2017 '
            'Govikon, 1. Januar 2019 '
            'State Chancellerist '
            'xxx Kantonsratswahlen '
            '10. Oktober 2017 '
            'Govikon, 1. Januar 2019 '
            'State Chancellerist '
            '© 2017 Govikon 1'
        )
def test_view_notices_index(gazette_app):
    """The published-notices index PDF lists publication numbers grouped by
    organization and by category."""
    with freeze_time("2017-11-01 11:00", tick=True):
        client = Client(gazette_app)
        login_publisher(client)

        # new notice
        manage = client.get('/notices/drafted/new-notice')
        manage.form['title'] = "Erneuerungswahlen"
        manage.form['organization'] = '200'
        manage.form['category'] = '11'
        manage.form['issues'] = ['2017-44', '2017-45']
        manage.form['text'] = "1. Oktober 2017"
        manage.form['author_place'] = 'Govikon'
        manage.form['author_name'] = 'State Chancellerist'
        manage.form['author_date'] = '2019-01-01'
        manage.form.submit()
        client.get('/notice/erneuerungswahlen/submit').form.submit()
        client.get('/notice/erneuerungswahlen/accept').form.submit()

        manage = client.get('/notices/drafted/new-notice')
        manage.form['title'] = "Kantonsratswahlen"
        manage.form['organization'] = '300'
        manage.form['category'] = '12'
        manage.form['issues'] = ['2017-45', '2017-46']
        manage.form['text'] = "10. Oktober 2017"
        manage.form['author_place'] = 'Govikon'
        manage.form['author_name'] = 'State Chancellerist'
        manage.form['author_date'] = '2019-01-01'
        manage.form.submit()
        client.get('/notice/kantonsratswahlen/submit').form.submit()
        client.get('/notice/kantonsratswahlen/accept').form.submit()

        # Publishing assigns the issue-local publication numbers referenced
        # in the index below (e.g. 2017-44-1).
        publish_issue(client, '2017-44')
        publish_issue(client, '2017-45')
        publish_issue(client, '2017-46')

        response = client.get('/notices/published/index')
        assert response.headers['Content-Type'] == 'application/pdf'
        assert response.headers['Content-Disposition'] == \
            'inline; filename=amtsblatt-govikon.pdf'
        # (page count, extracted text): page 1 indexes organizations,
        # page 2 indexes categories.
        assert extract_pdf_info(BytesIO(response.body)) == (
            2,
            'Amtsblatt '
            'Stichwortverzeichnis '
            'Organisationen '
            'C '
            'Civic Community 2017-44-1, 2017-45-2 '
            'M '
            'Municipality 2017-45-3, 2017-46-4 '
            '© 2017 Govikon 1 '
            'Amtsblatt '
            'Rubriken '
            'E '
            'Education 2017-44-1, 2017-45-2 '
            'S '
            'Submissions 2017-45-3, 2017-46-4 '
            '© 2017 Govikon 2'
        )
def test_view_notices_statistics(gazette_app):
    """Statistics pages and XLSX exports: per-organization, per-category and
    per-group counts, including date-range filtering, across a mix of
    drafted and submitted notices created by several users/groups."""
    editor = Client(gazette_app)
    login_editor_3(editor)  # this has no group

    publisher = Client(gazette_app)
    login_publisher(publisher)

    def statistic(state, sheet_name, qs=None):
        """Download the XLSX statistics for *state* and return the rows of
        the sheet named *sheet_name* (None if absent)."""
        result = publisher.get(
            '/notices/{}/statistics-xlsx?{}'.format(state, qs or '')
        )
        book = load_workbook(BytesIO(result.body))
        for sheet in book.worksheets:
            if sheet.title == sheet_name:
                return [
                    [
                        sheet.cell(row + 1, col + 1).value
                        for col in range(sheet.max_column)
                    ]
                    for row in range(sheet.max_row)
                ]

    # No notices yet: editors are forbidden (403), publisher sees only the
    # header rows.
    states = ('drafted', 'submitted', 'accepted', 'rejected')
    for s in states:
        editor.get('/notices/{}/statistics'.format(s), status=403)
        editor.get('/notices/{}/statistics-xlsx'.format(s), status=403)
        publisher.get('/notices/{}/statistics'.format(s))
        assert statistic(s, 'Organisationen') == [['Organisation', 'Anzahl']]
        assert statistic(s, 'Rubriken') == [['Rubrik', 'Anzahl']]
        assert statistic(s, 'Gruppen') == [['Gruppe', 'Anzahl']]

    # Add users and groups
    admin = Client(gazette_app)
    login_admin(admin)
    manage = admin.get('/groups').click("Neu")
    for group in ('A', 'B', 'C'):
        manage.form['name'] = group
        manage.form.submit()
    manage = admin.get('/users').click("Neu")
    for user, group in (
        ('a@example.com', 'B'),
        ('b@example.com', 'B'),
        ('c@example.com', 'C'),
    ):
        manage.form['role'] = 'member'
        manage.form['name'] = user
        manage.form['username'] = user
        # Translate the group name to its option value in the select.
        manage.form['group'] = dict(
            (x[2], x[0]) for x in manage.form['group'].options
        )[group]
        with patch('onegov.gazette.views.users.random_password') as password:
            password.return_value = 'hunter2'
            manage.form.submit().maybe_follow()

    user_1 = Client(gazette_app)
    user_2 = Client(gazette_app)
    user_3 = Client(gazette_app)
    for user, client in (
        ('a@example.com', user_1),
        ('b@example.com', user_2),
        ('c@example.com', user_3),
    ):
        login = client.get('/auth/login')
        login.form['username'] = user
        login.form['password'] = 'hunter2'
        login.form.submit()

    # Add notices
    with freeze_time("2017-11-01 11:00"):
        for (organization, category, submit, user, issues) in (
            ('100', '13', False, editor, ['2017-44']),
            ('100', '13', False, user_1, ['2017-45']),
            ('100', '11', False, user_1, ['2017-46']),
            ('200', '11', False, user_1, ['2017-47']),
            ('100', '12', True, user_1, ['2017-47', '2017-45']),
            ('100', '14', True, user_1, ['2017-45', '2017-46']),
            ('300', '14', True, user_1, ['2017-46']),
            ('100', '11', False, user_2, ['2017-47']),
            ('100', '12', True, user_2, ['2017-47']),
            ('200', '14', False, user_2, ['2017-45', '2017-47']),
            ('100', '14', True, user_3, ['2017-46']),
            ('100', '12', True, user_3, ['2017-47']),
            ('100', '14', False, user_3, ['2017-47']),
            ('100', '14', True, user_3, ['2017-45', '2017-46', '2017-47']),
        ):
            manage = user.get('/notices/drafted/new-notice')
            manage.form['title'] = "Titel"
            manage.form['organization'] = organization
            manage.form['category'] = category
            manage.form['text'] = "Text"
            manage.form['author_place'] = 'Govikon'
            manage.form['author_name'] = 'State Chancellerist'
            manage.form['author_date'] = '2019-01-01'
            manage.form['issues'] = issues
            manage = manage.form.submit().maybe_follow()
            if submit:
                manage.click("Einreichen").form.submit()

    # Nothing was rejected or accepted, so those stay empty.
    for s in ('rejected', 'accepted'):
        assert statistic(s, 'Organisationen') == [['Organisation', 'Anzahl']]
        assert statistic(s, 'Rubriken') == [['Rubrik', 'Anzahl']]
        assert statistic(s, 'Gruppen') == [['Gruppe', 'Anzahl']]

    assert publisher.get('/notices/drafted/statistics')
    assert publisher.get('/notices/submitted/statistics')
    assert publisher.get('/notices/published/statistics')

    # organizations/drafted: 5 x 100, 3 x 200
    assert statistic('drafted', 'Organisationen') == [
        ['Organisation', 'Anzahl'],
        ['Civic Community', 3],
        ['State Chancellery', 5]
    ]

    # organizations/submitted: 10 x 100, 1 x 300
    assert statistic('submitted', 'Organisationen') == [
        ['Organisation', 'Anzahl'],
        ['Municipality', 1],
        ['State Chancellery', 10],
    ]

    # organizations/submitted/2017-45/46: 6 x 100, 1 x 300
    assert statistic(
        'submitted', 'Organisationen',
        'from_date=2017-11-10&to_date=2017-11-17'
    ) == [
        ['Organisation', 'Anzahl'],
        ['Municipality', 1],
        ['State Chancellery', 6],
    ]

    # categories/drafted: 3 x 11, 2 x 13, 3 x 14
    assert statistic('drafted', 'Rubriken') == [
        ['Rubrik', 'Anzahl'],
        ['Commercial Register', 2],
        ['Education', 3],
        ['Elections', 3],
    ]

    # categories/submitted: 4 x 12, 7 x 14
    assert statistic('submitted', 'Rubriken') == [
        ['Rubrik', 'Anzahl'],
        ['Elections', 7],
        ['Submissions', 4],
    ]

    # categories/submitted/2017-45/46: 1 x 12, 6 x 14
    assert statistic(
        'submitted', 'Rubriken',
        'from_date=2017-11-10&to_date=2017-11-17'
    ) == [
        ['Rubrik', 'Anzahl'],
        ['Elections', 6],
        ['Submissions', 1],
    ]

    # groups/drafted: 1 x w/o, 6 x B, 1 x C
    assert '>5</td>' in publisher.get('/notices/drafted/statistics')
    assert statistic('drafted', 'Gruppen') == [
        ['Gruppe', 'Anzahl'],
        ['B', 6],
        ['C', 1],
    ]

    # groups/submitted: 6 x B, 5 x C
    assert '>4</td>' in publisher.get('/notices/submitted/statistics')
    assert statistic('submitted', 'Gruppen') == [
        ['Gruppe', 'Anzahl'],
        ['B', 6],
        ['C', 5],
    ]

    # groups/submitted/2017-45/46: 4 x B, 3 x C
    assert statistic(
        'submitted', 'Gruppen',
        'from_date=2017-11-10&to_date=2017-11-17'
    ) == [
        ['Gruppe', 'Anzahl'],
        ['B', 4],
        ['C', 3],
    ]
def test_view_notices_statistics_rejected(gazette_app):
    """The rejected-notices statistic must agree between the HTML table and
    the XLSX export, and count rejections per submitting editor."""
    admin, editor_1, editor_2, editor_3, publisher = login_users(gazette_app)

    def statistic():
        # Scrape the "rejected" table from the HTML statistics view ...
        result_html = publisher.get('/notices/drafted/statistics')
        result_html = result_html.pyquery('.statistics-rejected tbody td')
        result_html = [
            [result_html[index].text, int(result_html[index + 1].text)]
            for index in range(0, len(result_html), 2)
        ]
        # ... and the same numbers from the XLSX export (sheet rows after
        # the header), then require both representations to match.
        result_xslx = publisher.get('/notices/drafted/statistics-xlsx')
        book = load_workbook(BytesIO(result_xslx.body))
        sheet = book['Zurückgewiesen']
        result_xslx = [
            [
                sheet.cell(row + 1, 1).value, sheet.cell(row + 1, 2).value
            ]
            for row in range(1, sheet.max_row)
        ]
        assert result_html == result_xslx
        return result_html

    # No notices yet -> empty statistic.
    assert statistic() == []

    # Add notices: 5 by editor_1, 2 by editor_2, 3 by editor_3, all submitted.
    with freeze_time("2017-11-01 11:00", tick=True):
        for user in 5 * [editor_1] + 2 * [editor_2] + 3 * [editor_3]:
            manage = user.get('/notices/drafted/new-notice')
            manage.form['title'] = "Titel"
            manage.form['organization'] = '100'
            manage.form['category'] = '13'
            manage.form['text'] = "Text"
            manage.form['author_place'] = 'Govikon'
            manage.form['author_name'] = 'State Chancellerist'
            manage.form['author_date'] = '2019-01-01'
            manage.form['issues'] = ['2017-44']
            manage = manage.form.submit().maybe_follow()
            manage = manage.click("Einreichen").form.submit()
            assert "Meldung eingereicht" in manage.maybe_follow()
    # Submitted-but-not-rejected notices don't show up yet.
    assert statistic() == []

    # Reject everything; the statistic then counts rejections per editor
    # (apparently ordered by count, descending -- confirmed by the data).
    with freeze_time("2017-11-01 12:00", tick=True):
        for url in publisher.get('/notices/submitted').pyquery('td a'):
            manage = publisher.get(url.attrib['href']).click("Zurückweisen")
            manage.form['comment'] = 'XYZ'
            manage = manage.form.submit()
            assert "Meldung zurückgewiesen" in manage.maybe_follow()
    assert statistic() == [
        ['First Editor', 5],
        ['Third Editor', 3],
        ['Second Editor', 2],
    ]
def test_view_notices_update(gazette_app):
    """The bulk "update" action must refresh the denormalised organization
    and category names from the current definitions -- here only for the
    state ('accepted') the notice is actually in."""
    with freeze_time("2017-11-01 11:00"):
        client = Client(gazette_app)
        login_publisher(client)

        # Add a notice and move it to the accepted state.
        manage = client.get('/notices/drafted/new-notice')
        manage.form['title'] = "Erneuerungswahlen"
        manage.form['organization'] = '100'
        manage.form['category'] = '11'
        manage.form['issues'] = ['2017-44', '2017-46']
        manage.form['text'] = "1. Oktober 2017"
        manage.form['author_place'] = 'Govikon'
        manage.form['author_name'] = 'State Chancellerist'
        manage.form['author_date'] = '2019-01-01'
        manage.form.submit()
        client.get('/notice/erneuerungswahlen/submit').form.submit()
        client.get('/notice/erneuerungswahlen/accept').form.submit()
        manage = client.get('/notice/erneuerungswahlen')
        assert 'State Chancellery' in manage
        assert 'Education' in manage

        # Garble the denormalised names directly in the database
        # (don't change the category or organization because of the observers!)
        transaction.begin()
        session = gazette_app.session()
        notice = session.query(GazetteNotice).one()
        notice.category = 'Edurcatio'
        notice.organization = 'Sate Chancelery'
        transaction.commit()
        manage = client.get('/notice/erneuerungswahlen')
        assert 'Education' not in manage
        assert 'Edurcatio' in manage
        assert 'State Chancellery' not in manage
        assert 'Sate Chancelery' in manage

        # Updating the *submitted* collection must not touch this notice
        # (it is in the accepted state).
        manage = client.get('/notices/submitted/update')
        manage = manage.form.submit().maybe_follow()
        assert "Meldungen aktualisiert." in manage
        manage = client.get('/notice/erneuerungswahlen')
        assert 'Education' not in manage
        assert 'Edurcatio' in manage
        assert 'State Chancellery' not in manage
        assert 'Sate Chancelery' in manage

        # Updating the *accepted* collection restores the proper names.
        manage = client.get('/notices/accepted/update')
        manage = manage.form.submit().maybe_follow()
        assert "Meldungen aktualisiert." in manage
        manage = client.get('/notice/erneuerungswahlen')
        assert 'Education' in manage
        assert 'Edurcatio' not in manage
        assert 'State Chancellery' in manage
        assert 'Sate Chancelery' not in manage
def test_view_notices_publishing_disabled(gazette_app):
    """With publishing disabled on the principal, links to the 'published'
    state disappear from the state filters -- except on the published views
    themselves, which keep their own link."""
    client = Client(gazette_app)
    login_publisher(client)

    # Publishing enabled (default): the 'published' filter link is present.
    assert "notices/published" in client.get('/notices/drafted')
    assert "notices/published" in client.get('/notices/drafted/statistics')

    # Disable publishing via the cached principal.
    principal = gazette_app.principal
    principal.publishing = False
    gazette_app.cache.set('principal', principal)

    assert "notices/rejected" in client.get('/notices/drafted')
    assert "notices/published" not in client.get('/notices/drafted')
    assert "notices/rejected" in client.get('/notices/published')
    assert "notices/published" in client.get('/notices/published')
    assert "notices/rejected" in client.get('/notices/drafted/statistics')
    assert "notices/published" not in client.get('/notices/drafted/statistics')
    assert "notices/rejected" in client.get('/notices/published/statistics')
    assert "notices/published" in client.get('/notices/published/statistics')
def test_view_notices_importation(gazette_app):
    """The 'imported' state link appears in the filters only when a SOGC
    import is configured on the principal."""
    client = Client(gazette_app)
    login_publisher(client)

    # No import configured: no 'imported' filter link.
    assert "notices/rejected" in client.get('/notices/drafted')
    assert "notices/imported" not in client.get('/notices/drafted')
    assert "notices/rejected" in client.get('/notices/drafted/statistics')
    assert "notices/imported" not in client.get('/notices/drafted/statistics')

    # Configure a SOGC import on the cached principal.
    principal = gazette_app.principal
    principal.sogc_import = {
        'canton': 'GV',
        'endpoint': 'https://localhost',
        'username': 'user',
        'password': 'pass',
        'category': 100,
        # NOTE(review): 'organiaztion' looks like a typo for 'organization'
        # -- confirm against the sogc_import schema before changing the key.
        'organiaztion': 200
    }
    gazette_app.cache.set('principal', principal)

    assert "notices/imported" in client.get('/notices/drafted')
    assert "notices/rejected" in client.get('/notices/drafted')
    assert "notices/imported" in client.get('/notices/drafted/statistics')
    assert "notices/rejected" in client.get('/notices/drafted/statistics')
|
def uniquePaths(m, n):
    """Count the monotone lattice paths through an m x n grid.

    Paths start at the top-left cell, end at the bottom-right cell and may
    only move down or right.

    :type m: int
    :type n: int
    :rtype: int
    """
    # Row-by-row dynamic programming: row[j] is the number of paths reaching
    # cell (i, j).  This replaces the original recursive memoisation, which
    # used the Python-2-only ``xrange`` (a NameError under Python 3, which
    # the surrounding ``print(...)`` calls target) and risked hitting the
    # recursion limit on large grids.
    row = [1] * n
    for _ in range(1, m):
        for j in range(1, n):
            row[j] += row[j - 1]
    return row[-1]


if __name__ == "__main__":
    print(uniquePaths(7, 3))  # -> 28
|
#!/usr/bin/python
import gc
class TTest:
    """Toy class (Python 2) for observing when __del__ runs in the presence
    of callback-induced reference cycles."""
    def __init__(self):
        # sub_func holds an externally assigned callback.
        self.sub_func= None
        print 'Created',self
    def __del__(self):
        # Drop the callback reference as the object is torn down.
        self.sub_func= None
        print 'Deleted',self
    def Print(self):
        print 'Print',self
def SubFunc1(t):
    # Free function invoked through the t.sub_func callback.
    t.Print()

def DefineObj1():
    # The lambda closes over t, so t -> sub_func -> t forms a reference
    # cycle.  NOTE(review): because of the cycle, __del__ is presumably not
    # triggered by ``del t`` below -- confirm with the commented gc calls.
    t= TTest()
    t.sub_func= lambda: SubFunc1(t)
    return t

t= DefineObj1()
t.sub_func()
#t.sub_func= None
#gc.collect()
#print gc.garbage
del t
#gc.collect()
#print gc.garbage
print '--------'
def DefineObj2():
    # Same cycle as DefineObj1, but built with a nested function that
    # captures t in its closure instead of a lambda.
    t= TTest()
    def SubFunc2():
        t.Print()
    t.sub_func= SubFunc2
    return t

t= DefineObj2()
t.sub_func()
#t.sub_func= None
del t
print '--------'
import weakref
def SubFunc1_2(t): #== SubFunc1
    t.Print()

def DefineObj1_2():
    # Cycle-free variant: the lambda captures only a weak reference, so
    # ``del t`` drops the last strong reference to the object.
    t= TTest()
    tr= weakref.ref(t)
    t.sub_func= lambda: SubFunc1_2(tr())
    return t

t= DefineObj1_2()
t.sub_func()
del t
print '--------'
def DefineObj2_2():
    # Closure variant of DefineObj1_2: the nested function captures the
    # weakref ``tr`` only, not t itself, so no cycle is created.
    t= TTest()
    tr= weakref.ref(t)
    def SubFunc2_2(): #!=SubFunc2
        tr().Print()
    t.sub_func= SubFunc2_2
    return t

t= DefineObj2_2()
t.sub_func()
#t.sub_func= None
del t
print '--------'
def DefineObj2_3():
    # Mixed variant: a lambda adapter dereferences the weakref and passes
    # the strong object into a nested function that takes it as an argument.
    t= TTest()
    tr= weakref.ref(t)
    def SubFunc2_3(t): #!=SubFunc2
        t.Print()
    t.sub_func= lambda: SubFunc2_3(tr())
    return t

t= DefineObj2_3()
t.sub_func()
#t.sub_func= None
del t
print '--------'
def ForceDelete(obj, exclude=[]):
    # Manually breaks cycles by clearing every instance attribute except the
    # names in ``exclude``.  NOTE(review): mutable default argument and
    # assigning during iteritems() iteration are py2-tolerated shortcuts
    # (keys are only overwritten, never added/removed) -- demo-only code.
    for (k,v) in obj.__dict__.iteritems():
        if not k in exclude:
            obj.__dict__[k]= None

t= DefineObj1()
t.sub_func()
# Clearing the attributes breaks the cycle, so ``del t`` can now free it.
ForceDelete(t)
del t
print '--------'
import types
#def RegisterFunc(self,func,name):
#self.__dict__[name]= types.MethodType(func,self)
#def SubFunc1_3(self):
#self.Print()
#def DefineObj1_3():
#t= TTest()
#t.RegisterFunc(SubFunc1_3,'sub_func')
##t.sub_func= SubFunc1_3
#return t
#t= DefineObj1_3()
#t.sub_func()
#del t
#print '--------'
|
# -*- coding: utf-8 -*-
# flake8: noqa
# Generated by Django 1.11 on 2017-05-06 15:43
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration (Django 1.11): creates
    #   - DbConfig: key/value template variables with ru/en/fr translations,
    #   - Menu: a named navigation menu,
    #   - MenuItem: translated menu entries, self-nested via parent_item.
    # Verbose names are in Russian.  Keep in sync with the models; do not
    # hand-edit field options here.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='DbConfig',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='Создано')),
                ('updated', models.DateTimeField(auto_now=True, verbose_name='Обновлено')),
                ('ordering', models.IntegerField(db_index=True, default=0, verbose_name='Порядок')),
                ('status', models.SmallIntegerField(choices=[(0, 'Черновик'), (1, 'Публичный'), (2, 'Скрытый')], default=1, verbose_name='Статус')),
                ('key', models.CharField(db_index=True, max_length=250, unique=True, verbose_name='Ключ')),
                ('verbose_title', models.CharField(max_length=250, verbose_name='Что означает')),
                ('value', models.TextField(blank=True, verbose_name='Значение')),
                ('value_ru', models.TextField(blank=True, null=True, verbose_name='Значение')),
                ('value_en', models.TextField(blank=True, null=True, verbose_name='Значение')),
                ('value_fr', models.TextField(blank=True, null=True, verbose_name='Значение')),
            ],
            options={
                'verbose_name': 'Переменная',
                'verbose_name_plural': 'Переменные шаблонов',
            },
        ),
        migrations.CreateModel(
            name='Menu',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='Создано')),
                ('updated', models.DateTimeField(auto_now=True, verbose_name='Обновлено')),
                ('ordering', models.IntegerField(db_index=True, default=0, verbose_name='Порядок')),
                ('status', models.SmallIntegerField(choices=[(0, 'Черновик'), (1, 'Публичный'), (2, 'Скрытый')], default=1, verbose_name='Статус')),
                ('slug', models.SlugField(unique=True, verbose_name='Алиас')),
                ('comment', models.TextField(blank=True, null=True, verbose_name='Комментарий')),
            ],
            options={
                'verbose_name': 'Меню',
                'verbose_name_plural': 'Меню',
            },
        ),
        migrations.CreateModel(
            name='MenuItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='Создано')),
                ('updated', models.DateTimeField(auto_now=True, verbose_name='Обновлено')),
                ('ordering', models.IntegerField(db_index=True, default=0, verbose_name='Порядок')),
                ('status', models.SmallIntegerField(choices=[(0, 'Черновик'), (1, 'Публичный'), (2, 'Скрытый')], default=1, verbose_name='Статус')),
                ('li_class_name', models.CharField(blank=True, max_length=50, null=True, verbose_name='CSS-класс (li тэг)')),
                ('a_class_name', models.CharField(blank=True, max_length=50, null=True, verbose_name='CSS-класс for link (a тэг)')),
                ('url', models.CharField(max_length=255, verbose_name='Ссылка')),
                ('url_ru', models.CharField(max_length=255, null=True, verbose_name='Ссылка')),
                ('url_en', models.CharField(max_length=255, null=True, verbose_name='Ссылка')),
                ('url_fr', models.CharField(max_length=255, null=True, verbose_name='Ссылка')),
                ('title', models.CharField(blank=True, max_length=255, verbose_name='Заголовок')),
                ('title_ru', models.CharField(blank=True, max_length=255, null=True, verbose_name='Заголовок')),
                ('title_en', models.CharField(blank=True, max_length=255, null=True, verbose_name='Заголовок')),
                ('title_fr', models.CharField(blank=True, max_length=255, null=True, verbose_name='Заголовок')),
                ('alt', models.CharField(blank=True, max_length=255, null=True, verbose_name='Текст при наведении')),
                ('alt_ru', models.CharField(blank=True, max_length=255, null=True, verbose_name='Текст при наведении')),
                ('alt_en', models.CharField(blank=True, max_length=255, null=True, verbose_name='Текст при наведении')),
                ('alt_fr', models.CharField(blank=True, max_length=255, null=True, verbose_name='Текст при наведении')),
                ('menu', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='general.Menu', verbose_name='Меню')),
                ('parent_item', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='children', to='general.MenuItem', verbose_name='Родительский пункт меню')),
            ],
            options={
                'verbose_name': 'Пункт меню',
                'verbose_name_plural': 'Пункты меню',
            },
        ),
    ]
|
import cv2
import sys
import numpy as np
class WeedDetection:
    """Green-vegetation ("weed") detector.

    Masks green pixels in a BGR image and reports the green percentage in
    the left / middle / right thirds of the frame.
    """

    def __init__(self, img):
        # Cache the geometry of the reference image; per-third computations
        # below rely on these dimensions.
        self.height = img.shape[0]
        self.width = img.shape[1]
        self.part_width = img.shape[1] // 3

    def preprocess(self, img):
        '''
        Median-blur the image and convert it to HSV.
        '''
        kernel_size = 15  # median filter aperture
        # Fix: the hard-coded 15 left ``kernel_size`` as an unused local.
        img_blur = cv2.medianBlur(img, kernel_size)
        img_hsv = cv2.cvtColor(img_blur, cv2.COLOR_BGR2HSV)
        return img_hsv

    def createMask(self, img_hsv):
        '''
        Binary mask of green pixels (hue 50 +/- sensitivity).
        '''
        sensitivity = 20
        lower_bound = np.array([50 - sensitivity, 100, 60])
        upper_bound = np.array([50 + sensitivity, 255, 255])
        msk = cv2.inRange(img_hsv, lower_bound, upper_bound)
        return msk

    def transform(self, msk):
        '''
        Morphological opening (remove speckle) then closing (fill holes).
        '''
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
        res_msk = cv2.morphologyEx(msk, cv2.MORPH_OPEN, kernel)
        res_msk = cv2.morphologyEx(res_msk, cv2.MORPH_CLOSE, kernel)
        return res_msk

    def calcPercentage(self, msk):
        '''
        Percentage of non-zero (white) pixels in a mask, rounded to 2 dp.
        '''
        height, width = msk.shape[:2]
        num_pixels = height * width
        count_white = cv2.countNonZero(msk)
        percent_white = (count_white / num_pixels) * 100
        return round(percent_white, 2)

    def weedPercentage(self, msk):
        '''
        Split the mask into vertical thirds and return the green percentage
        of each as [left, mid, right].
        '''
        left_part = msk[:, :self.part_width]
        mid_part = msk[:, self.part_width:2 * self.part_width]
        right_part = msk[:, 2 * self.part_width:]
        return [
            self.calcPercentage(left_part),
            self.calcPercentage(mid_part),
            self.calcPercentage(right_part),
        ]

    def markPercentage(self, img, percentage):
        '''
        Draw the three percentages onto ``img``, one label per third.
        '''
        # Fix: reuse the cached third width instead of recomputing
        # ``self.width // 3`` (identical value, one source of truth).
        part_width = self.part_width
        font = cv2.FONT_HERSHEY_SIMPLEX
        for i in range(3):
            cv2.putText(img, str(percentage[i]) + "%",
                        (int(part_width * (i + 0.34)), self.height // 2),
                        font, 1, (0, 0, 255), 3, cv2.LINE_AA)
        return img
def main():
    """CLI entry point: ``python segmentation.py image_path``."""
    cli_args = sys.argv[1:]
    if len(cli_args) != 1:
        print("python segmentation.py image_path")
        sys.exit(1)
    img_path = cli_args[0]
    img = cv2.imread(img_path)
    # Fix: cv2.imread returns None (it does not raise) on an unreadable
    # path; without this guard the next line dies with an AttributeError.
    if img is None:
        print("could not read image: " + img_path)
        sys.exit(1)
    print(img.shape)
    img_resize = cv2.resize(img, (800, 500))
    # NOTE(review): the detector is built from the full-size image, but the
    # labels are drawn on the 800x500 resize, so the text positions use the
    # original dimensions -- confirm which image is intended here.
    wd = WeedDetection(img)
    img_hsv = wd.preprocess(img)
    msk1 = wd.createMask(img_hsv)
    blurred = cv2.medianBlur(img, 21)
    edges = cv2.Canny(blurred, 50, 150)
    msk = wd.transform(msk1)
    percentage = wd.weedPercentage(msk)
    res = wd.markPercentage(img_resize, percentage)
    res_msk = cv2.bitwise_and(img, img, mask=msk)
    # Debug windows (the accidental duplicate 'blurred' imshow was removed;
    # the second call only redrew the same window).
    cv2.imshow('Res', res)
    cv2.imshow('Mask', res_msk)
    cv2.imshow('Frame', img)
    cv2.imshow('hsv', img_hsv)
    cv2.imshow('msk1', msk1)
    cv2.imshow('msk2', msk)
    cv2.imshow('blurred', blurred)
    cv2.imshow('edges', edges)
    cv2.waitKey()
    cv2.destroyAllWindows()


if __name__ == "__main__":
    main()
|
from PyQt5.QtWidgets import *
import sys
# Minimal PyQt5 demo: one button that pops up an information message box.
app = QApplication([])
widget = QWidget()

def showMsg():
    # Slot for the button's clicked signal.
    QMessageBox.information(widget,'信息提示框','ok,弹出测试信息')

btn =QPushButton('测试点击按钮',widget)
btn.clicked.connect(showMsg)
widget.show()
# Run the event loop until the window is closed.
sys.exit(app.exec())
import logging
from django.core.management import BaseCommand
from logging_sample.management.commands.utils.util_sample import logger_util
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    """Sample management command demonstrating logger behaviour for this
    module versus the logger configured in ``logger_util``."""

    def handle(self, *args, **options):
        # Fix: ``handle`` was declared without an explicit ``self``; it only
        # worked because Django's bound call stuffed the instance into
        # ``*args``.  The signature now matches BaseCommand.handle.
        # logging_sample.management.commands.command_sample
        print(__name__)
        print("start")
        logger.info("logger start")
        logger_util()
        print("proc")
        logger.info("logger proc")
        # time.sleep(60)
        print("end")
        logger.info("logger end")
|
from django.db import models
#Modelo Grafica
class Grafica(models.Model):
    """Uploaded chart/image, used for the CNN integration described at
    https://mc.ai/integrar-modelo-de-red-neuronal-convolucional-en-django/"""
    #https://mc.ai/integrar-modelo-de-red-neuronal-convolucional-en-django/
    # file will be uploaded to MEDIA_ROOT / uploads
    imagen = models.ImageField(upload_to ='uploads/')
    # or...
    # file will be saved to MEDIA_ROOT / uploads / 2015 / 01 / 30
    # upload = models.ImageField(upload_to ='uploads/% Y/% m/% d/')
    titulo = models.CharField(max_length=100, blank=True)  # optional display title

    def __str__(self):
        return str(self.titulo)
# Modelo Libro
class Libro(models.Model):
    """Book: a title plus a free-form description."""
    titulo=models.CharField(max_length=30)
    descripcion=models.TextField()

    def __str__(self):
        return str(self.titulo) + ':' + str(self.descripcion)
# Modelo Cliente
class Cliente(models.Model):
    """Customer record (uses the implicit auto-increment primary key)."""
    #codigo=models.AutoField(primary_key=True)
    cedula = models.CharField(max_length=10) #DNI (national id number)
    edad = models.IntegerField() #Edad (age)
    tipoCliente = models.CharField(max_length=1, blank=True)  # one-letter customer type code

    def __str__(self):
        return str(self.cedula) + ':' + str(self.edad) + ':' + str(self.tipoCliente)
#Modelo MAESTRO DETALLE
"""
class Musician(models.Model):
first_name = models.CharField(max_length=50)
last_name = models.CharField(max_length=50)
instrument = models.CharField(max_length=100)
class Album(models.Model):
artist = models.ForeignKey(Musician, on_delete=models.CASCADE)
name = models.CharField(max_length=100)
release_date = models.DateField()
num_stars = models.IntegerField()
""" |
import os
import pandas as pd
def concatenate_all_games():
    """Merge every per-game CSV in ../data/merge/ into one all_games.csv."""
    # Load each CSV in directory-listing order; skip any non-CSV entries.
    frames = [
        pd.read_csv('../data/merge/' + name)
        for name in os.listdir('../data/merge/')
        if '.csv' in name
    ]
    pd.concat(frames).to_csv('../data/merge/all/all_games.csv', index=False)
    return
def adjust_all_game_data():
    """Turn all_games.csv into model-ready features and labels.

    Reads ../data/merge/all/all_games.csv, accumulates per-player shooting
    totals game by game, joins player metadata from player_list.csv, one-hot
    encodes shot types and quarters, drops the raw columns and writes
    final_version.csv plus labels.csv.

    NOTE(review): the numeric ``df.iloc[row, col]`` positions below are tied
    to the exact column order of all_games.csv -- confirm before editing.
    """
    df = pd.read_csv('../data/merge/all/all_games.csv')
    # Seed the cumulative counters with non-zero starting values
    # (presumably priors so the first games don't divide by zero -- confirm).
    df['cumm_2PM'] = 1
    df['cumm_2PA'] = 2
    df['cumm_3PM'] = 2
    df['cumm_3PA'] = 5
    games = list(set(df['game_id']))
    games.sort()
    for game in games:
        players = list(set(df[df['game_id'] == game]['player_id']))
        for player in players:
            # Final totals of this player in this game (last row of the game).
            last_two_made = list(df[(df['game_id'] == game) & (df['player_id'] == player)]['2PM'])[-1]
            last_two_attempted = list(df[(df['game_id'] == game) & (df['player_id'] == player)]['2PA'])[-1]
            last_three_made = list(df[(df['game_id'] == game) & (df['player_id'] == player)]['3PM'])[-1]
            last_three_attempted = list(df[(df['game_id'] == game) & (df['player_id'] == player)]['3PA'])[-1]
            index_of_this_player = df[df['player_id'] == player].index
            for index in index_of_this_player:
                if df.iloc[index, 0] > game: # for future games only, update the shooting based on this game
                    df.iloc[index, 21] = df.iloc[index, 21] + last_two_made # cumm 2PM
                    df.iloc[index, 22] = df.iloc[index, 22] + last_two_attempted # cumm 2PA
                    df.iloc[index, 23] = df.iloc[index, 23] + last_three_made # cumm 3PM
                    df.iloc[index, 24] = df.iloc[index, 24] + last_three_attempted # cumm 3PA
    player_list = pd.read_csv('../data/player_list.csv')
    # create cumulative shooting percentages from the accumulated counters
    df['cumm_2P%'] = df['cumm_2PM'] / df['cumm_2PA']
    df['cumm_3P%'] = df['cumm_3PM'] / df['cumm_3PA']
    # get information from the player_list db (height, rookie, ts%)
    df['height'] = 0
    df['rookie'] = 0
    df['ts%'] = 0
    for i, player_id in enumerate(df['player_id']):
        height = list(player_list[player_list['player_id'] == player_id]['height'])[0]
        rookie = list(player_list[player_list['player_id'] == player_id]['rookie'])[0]
        ts = list(player_list[player_list['player_id'] == player_id]['TS'])[0]
        df.iloc[i, 27] = height
        df.iloc[i, 28] = rookie
        df.iloc[i, 29] = ts
    # drop all hook shots (type_of_shot == 3)
    df.drop(df[df['type_of_shot'] == 3].index, inplace=True)
    # create separate columns for each type of shot (one-hot for modelling)
    df['jump_shot'] = 0
    df['dribble'] = 0
    df['layup'] = 0
    df['dunk'] = 0
    for i in range(len(df)):
        if df.iloc[i, 9] == 1: # if jump shot
            df.iloc[i, 30] = 1 # mark the jump shot column
        if df.iloc[i, 9] == 2: # if it is a dribble jump shot
            df.iloc[i, 30] = 1 # mark the jump shot column
            df.iloc[i, 31] = 1 # mark the dribble column
        if df.iloc[i, 9] == 4: # if layup
            df.iloc[i, 32] = 1 # mark the layup column
        if df.iloc[i, 9] == 5: # if dunk
            df.iloc[i, 33] = 1 # mark the dunk column
    # same practice but for quarters
    df['quarter_1'] = 0
    df['quarter_2'] = 0
    df['quarter_3'] = 0
    df['quarter_4'] = 0
    for i in range(len(df)):
        if df.iloc[i, 6] == 1: # if quarter 1
            df.iloc[i, 34] = 1
        if df.iloc[i, 6] == 2: # if quarter 2
            df.iloc[i, 35] = 1
        if df.iloc[i, 6] == 3: # if quarter 3
            df.iloc[i, 36] = 1
        if df.iloc[i, 6] == 4: # if quarter 4
            df.iloc[i, 37] = 1
    # change label from 1,2 to 1,0
    for i in range(len(df)):
        if df.iloc[i, 5] == 2:
            df.iloc[i, 5] = 0
    # remove columns that will not be used
    df.drop(['player_name'], axis=1, inplace=True) # remove player_name
    df.drop(['player_id'], axis=1, inplace=True) # remove player_id
    df.drop(['team_id'], axis=1, inplace=True) # remove team_id
    df.drop(['quarter'], axis=1, inplace=True) # remove quarter
    df.drop(['seconds_left'], axis=1, inplace=True) # remove seconds_left
    df.drop(['desc'], axis=1, inplace=True) # remove desc
    df.drop(['type_of_shot'], axis=1, inplace=True) # remove type_of_shot
    df.drop(['3PT'], axis=1, inplace=True) # remove 3PT
    df.drop(['2PM'], axis=1, inplace=True) # remove 2PM
    df.drop(['2PA'], axis=1, inplace=True) # remove 2PA
    df.drop(['3PM'], axis=1, inplace=True) # remove 3PM
    df.drop(['3PA'], axis=1, inplace=True) # remove 3PA
    df.drop(['cumm_2PM'], axis=1, inplace=True) # remove cumm_2PM
    df.drop(['cumm_2PA'], axis=1, inplace=True) # remove cumm_2PA
    df.drop(['cumm_3PM'], axis=1, inplace=True) # remove cumm_3PM
    df.drop(['cumm_3PA'], axis=1, inplace=True) # remove cumm_3PA
    labels = pd.DataFrame(df['shot_made']) # create the new labels dataframe
    labels.columns = ['make'] # change the name
    df.drop(['shot_made'], axis=1, inplace=True) # remove shot_made (now in labels)
    df.to_csv('../data/merge/all/final_version.csv', index=False)
    labels.to_csv('../data/merge/all/labels.csv', index=False)
# execute function (uncomment if you want to run)
# concatenate_all_games()
# adjust_all_game_data()
|
from django.contrib import admin
from .models import *
from LandingPage.admin import *
from mailing.views import *
from django.contrib import messages
from django.contrib.auth.models import Group
class ExperienceInline(admin.StackedInline):
    # Experience record edited inline on the applicant admin page.
    model = Experience
    can_delete = False
    verbose_name_plural = 'Experience'
    fk_name = 'facilitator'
class FacilitatorQueriesInline(admin.StackedInline):
    # Facilitator queries edited inline on the applicant admin page.
    model = FacilitatorQueries
    can_delete = False
    verbose_name_plural = 'Facilitator queries'
    fk_name = 'user'
class ApplicantsAdmin(admin.ModelAdmin):
    """Admin for facilitator applicants, with bulk actions that move the
    selected applicants through the status workflow (Approved / Shortlisted
    / On Hold / Rejected) and send the corresponding notification mails."""
    inlines = (ExperienceInline, FacilitatorQueriesInline)
    list_display = ('Aid', 'name', 'user', 'phone', 'intrest', 'status')
    list_select_related = ('facilitator', 'experience')
    list_display_links = ['name', 'user', 'phone', 'intrest', 'status']
    search_fields = ('name',)

    def get_inline_instances(self, request, obj=None):
        # Hide the inlines on the "add" form: there is no applicant yet to
        # attach related rows to.
        if not obj:
            return list()
        return super(ApplicantsAdmin, self).get_inline_instances(request, obj)

    def approve_facilitator(self, request, queryset):
        # Promote each selected applicant to a Facilitator (idempotent:
        # already-promoted applicants are only marked Approved again).
        for user in queryset:
            try:
                check = Facilitator.objects.get(user=user)
            except Facilitator.DoesNotExist:
                # Fix: was a bare ``except:`` that also swallowed real
                # database errors (and MultipleObjectsReturned) and then
                # created duplicate Facilitator rows.
                check = None
            if check is None:
                facilitator = Facilitator.objects.create(name=user.name, phone=user.phone, user=user)
                facilitator.save()
                user.status = 'Approved'
                group = Group.objects.get(name='Facilitators')
                user.user.groups.add(group)
                user.save()
                successOnRegistration(user.user)
                messages.success(request, (user.name + ' is approved !'))
            else:
                user.status = 'Approved'
                user.save()
                messages.info(request, (check.name + ' is already approved !'))

    def shortlisted_facilitator(self, request, queryset):
        # Mark applicants as shortlisted and notify them by mail.
        # (A dead ``li`` email list that was built but never used has been
        # removed.)
        for applicant in queryset:
            if applicant.status == 'Shortlisted':
                messages.error(request, (applicant.name + ' is already shortlisted !'))
            else:
                applicant.status = 'Shortlisted'
                applicant.save()
                successOnShortlisted(applicant.user)
                messages.info(request, (applicant.name + ' is shortlisted !'))

    def OnHold_facilitator(self, request, queryset):
        for applicant in queryset:
            if applicant.status == 'On Hold':
                messages.error(request, (applicant.name + ' is already On Hold !'))
            else:
                applicant.status = 'On Hold'
                applicant.save()
                messages.info(request, (applicant.name + ' is On Hold !'))

    def Rejected_facilitator(self, request, queryset):
        for applicant in queryset:
            if applicant.status == 'Rejected':
                messages.error(request, (applicant.name + ' is already Rejected !'))
            else:
                applicant.status = 'Rejected'
                applicant.save()
                messages.info(request, (applicant.name + ' is Rejected !'))

    # Labels shown in the admin actions dropdown.
    OnHold_facilitator.short_description = 'On Hold'
    Rejected_facilitator.short_description = 'Rejected'
    shortlisted_facilitator.short_description = 'Shortlisted'
    approve_facilitator.short_description = 'Approve'
    actions = [approve_facilitator, shortlisted_facilitator, OnHold_facilitator, Rejected_facilitator]
# class ExperienceAdmin(admin.ModelAdmin):
# list_display=('Eid','Linkedin_Url','Website_Url','Youtube_Url','RExperience','TExperience','facilitator')
# class FacilitatorQueriesAdmin(admin.ModelAdmin):
# ist_display=('Qid','query','status','user')
class FacilitatorAdmin(admin.ModelAdmin):
    # Admin for approved facilitators.  ``offer_inline`` comes from the star
    # import of LandingPage.admin.
    list_display=('Fid','name','DOB','phone','PAddress','TAddress','profile','Bio','country','state','zipcode','user')
    inlines = (offer_inline,)
    list_display_links=['name','DOB','phone','PAddress','TAddress','profile','Bio','country','state',]
# Register the admin classes (OTP falls back to the default ModelAdmin).
admin.site.register(Facilitator,FacilitatorAdmin)
admin.site.register(Applicants,ApplicantsAdmin)
admin.site.register(OTP)
|
#!/usr/bin/env python
"""
File: reduce_features
Date: 12/7/18
Author: Robert Neff (rneff@stanford.edu)
"""
import os
import csv
import numpy as np
def build_kaggle_submission(predictions_dict, save_dir):
    '''
    Builds a Kaggle submission csv file for a predictions dictionary
    of the form: id:multi_hot_labels.
    ex. "id1":[0, 5, 11]  ->  row  id1,0 5 11

    Returns the path of the written file.
    '''
    assert isinstance(predictions_dict, dict)
    assert isinstance(save_dir, str)

    filepath = os.path.join(save_dir, "submission.csv")
    # newline="" lets the csv module control the line endings (avoids blank
    # rows on Windows).  Fix: plain "w" instead of the odd "w+" -- the file
    # is only written, never read back here.
    with open(filepath, "w", newline="") as csvfile:
        writer = csv.writer(csvfile, delimiter=",", quotechar="|", quoting=csv.QUOTE_MINIMAL)
        writer.writerow(["Ids", "Predicted"])
        # ``example_id`` was previously called ``id``, shadowing the builtin.
        for example_id, labels in predictions_dict.items():
            writer.writerow([str(example_id), " ".join(map(str, labels))])
    return filepath
|
#
"""
MNIST digit classification with a small fully-connected Keras network.

Created on Tue Dec 22 17:46:01 2020

@author: daniele
"""
import numpy as np
import matplotlib.pyplot as plt
# Sequential builds the neural network layer by layer
from keras.models import Sequential
# Dense fully connects the previous layer's outputs to this layer's nodes
from keras.layers import Dense
# load_mnist comes from the local ``mnist`` helper module
from mnist import load_mnist

x_train, x_test, y_train, y_test = load_mnist (path="C:/Users/daniele/Desktop/Reti_Neuralipy/MNIST")

# Show the first training sample (images are stored flattened; reshape to 28x28)
plt.imshow(x_train[0].reshape([28,28]),cmap = "gray")
plt.axis('off') # hide the axis ticks
print("La cifra nell'immagine è un %d" % y_train[0])

# Pre-processing / normalisation:
# scale every pixel by the maximum pixel value, 255
x_train=x_train/255
x_test=x_test/255

# One-hot encode the labels for the 10 digit classes
from keras.utils import to_categorical
num_class=10
y_train_nuovo=to_categorical(y_train,num_class)
y_test_nuovo=to_categorical(y_test,num_class)

# Build the model
model=Sequential()
# first hidden layer: 512 nodes
model.add(Dense(512,input_dim=x_train.shape[1],activation='relu'))
# second hidden layer
model.add(Dense(256,activation='relu'))
# third hidden layer
model.add(Dense(128, activation='relu'))
# output layer: one node per class; softmax for multi-class probabilities
model.add(Dense(num_class,activation='softmax'))
# inspect the architecture
model.summary()
# configure the training phase
model.compile(loss='categorical_crossentropy',optimizer='sgd',metrics=['accuracy'])
model.fit(x_train,y_train_nuovo, epochs=20)
# evaluate on the test set
print('\n\n\n set test')
model.evaluate(x_test,y_test_nuovo)
|
#!/bin/python
import sys
import math
# Advent of Code 2020, day 3: count trees ('#') hit while tobogganing down
# the grid on each slope, then print the product of the per-slope counts.
grid = []  # renamed from ``map``, which shadowed the builtin
# Fix: use a context manager -- the file handle was previously never closed.
with open(sys.argv[1], "r") as infile:
    for line in infile:
        grid.append(list(line.rstrip()))

width = len(grid[0])
height = len(grid)
print(width)
print(height)

# (A dead ``move = (3, 1)`` assignment, immediately overwritten by the loop
# variable, has been removed.)
results = []
for move in [(1, 1), (3, 1), (5, 1), (7, 1), (1, 2)]:
    print(move)
    xpos = 0
    ypos = 0
    tree = 0
    clear = 0
    while ypos < height:
        print("\t\t", xpos, ypos)
        if grid[ypos][xpos] == '#':
            tree += 1
        else:
            clear += 1
        ypos += move[1]
        # the pattern repeats horizontally, so wrap x around the row width
        xpos = (xpos + move[0]) % width
    print("\t", tree, clear)
    results.append(tree)
print(math.prod(results))
#!/usr/bin/python3
def multiple_returns(sentence):
    """Return ``(length, first_element)`` of *sentence*; for an empty or
    falsy value return ``(0, None)``."""
    if not sentence:
        return (0, None)
    return (len(sentence), sentence[0])
|
''' Tests for chronicler.decorators.audits '''
from mock import Mock
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from chronicler.models import AuditItem
from chronicler.decorators import audits
from chronicler.tests import TestCase
from chronicler.tests.models import Person, Group
# View stubs wrapped by the @audits decorator under test.  Each audits a
# Person identified by the 'person_pk' request value; the trailing argument
# apparently selects where that value is read from (POST body, GET query
# string, or -- when omitted -- the view's keyword arguments); see
# chronicler.decorators.audits for the authoritative signature.
@audits(Person, ['group_set'], 'pk', 'person_pk', 'POST')
def fake_view_post(request):
    pass


@audits(Person, ['group_set'], 'pk', 'person_pk', 'GET')
def fake_view_get(request):
    pass


@audits(Person, ['group_set'], 'pk', 'person_pk')
def fake_view(request, person_pk):
    pass


# force=True creates an audit item even when nothing changed.
@audits(Person, ['group_set'], 'pk', 'person_pk', force=True)
def fake_view_force(request, person_pk):
    pass
class TestCreateAuditEntry(TestCase):
    ''' Tests for the @audits decorator: audit items are created from the
    request data (POST / GET / view kwargs), deduplicated when nothing
    changed, and duplicated anyway when force=True.
    '''

    def setUp(self):
        super(TestCreateAuditEntry, self).setUp()
        self.user, _ = User.objects.get_or_create(username='analyte')
        self.content_type = ContentType.objects.get_for_model(Person)
        self.person = Person.objects.create(name='Tester')

    def test_create_post_key(self):
        ''' Test that asserts we can get our object from the POST variables
        when necessary
        '''
        assert not AuditItem.objects.all()
        request = Mock(POST={'person_pk': self.person.pk}, user=self.user)
        fake_view_post(request)
        assert AuditItem.objects.filter(
            content_type=self.content_type,
            object_id=self.person.pk)

    def test_create_get_key(self):
        ''' Test that asserts we can get our object from the GET variables
        when necessary
        '''
        assert not AuditItem.objects.all()
        request = Mock(GET={'person_pk': self.person.pk}, user=self.user)
        fake_view_get(request)
        assert AuditItem.objects.filter(
            content_type=self.content_type,
            object_id=self.person.pk)

    def test_create_simple_view(self):
        ''' Test that proves that we can grab our necessary data to get an
        object from the request path
        '''
        assert not AuditItem.objects.all()
        request = Mock(user=self.user)
        fake_view(request, person_pk=self.person.pk)
        assert AuditItem.objects.filter(
            content_type=self.content_type,
            object_id=self.person.pk)

    def test_prevent_audit_dupes(self):
        ''' Test that asserts that when nothing changes, we don't create
        another audit item with identical changes
        '''
        assert not AuditItem.objects.all()
        request = Mock(user=self.user)
        fake_view(request, person_pk=self.person.pk)
        assert AuditItem.objects.filter(
            content_type=self.content_type,
            object_id=self.person.pk)
        # Second identical call must not add a new item.
        fake_view(request, person_pk=self.person.pk)
        audit_items = AuditItem.objects.filter(
            content_type=self.content_type,
            object_id=self.person.pk)
        self.assertEqual(audit_items.count(), 1)

    def test_audits_force_create_dupes(self):
        ''' Test that asserts that even when we find nothing changes,
        that we will create a dupe if force is set to True
        '''
        assert not AuditItem.objects.all()
        request = Mock(user=self.user)
        fake_view(request, person_pk=self.person.pk)
        assert AuditItem.objects.filter(
            content_type=self.content_type,
            object_id=self.person.pk)
        # force=True bypasses the dedupe check above.
        fake_view_force(request, person_pk=self.person.pk)
        audit_items = AuditItem.objects.filter(
            content_type=self.content_type,
            object_id=self.person.pk)
        self.assertEqual(audit_items.count(), 2)
|
from django.shortcuts import render
from django import forms
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic.edit import CreateView
from django.views.generic import ListView
from django.contrib.contenttypes.models import ContentType
from .models import Transaction, Pet, PetSupply, TransactionRequest
# Create your views here.
class PetTransactionListView(ListView):
    """Paginated list of pet sale transactions."""
    model = Transaction
    paginate_by = 20
    context_object_name = 'pet_list'
    template_name = "puppy_sale/pet_transaction.html"
    # Fix: ContentType.model stores the *lowercased* model name, so
    # filtering on 'Pet' could never match any row; use 'pet'.
    queryset = Transaction.objects.filter(transactable_type__model='pet')
class PetSupplyTransactionListView(ListView):
    """Paginated list of pet supply sale transactions."""
    model = Transaction
    paginate_by = 20
    template_name = "puppy_sale/pet_supply_transaction.html"
    context_object_name = 'pet_supply_list'
    # Fix: ContentType.model stores the *lowercased* model name, so
    # filtering on 'PetSupply' could never match any row; use 'petsupply'.
    queryset = Transaction.objects.filter(transactable_type__model='petsupply')
class PetTransactionCreateView(LoginRequiredMixin, CreateView):
    """Create a Transaction offering one of the current user's pets."""
    model = Transaction
    template_name = "puppy_sale/pet_transaction_create.html"
    success_url = '/sales/pets'
    fields = ['transactable_id', 'transactable_name', 'price', 'deadline', 'description']

    def get_form(self, *args, **kwargs):
        """Restrict the pet choices to pets owned by the requesting user."""
        # Consistency: use zero-argument super(), as form_valid already does.
        form = super().get_form(*args, **kwargs)
        form.fields['transactable_id'] = \
            forms.ChoiceField(choices=[
                (pet.pk, pet.name) for pet in Pet.objects.filter(owner=self.request.user)])
        # transactable_name is populated elsewhere; hide it from the user.
        form.fields['transactable_name'].widget = forms.HiddenInput()
        return form

    def form_valid(self, form):
        """Stamp the new transaction with its content type and owner data."""
        form.instance.transactable_type = ContentType.objects.get_for_model(Pet)
        form.instance.owner = self.request.user
        # NOTE(review): assumes the custom user model defines real_name and
        # address1 -- confirm against AUTH_USER_MODEL.
        form.instance.owner_name = self.request.user.real_name
        form.instance.owner_address = self.request.user.address1
        return super().form_valid(form)
class PetSupplyTransactionCreateView(LoginRequiredMixin, CreateView):
    """Create a Transaction offering a pet supply (any supply, not just
    the requesting user's)."""
    model = Transaction
    template_name = "puppy_sale/pet_supply_transaction_create.html"
    success_url = '/sales/pet_supplies'
    fields = ['transactable_id', 'transactable_name', 'price', 'deadline', 'description']

    def get_form(self, *args, **kwargs):
        """Offer every PetSupply as a choice for the transaction subject."""
        # Consistency: use zero-argument super(), as form_valid already does.
        form = super().get_form(*args, **kwargs)
        form.fields['transactable_id'] = \
            forms.ChoiceField(choices=[
                (pet_supply.pk, pet_supply.name) for pet_supply in PetSupply.objects.all()])
        # transactable_name is populated elsewhere; hide it from the user.
        form.fields['transactable_name'].widget = forms.HiddenInput()
        return form

    def form_valid(self, form):
        """Stamp the new transaction with its content type and owner data."""
        form.instance.transactable_type = ContentType.objects.get_for_model(PetSupply)
        form.instance.owner = self.request.user
        # NOTE(review): assumes the custom user model defines real_name and
        # address1 -- confirm against AUTH_USER_MODEL.
        form.instance.owner_name = self.request.user.real_name
        form.instance.owner_address = self.request.user.address1
        return super().form_valid(form)
class TransactionRequestListView(ListView):
    """Paginated list of TransactionRequest objects using ListView defaults."""
    model = TransactionRequest
    paginate_by = 10
class PetDetailView(object):
    """Placeholder for a pet detail view -- not yet implemented."""
    pass
class PetSupplyDetailView(object):
    """Placeholder for a pet-supply detail view -- not yet implemented."""
    pass
|
import os
path = os.path
from myhdl import *
from lift_step import lift_step
from signed2twoscomplement import signed2twoscomplement
from mux import mux_data
from ram import ram
from fifo import fifo
from rd_pc import pc_read
from PIL import Image
# ---------------------------------------------------------------------------
# Module-level test data and MyHDL signal declarations shared by the design,
# the testbench, and the conversion helpers below.
# W0 is the sample data width in bits (9-bit unsigned pixel values).
# ---------------------------------------------------------------------------
W0 = 9
# Load the test image and reshape its flat pixel list into a 2-D matrix m,
# one inner list per image row.
im = Image.open("../../lena_256.png")
pix = im.load()
w, h = im.size
m = list(im.getdata())
#print m.__sizeof__()
m = [m[i:i+im.size[0]] for i in range(0, len(m), im.size[0])]
#print m
#print m[0][0], m[1][0],m[2][0],m[3][0],m[4][0],m[5][0],m[6][0]
#print m[248][0],m[249][0], m[250][0],m[251][0],m[252][0],m[253][0],m[254][0]
# NOTE(review): W0 is re-assigned to the same value here (already 9 above).
W0 = 9
# RAM port signals.
dout = Signal(intbv(0)[W0:])
din = Signal(intbv(0)[W0:])
addr = Signal(intbv(0)[8:])
we = Signal(bool(0))
clk = Signal(bool(0))
we_in = Signal(bool(0))
we_1 = Signal(bool(0))
addr_in = Signal(intbv(0)[8:])
toLift_Step = Signal(intbv(0)[W0:])
data_in = Signal(intbv(0)[W0:])
pc_data_in = Signal(intbv(0)[2:])
pc_data_rdy = Signal(intbv(0)[2:])
z = Signal(intbv(0)[W0:])
zfifo = Signal(intbv(0)[W0:])
read_pc_i = Signal(bool(0))
muxsel_i = Signal(bool(0))
muxaddrsel = Signal(intbv(0)[2:])
# Signed intermediates for the two's-complement converters and lift result.
x = Signal(intbv(0, min= -(2**(W0)) ,max= (2**(W0))))
xfifo = Signal(intbv(0, min= -(2**(W0)) ,max= (2**(W0))))
res_o = Signal(intbv(0, min=-(2**(W0)), max=(2**(W0))))
# lift_step operand/control signals.
left_i = Signal(intbv(0)[W0:])
right_i = Signal(intbv(0)[W0:])
sam_i = Signal(intbv(0)[W0:])
flgs_i = Signal(intbv(0)[4:])
update_i = Signal(bool(0))
update_o = Signal(bool(0))
SOF = Signal(bool(0))
syncFlag = Signal(bool(0))
rst_fsm = Signal(bool(1))
# Column-walk address pointers (left/sample/right of each lifting triple).
addr_left = Signal(intbv(0)[8:])
addr_sam = Signal(intbv(0)[8:])
addr_rht = Signal(intbv(0)[8:])
do_first = Signal(bool(0))
end_of_col = Signal(bool(0))
'''data from usb hostio'''
# NOTE(review): pc_data_in and pc_data_rdy are re-created here, shadowing the
# identical Signals declared above -- the earlier objects become unreachable.
pc_data_in = Signal(intbv(0)[2:])
pc_data_rdy = Signal(intbv(0)[2:])
data_pc_in = Signal(bool(0))
addr_in_toLift_Step = Signal(intbv(0)[8:])
del_ctn = Signal(intbv(0)[8:])
# One-hot FSM state encoding for the odd/even lifting controller.
t_State = enum('INIT', 'ODD_L', 'ODD_S', 'ODD_R', 'RD_RAM_LF', 'RD_RAM_SA', 'RD_RAM_RT', 'LIFT', 'LIFT_EXE', 'LIFT_RD', 'LIFT_WR', 'LIFT_DEL1', 'LIFT_DEL2', 'LIFT_DEL3', 'EVEN_L', 'EVEN_S', 'EVEN_R', 'RD_FIFO', 'RD_FIFO_DEL', 'RD_FIFO_DEL1', 'RD_FIFO_DEL2', 'RD_FIFO_DEL3','RD_FIFO_DEL4','RD_FIFO_DEL5','RD_FIFO_DEL6','RD_FIFO_DEL7','RD_FIFO_DEL8','DONE', encoding="one_hot")
state = Signal(t_State.INIT)
reset_dly_c = 10
# FIFO address/data widths.
ASZ = 8
DSZ = 9
NO = bool(0)
YES = bool(1)
# NOTE(review): clk is re-created here, shadowing the clk declared above.
clk = Signal(bool(0))
# Input-side (PC -> design) FIFO signals.
enw_r = Signal(bool(0))
enr_r = Signal(bool(0))
empty_r = Signal(bool(0))
full_r = Signal(bool(0))
dataout_r = Signal(intbv(0)[DSZ:])
datain_r = Signal(intbv(0)[DSZ:])
# Output-side (design -> PC) FIFO signals.
enw_ro = Signal(bool(0))
enr_ro = Signal(bool(0))
empty_ro = Signal(bool(0))
full_ro = Signal(bool(0))
dataout_ro = Signal(intbv(0)[DSZ:])
datain_ro = Signal(intbv(0)[DSZ:])
'''
enw_x = Signal(bool(0))
enr_x = Signal(bool(0))
empty_x = Signal(bool(0))
full_x = Signal(bool(0))
dataout_x = Signal(intbv(0)[DSZ:])
datain_x = Signal(intbv(0)[DSZ:])
'''
readptr = Signal(intbv(0)[ASZ:])
writeptr = Signal(intbv(0)[ASZ:])
mem = [Signal(intbv(0)[DSZ:]) for ii in range(2**ASZ)]
# INIT, READ_DATA, DONE = range(3)
# Reset is active-low.
ACTIVE_LOW = bool(0)
def Odd_Even_Fsm(state, clk, rst_fsm, addr_left, muxsel_i, addr_sam, addr_rht, muxaddrsel, we_1, dout, left_i, sam_i, right_i, do_first, x, z, flgs_i, update_i, res_o, update_o, end_of_col, addr_in, xfifo, enr_r, enw_r, del_ctn ):
    """Control FSM for one lifting pass over a 255-entry column.

    Sequence: load samples from the input FIFO into RAM (RD_FIFO* states),
    then walk the column in an odd phase (ODD_*) and an even phase (EVEN_*),
    reading left/sample/right triples from RAM, running them through the
    lift_step unit (LIFT* states) and writing the result back.  DONE raises
    end_of_col.  Reset (rst_fsm, active low) returns the FSM to INIT.

    NOTE(review): the process also drives ``we_in``, which is a module-level
    signal rather than a parameter; this works for simulation here but should
    be passed in explicitly -- confirm before conversion.
    """
    @always(clk.posedge, rst_fsm.negedge)
    def FSM():
        if rst_fsm == ACTIVE_LOW:
            addr_left.next = 0
            addr_sam.next = 1
            addr_rht.next = 2
            do_first.next = 0
            flgs_i.next = 7
            end_of_col.next = 0
            #enr_r.next = 0
            #enw_r.next = 0
            addr_in.next = 0
            state.next = t_State.INIT
        else:
            if state == t_State.INIT:
                we_in.next = 1
                muxsel_i.next = 1
                enr_r.next = 1
                state.next = t_State.RD_FIFO
            elif state == t_State.ODD_L:
                if (muxsel_i == 0):
                    if ((addr_left < 254) ):
                        if (addr_left == 0) and (do_first == 0):
                            ''' do_first goes hi to execute first location'''
                            #do_first.next = 1
                            addr_left.next = 0
                            muxaddrsel.next = 0
                            we_1.next = 0
                            state.next = t_State.RD_RAM_LF
                        else:
                            addr_left.next = addr_left + 2
                            muxaddrsel.next = 0
                            we_1.next = 0
                            state.next = t_State.RD_RAM_LF
            elif state == t_State.ODD_S:
                if (addr_sam < 254) :
                    if (addr_sam == 1)and (do_first == 0):
                        addr_sam.next = 1
                        muxaddrsel.next = 1
                        we_1.next = 0
                        state.next = t_State.RD_RAM_SA
                    else:
                        addr_sam.next = addr_sam + 2
                        muxaddrsel.next = 1
                        we_1.next = 0
                        state.next = t_State.RD_RAM_SA
            elif state == t_State.ODD_R:
                if (muxsel_i == 0):
                    if (addr_rht < 254):
                        if (addr_rht == 2)and (do_first == 0):
                            do_first.next = 1
                            addr_rht.next = 2
                            muxaddrsel.next = 2
                            we_1.next = 0
                            state.next = t_State.RD_RAM_RT
                        else:
                            addr_rht.next = addr_rht + 2
                            muxaddrsel.next = 2
                            we_1.next = 0
                            state.next = t_State.RD_RAM_RT
                    else:
                        # Odd phase finished: re-seed pointers for the even
                        # phase and switch lift flags to the even setting.
                        addr_left.next = 1
                        addr_sam.next = 2
                        addr_rht.next = 3
                        do_first.next = 0
                        flgs_i.next = 6
                        state.next = t_State.EVEN_L
            elif state == t_State.EVEN_L:
                if (muxsel_i == 0):
                    if ((addr_left < 254)):
                        if (addr_left == 1)and (do_first == 0):
                            ''' do_first goes hi to execute first location'''
                            #do_first.next = 1
                            addr_left.next = 1
                            muxaddrsel.next = 0
                            we_1.next = 0
                            state.next = t_State.RD_RAM_LF
                        else:
                            addr_left.next = addr_left + 2
                            muxaddrsel.next = 0
                            we_1.next = 0
                            state.next = t_State.RD_RAM_LF
            elif state == t_State.EVEN_S:
                if (addr_sam < 254):
                    if (addr_sam == 2)and (do_first == 0):
                        addr_sam.next = 2
                        muxaddrsel.next = 1
                        we_1.next = 0
                        state.next = t_State.RD_RAM_SA
                    else:
                        addr_sam.next = addr_sam + 2
                        muxaddrsel.next = 1
                        we_1.next = 0
                        state.next = t_State.RD_RAM_SA
                else:
                    addr_left.next = 1
                    addr_sam.next = 2
                    addr_rht.next = 3
                    state.next = t_State.DONE
            elif state == t_State.EVEN_R:
                if (muxsel_i == 0):
                    if (addr_rht < 254):
                        if (addr_rht == 3)and (do_first == 0):
                            do_first.next = 1
                            addr_rht.next = 3
                            muxaddrsel.next = 2
                            we_1.next = 0
                            state.next = t_State.RD_RAM_RT
                        else:
                            addr_rht.next = addr_rht + 2
                            muxaddrsel.next = 2
                            we_1.next = 0
                            state.next = t_State.RD_RAM_RT
                    else:
                        addr_left.next = 1
                        addr_sam.next = 2
                        addr_rht.next = 3
                        state.next = t_State.DONE
            elif state == t_State.DONE:
                # Terminal state: flag completion and hold.
                end_of_col.next = 1
                state.next = t_State.DONE
            elif state == t_State.RD_RAM_LF:
                # Latch the RAM output as the left operand.
                left_i.next = dout
                if (flgs_i == 6):
                    state.next = t_State.EVEN_S
                else:
                    state.next = t_State.ODD_S
            elif state == t_State.RD_RAM_SA:
                # Latch the RAM output as the centre sample.
                sam_i.next = dout
                if (flgs_i == 6):
                    state.next = t_State.EVEN_R
                else:
                    state.next = t_State.ODD_R
            elif state == t_State.RD_RAM_RT:
                # Latch the RAM output as the right operand.
                right_i.next = dout
                state.next = t_State.LIFT
            elif state == t_State.LIFT:
                '''setting addr to sam'''
                #flgs_i.next = 7
                update_i.next = 1
                we_1.next = 1
                muxaddrsel.next = 1
                state.next = t_State.LIFT_EXE
            elif state == t_State.LIFT_EXE:
                muxaddrsel.next = 1
                state.next = t_State.LIFT_RD
            elif state == t_State.LIFT_RD:
                muxaddrsel.next = 1
                # Capture the W0 low bits of the lift result for write-back.
                x.next = res_o[W0:]
                state.next = t_State.LIFT_WR
            elif state == t_State.LIFT_WR:
                muxaddrsel.next = 1
                #addr_sam.next = addr_sam + 1
                update_i.next = 0
                #we_1.next = 0
                state.next = t_State.LIFT_DEL1
            elif state == t_State.LIFT_DEL1:
                muxaddrsel.next = 1
                #addr_sam.next = addr_sam + 1
                #update_i.next = 0
                #we_1.next = 0
                state.next = t_State.LIFT_DEL2
            elif state == t_State.LIFT_DEL2:
                muxaddrsel.next = 1
                #addr_sam.next = addr_sam + 1
                #update_i.next = 0
                #we_1.next = 0
                state.next = t_State.LIFT_DEL3
            elif state == t_State.LIFT_DEL3:
                muxaddrsel.next = 1
                #addr_sam.next = addr_sam + 1
                #update_i.next = 0
                we_1.next = 0
                state.next = t_State.ODD_L
            elif state == t_State.RD_FIFO:
                del_ctn.next = 0
                if (addr_in <= 254):
                    enr_r.next = 0
                    #xfifo.next = dataout_r[W0:]
                    state.next = t_State.RD_FIFO_DEL
                else:
                    # Column fully loaded: hand control to the lifting walk.
                    muxsel_i.next = 0
                    enr_r.next = 0
                    we_in.next = 0
                    state.next = t_State.ODD_L
            elif state == t_State.RD_FIFO_DEL:
                xfifo.next = dataout_r[W0:]
                state.next = t_State.RD_FIFO_DEL1
            elif state == t_State.RD_FIFO_DEL1:
                #enr_r.next = 0
                if (del_ctn < 2):
                    del_ctn.next = del_ctn + 1
                else:
                    state.next = t_State.RD_FIFO_DEL7
            elif state == t_State.RD_FIFO_DEL2:
                state.next = t_State.RD_FIFO_DEL3
            elif state == t_State.RD_FIFO_DEL3:
                state.next = t_State.RD_FIFO_DEL4
            elif state == t_State.RD_FIFO_DEL4:
                state.next = t_State.RD_FIFO_DEL5
            elif state == t_State.RD_FIFO_DEL5:
                state.next = t_State.RD_FIFO_DEL6
            elif state == t_State.RD_FIFO_DEL6:
                state.next = t_State.RD_FIFO_DEL7
            elif state == t_State.RD_FIFO_DEL7:
                enr_r.next = 1
                state.next = t_State.RD_FIFO_DEL8
            elif state == t_State.RD_FIFO_DEL8:
                #enr_r.next = 1
                # BUGFIX: was ``addr_in.next = addr_in.next + 1``.  Reading a
                # Signal's .next attribute is not supported by MyHDL (it is
                # write-only from user code); increment from the current value.
                addr_in.next = addr_in + 1
                state.next = t_State.RD_FIFO
            else:
                raise ValueError("Undefined state")
    return FSM
def top_odd_even(state, clk, rst_fsm, addr_left, muxsel_i, addr_sam, addr_rht,
        muxaddrsel, we_1, dout, left_i, sam_i, right_i, do_first, x, z, xfifo,
        zfifo, flgs_i, update_i, res_o, update_o, end_of_col, empty_r, full_r,
        enr_r, enw_r, dataout_r, datain_r , empty_ro, full_ro, enr_ro, enw_ro,
        dataout_ro, datain_ro, addr_in, del_ctn):
    # Top-level structural wiring (used for Verilog/VHDL conversion):
    # FSM controller + sample RAM + address/data mux + two signed-to-two's-
    # complement converters + lift_step datapath + PC-side in/out FIFOs.
    instance_Odd_Even_Fsm = Odd_Even_Fsm (state, clk, rst_fsm, addr_left, muxsel_i, addr_sam, addr_rht, muxaddrsel, we_1, dout, left_i, sam_i, right_i, do_first, x, z, flgs_i, update_i, res_o, update_o, end_of_col, addr_in, xfifo, enr_r, enw_r, del_ctn)
    instance_ram = ram(dout, din, addr, we, clk)
    instance_mux_data = mux_data(z, din, data_in, we_1, we, we_in, addr, addr_in, muxsel_i, muxaddrsel, addr_left, addr_sam, addr_rht,zfifo)
    instance_signed2twoscomplement = signed2twoscomplement(clk, x, z)
    instance_signed2twoscomplementfifo = signed2twoscomplement(clk, xfifo, zfifo)
    instance_lift_step = lift_step(left_i, sam_i, right_i, flgs_i, update_i, clk, res_o, update_o)
    instance_pc_in = fifo(clk, empty_r, full_r, enr_r, enw_r, dataout_r, datain_r)
    instance_pc_out = fifo(clk, empty_ro, full_ro, enr_ro, enw_ro, dataout_ro, datain_ro)
    #instance_pd_read = pc_read(clk, data_in, toLift_Step, we_in, addr_in, muxsel_i, datactn_in, datactn, pc_data_in, pc_data_rdy )
    return instances()
def tb(state, clk, rst_fsm, addr_left, muxsel_i, addr_sam, addr_rht,
        muxaddrsel, we_1, dout, left_i, sam_i, right_i, do_first, x, z,xfifo,
        zfifo, flgs_i, update_i, res_o, update_o, end_of_col, empty_r, full_r,
        enr_r, enw_r, dataout_r, datain_r, empty_ro, full_ro, enr_ro, enw_ro,
        dataout_ro, datain_ro, addr_in, del_ctn ):
    # Testbench: instantiate the full design, push column 0 of the image
    # through the input FIFO, run the FSM until end_of_col, then read the
    # processed column back out via the output FIFO.
    # NOTE(review): the print statements below use Python 2 syntax
    # ("print expr % args" and bare "print"); this file requires Python 2.
    instance_Odd_Even_Fsm = Odd_Even_Fsm (state, clk, rst_fsm, addr_left, muxsel_i, addr_sam, addr_rht, muxaddrsel, we_1, dout, left_i, sam_i, right_i, do_first, x, z, flgs_i, update_i, res_o, update_o, end_of_col, addr_in, xfifo, enr_r, enw_r, del_ctn)
    instance_ram = ram(dout, din, addr, we, clk)
    instance_mux_data = mux_data(z, din, data_in, we_1, we, we_in, addr, addr_in, muxsel_i, muxaddrsel, addr_left, addr_sam, addr_rht,zfifo)
    instance_signed2twoscomplement = signed2twoscomplement(clk, x, z)
    instance_signed2twoscomplementfifo = signed2twoscomplement(clk, xfifo, zfifo)
    instance_lift_step = lift_step(left_i, sam_i, right_i, flgs_i, update_i, clk, res_o, update_o)
    instance_pc_in = fifo(clk, empty_r, full_r, enr_r, enw_r, dataout_r, datain_r)
    instance_pc_out = fifo(clk, empty_ro, full_ro, enr_ro, enw_ro, dataout_ro, datain_ro)
    # Free-running clock: 20 time-unit period.
    @always(delay(10))
    def clkgen():
        clk.next = not clk
    @instance
    def stimulus():
        # Hold the FSM in reset while the input FIFO is filled.
        rst_fsm.next = 0
        yield clk.posedge
        muxsel_i.next = 0
        yield clk.posedge
        enr_r.next = 0
        yield clk.posedge
        enw_r.next = 0
        yield clk.posedge
        datain_r.next = m[0][0]
        yield clk.posedge
        enw_r.next = 1
        yield clk.posedge
        # Stream rows 1..254 of column k=0 into the input FIFO, one per clock.
        for j in range(1,255):
            k = 0
            if (full_r == 0):
                datain_r.next = m[j][k]
                yield clk.posedge
                #print ("%d %d %d %d %d") % (now(), j, enw_r, full_r, m[j][k])
        enw_r.next = 0
        yield clk.posedge
        muxsel_i.next = 0
        yield clk.posedge
        # Release reset and let the FSM process the column.
        rst_fsm.next = 1
        yield clk.posedge
        print ("%d muxsel_i %d rst_fsm %d") % (now(), muxsel_i, rst_fsm)
        while (end_of_col == 0):
            print ("time %d flgs %d left %d sam %d right %d ") % (now(), flgs_i, left_i, sam_i, right_i)
            print ("time %d addr %d din %d we %d ") % (now(), addr, din, we)
            yield clk.posedge
        print ("%d ") % (now())
        print "end of col"
        # Switch the mux to external addressing and read the results back.
        # NOTE(review): k is reused here from the fill loop above (Python 2
        # leaks loop variables); its value is 0 -- fragile, confirm intent.
        muxsel_i.next = 1
        yield clk.posedge
        rst_fsm.next = 0
        yield clk.posedge
        addr_in.next = 0
        yield clk.posedge
        we_in.next = 0
        m[0][k] = dout
        datain_ro.next= m[0][k]
        yield clk.posedge
        print ("%d data to ram %d %d") % (now(), datain_ro, addr_in)
        addr_in.next = addr_in + 1
        yield clk.posedge
        enw_ro.next = 1
        yield clk.posedge
        for j in range(1,254):
            m[0][k] = dout
            datain_ro.next= m[j][k]
            yield clk.posedge
            addr_in.next = addr_in + 1
            print ("%d data to ram %d %d") % (now(), datain_ro, addr_in)
            yield clk.posedge
            enw_ro.next = 1
            yield clk.posedge
        raise StopSimulation
    return instances()
def convert():
    # Convert the structural top level to both Verilog and VHDL, binding the
    # module-level signals declared above.
    toVerilog(top_odd_even,state, clk, rst_fsm, addr_left, muxsel_i, addr_sam, addr_rht, muxaddrsel, we_1, dout, left_i, sam_i, right_i, do_first, x, z, xfifo, zfifo, flgs_i, update_i, res_o, update_o, end_of_col, empty_r, full_r, enr_r, enw_r, dataout_r, datain_r, empty_ro, full_ro, enr_ro, enw_ro, dataout_ro, datain_ro, addr_in, del_ctn)
    toVHDL(top_odd_even,state, clk, rst_fsm, addr_left, muxsel_i, addr_sam, addr_rht, muxaddrsel, we_1, dout, left_i, sam_i, right_i, do_first, x, z, xfifo, zfifo, flgs_i, update_i, res_o, update_o, end_of_col, empty_r, full_r, enr_r, enw_r, dataout_r, datain_r, empty_ro, full_ro, enr_ro, enw_ro, dataout_ro, datain_ro, addr_in, del_ctn)
def iverilogtest(state, clk, rst_fsm, addr_left, muxsel_i, addr_sam, addr_rht,
        muxaddrsel, we_1, dout, left_i, sam_i, right_i, do_first, x, z,xfifo,
        zfifo, flgs_i, update_i, res_o, update_o, end_of_col, empty_r, full_r,
        enr_r, enw_r, dataout_r, datain_r, empty_ro, full_ro, enr_ro, enw_ro,
        dataout_ro, datain_ro, addr_in, del_ctn):
    """Compile the converted Verilog with Icarus Verilog for cosimulation.

    Only the iverilog compile step is active; the _test helper below is
    retained for reference and is never invoked.
    """
    cmd = "iverilog -o odd_even_fsm top_odd_even.v tb_top_odd_even.v"
    os.system(cmd)
    # BUGFIX: the original declared ``def _test():`` twice, the first with a
    # comment-only body (a SyntaxError in Python); collapsed into one helper.
    def _test():
        # NOTE(review): my_bit/my_byte are not defined in this file -- this
        # helper would raise NameError if it were ever called.
        # dut = const_assign(aBit=my_bit, aByte=my_byte)
        dut = Cosimulation("vvp -m ./myhdl.vpi odd_even_fsm", aBit=my_bit, aByte=my_byte)
        # dut = odd_even_fsm(aBit=my_bit, aByte=my_byte)
        @instance
        def stim():
            raise StopSimulation
        return dut, stim
def main():
    # Wrap the testbench with VCD tracing and run the simulation to
    # completion (stimulus ends with StopSimulation).
    tb_fsm = traceSignals(tb, state, clk, rst_fsm, addr_left, muxsel_i, addr_sam, addr_rht,
            muxaddrsel, we_1, dout, left_i, sam_i, right_i, do_first, x, z,xfifo,
            zfifo, flgs_i, update_i, res_o, update_o, end_of_col, empty_r, full_r,
            enr_r, enw_r, dataout_r, datain_r, empty_ro, full_ro, enr_ro, enw_ro,
            dataout_ro, datain_ro, addr_in, del_ctn)
    sim = Simulation(tb_fsm)
    sim.run()
# Script entry point: run the simulation (convert()/iverilogtest() are
# invoked manually when needed).
if __name__ == '__main__':
    main()
'''
import os
from myhdl import *
def const_assign(aBit, aByte):
b = Signal(bool(True)) # to avoid "myhdl.AlwaysCombError: sensitivity list is empty"
@always_comb
def logic():
aBit.next = b
aByte.next = 0x55
return logic
def convert():
my_bit = Signal(bool(0))
my_byte = Signal(intbv(0)[8:])
toVerilog(const_assign, my_bit, my_byte)
def test():
my_bit = Signal(bool(0))
my_byte = Signal(intbv(0)[8:])
cmd = "iverilog -o const_assign const_assign.v tb_const_assign.v"
os.system(cmd)
def _test():
dut = Cosimulation("vvp -m ./myhdl.vpi const_assign", aBit=my_bit, aByte=my_byte)
# dut = const_assign(aBit=my_bit, aByte=my_byte)
@instance
def stim():
print "-------------"
yield delay(10)
print "Expected ({}, {}), detected ({}, {})".format(True, 0x55, my_bit, my_byte)
yield delay(10)
print "Expected ({}, {}), detected ({}, {})".format(True, 0x55, my_bit, my_byte)
yield delay(10)
print "Expected ({}, {}), detected ({}, {})".format(True, 0x55, my_bit, my_byte)
print "-------------"
raise StopSimulation
return dut, stim
Simulation(_test()).run()
if __name__ == '__main__':
convert()
test()
'''
|
import FWCore.ParameterSet.Config as cms
# CMSSW configuration: run the custom PairProducer EDProducer over the first
# 100 events of a Phys14 MiniAOD H->tautau sample and write the result to a
# local ROOT file.
process = cms.Process("PairProducer")
process.load("FWCore.MessageService.MessageLogger_cfi")
# Limit the job to 100 events (-1 would mean "all events").
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(100) )
# NOTE(review): the '/opt/CMMSW/...' paths look like a misspelling of CMSSW,
# but they are runtime filesystem paths -- confirm against the actual layout
# before changing them.
process.source = cms.Source("PoolSource",
    # replace 'myfile.root' with the source file you want to use
    fileNames = cms.untracked.vstring(
        'file:/opt/CMMSW/Data/Phys14MiniAOD/GluGluToHToTauTau_M-125_13TeV-powheg-pythia6.root'
    )
)
process.out = cms.OutputModule("PoolOutputModule",
    fileName = cms.untracked.string('/opt/CMMSW/Data/PairProducer.root')
)
# The producer reads the slimmed (miniAOD) lepton collections.
process.leptonPair = cms.EDProducer('PairProducer',
    taus = cms.InputTag("slimmedTaus"),
    muons = cms.InputTag("slimmedMuons"),
    electrons = cms.InputTag("slimmedElectrons"),
)
process.p = cms.Path(process.leptonPair)
process.e = cms.EndPath(process.out)
|
http://www.cnblogs.com/wupeiqi/articles/6229292.html #爬虫性能相关和Scrapy框架
性能相关
1.在编写爬虫时,性能的消耗主要在IO请求中,当单进程单线程模式下请求URL时必然会引起等待,从而使得请求整体变慢
1.1单线程单进程模式
实例1:#用时 ==> 19.061163187026978
import requests,time
time1 = time.time()
a = requests.get('http://www.cnblogs.com/wupeiqi/articles/6229292.html')
b = requests.get('https://h5.qichedaquan.com/jike/?jkcx=0&channel=jingxiyuean')
print(a.text)
print(b.text)
print(time.time()- time1)
实例2:#用时 ==>19.067904472351074
import requests,time
time1 = time.time()
def get_url(url):
a = requests.get(url)
return a.text
url_list = ['http://www.cnblogs.com/wupeiqi/articles/6229292.html',
'https://h5.qichedaquan.com/jike/?jkcx=0&channel=jingxiyuean']
for i in url_list:
mess = get_url(i)
print(mess)
print(time.time()-time1)
1.2多线程
1.2.1 Python threading模块 #用时 ==> 9.92071795463562
import requests
import threading
import time
def get_url(url):
a = requests.get(url)
print(a.text)
print(time.time() - mytime)
url_list = ['http://www.cnblogs.com/wupeiqi/articles/6229292.html',
'https://h5.qichedaquan.com/jike/?jkcx=0&channel=jingxiyuean']
if __name__ == '__main__':
mytime = time.time()
for url in url_list:
t = threading.Thread(target=get_url,args=(url,))
t.start()
1.2.2 线程池 #用时 ==> 10.068973302841187
from concurrent.futures import ThreadPoolExecutor
import requests
import time
def fetch_async(url):
response = requests.get(url)
return response.text
def callback(future):
print(future.result())
url_list = ['http://www.cnblogs.com/wupeiqi/articles/6229292.html',
'https://h5.qichedaquan.com/jike/?jkcx=0&channel=jingxiyuean']
pool = ThreadPoolExecutor(2) #创建个容量为2的线程池
time1 = time.time()
for url in url_list:
v = pool.submit(fetch_async, url)
v.add_done_callback(callback)
pool.shutdown(wait=True)
print(time.time()-time1)
从Python3.2开始,标准库为我们提供了concurrent.futures模块,它提供了ThreadPoolExecutor和ProcessPoolExecutor两个类,对编写线程池/进程池提供了直接的支持
参考:https://www.ziwenxie.site/2016/12/24/python-concurrent-futures/
1.3多进程
1.3.1 Python multiprocessing模块 #用时 ==>10.076193809509277
from multiprocessing import Process
import requests
import time
def fetch_async(url):
response = requests.get(url)
print(response)
print(time.time() - time1)
url_list = ['http://www.cnblogs.com/wupeiqi/articles/6229292.html',
'https://h5.qichedaquan.com/jike/?jkcx=0&channel=jingxiyuean']
time1 = time.time()
if __name__ == '__main__':
for url in url_list:
p = Process(target=fetch_async, args=(url,))
p.start()
PS:跑个题,进程这儿涉及进程间不能通信的问题,queue,managers,pipes都能解决这个问题
1.3.2进程池 #用时 ==> 11.25045657157898
from concurrent.futures import ProcessPoolExecutor
import requests
import time
def fetch_async(url):
response = requests.get(url)
return response
def callback(future):
print(future.result())
url_list = ['http://www.cnblogs.com/wupeiqi/articles/6229292.html',
'https://h5.qichedaquan.com/jike/?jkcx=0&channel=jingxiyuean']
pool = ProcessPoolExecutor(2)
time1 = time.time()
if __name__ == '__main__':
for url in url_list:
v = pool.submit(fetch_async, url)
v.add_done_callback(callback)
pool.shutdown(wait=True)
print(time.time() - time1)
2.通过上述代码均可以完成对请求性能的提高,对于多线程和多进程的缺点是在IO阻塞时会造成了线程和进程的浪费,所以异步IO会是首选:
2.1 asyncio示例 #用时 ==>0.6783857345581055 非常强
asyncio是Python 3.4版本引入的标准库,直接内置了对异步IO的支持
asyncio的编程模型就是一个消息循环,从asyncio模块中直接获取一个EventLoop的引用,然后把需要执行的协程扔到EventLoop中执行,就实现了异步IO
import asyncio
import time
@asyncio.coroutine #把一个generator标记为coroutine类型,把这个coroutine扔到EventLoop中执行
def wget(host, url='/'):
print('路径: %s%s' % (host, url))
reader, writer = yield from asyncio.open_connection(host, 80)
header = """GET %s HTTP/1.0\r\nHost: %s\r\n\r\n""" % (url, host) #拼接header
print("header",header)
writer.write(header.encode('utf-8')) #encode header
# yield from asyncio.sleep(5) #yield from语法调用另一个generator,asyncio.sleep()也是一个coroutine,把asyncio.sleep()看成是一个耗时n秒的IO操作,在此期间,主线程不等待,而是去执行EventLoop中其他可以执行的coroutine了,因此可以实现并发执行
yield from writer.drain() #循环调写操作并刷新buffer, 写入数据量大时用这个 给你指个路==> https://docs.python.org/3/library/asyncio-stream.html#asyncio.StreamWriter
text = yield from reader.read()
print(host, url, text.decode())
writer.close()
tasks = [
wget('www.cnblogs.com', '/wupeiqi/'),
wget('www.cnblogs.com', '/wupeiqi/articles/6229292.html')] #封装两个coroutine
time1 =time.time()
loop = asyncio.get_event_loop() #获取EventLoop
results = loop.run_until_complete(asyncio.gather(*tasks)) #执行coroutine
loop.close()
print(time.time() - time1)
'''
参考:https://docs.python.org/3/library/asyncio-stream.html#asyncio.open_connection
open_connection(host=None, port=None, *, loop=None, limit=None, **kwds),是协程
reader return一个StreamReader实例,writer return一个StreamWriter实例,传入的参数和create_connection()没啥区别
'''
2.2 gevent + requests
import gevent
import requests
from gevent import monkey
monkey.patch_all() #打补丁
def fetch_async(method, url, req_kwargs):
print(method, url, req_kwargs)
response = requests.request(method=method, url=url, **req_kwargs)
print(response.url, response.content)
# ##### 发送请求 #####
gevent.joinall([
gevent.spawn(fetch_async, method='get', url='https://www.python.org/', req_kwargs={}),
gevent.spawn(fetch_async, method='get', url='https://www.yahoo.com/', req_kwargs={}),
gevent.spawn(fetch_async, method='get', url='https://github.com/', req_kwargs={}),
])
'''
##### 发送请求(协程池控制最大协程数量) #####
from gevent.pool import Pool
pool = Pool(None)
gevent.joinall([
pool.spawn(fetch_async, method='get', url='https://www.python.org/', req_kwargs={}),
pool.spawn(fetch_async, method='get', url='https://www.yahoo.com/', req_kwargs={}),
pool.spawn(fetch_async, method='get', url='https://www.github.com/', req_kwargs={}),
])
'''
2.3 Tornado
pass
2.4 Twisted示例
pass
更多例子:http://www.cnblogs.com/wupeiqi/articles/6229292.html
以上均是Python内置以及第三方模块提供异步IO请求模块,使用简便大大提高效率,而对于异步IO请求的本质则是【非阻塞Socket】+【IO多路复用】:
pass
Scrapy
Scrapy介绍:
使用Twisted异步网络库处理网络通讯,爬取网站数据,提取结构性数据的应用框架,可以应用在数据挖掘,信息处理,监测和自动化测试或存储历史数据等一系列的程序中
Scrapy主要包含组件;
引擎(Scrapy):用来处理整个系统的数据流处理, 触发事务(框架核心)
调度器(Scheduler):用来接受引擎发过来的请求, 压入队列中, 并在引擎再次请求的时候返回. 可以想像成一个URL(抓取网页的网址或者说是链接)的优先队列, 由它来决定下一个要抓取的网址是什么, 同时去除重复的网址
下载器(Downloader):用于下载网页内容, 并将网页内容返回给蜘蛛(Scrapy下载器是建立在twisted这个高效的异步模型上的)
爬虫(Spiders):爬虫是主要干活的, 用于从特定的网页中提取自己需要的信息, 即所谓的实体(Item)。用户也可以从中提取出链接,让Scrapy继续抓取下一个页面
项目管道(Pipeline):负责处理爬虫从网页中抽取的实体,主要的功能是持久化实体、验证实体的有效性、清除不需要的信息。当页面被爬虫解析后,将被发送到项目管道,并经过几个特定的次序处理数据
下载器中间件(Downloader Middlewares):位于Scrapy引擎和下载器之间的框架,主要是处理Scrapy引擎与下载器之间的请求及响应
爬虫中间件(Spider Middlewares):介于Scrapy引擎和爬虫之间的框架,主要工作是处理蜘蛛的响应输入和请求输出
调度中间件(Scheduler Middewares):介于Scrapy引擎和调度之间的中间件,从Scrapy引擎发送到调度的请求和响应
Scrapy运行流程
1.引擎从调度器中取出一个链接(URL)用于接下来的抓取
2.引擎把URL封装成一个请求(Request)传给下载器
3.下载器把资源下载下来,并封装成应答包(Response)
4.爬虫解析Response
5.解析出实体(Item),则交给实体管道进行进一步的处理
6.解析出的是链接(URL),则把URL交给调度器等待抓取
安装:
Linux
pip3 install scrapy
Windows
a. pip3 install wheel
b. 下载twisted http://www.lfd.uci.edu/~gohlke/pythonlibs/#twisted
c. 进入下载目录,执行 pip3 install Twisted-17.5.0-cp35-cp35m-win_amd64.whl
d. pip3 install scrapy
e. 下载并安装pywin32:https://sourceforge.net/projects/pywin32/files/
基本命令:
1. scrapy startproject 项目名称
- scrapy startproject cutetopaz #在当前目录中创建一个项目文件(类似于Django)
2. scrapy genspider [-t template] <name> <domain>
- scrapy genspider -t basic topaz topaz.com #创建爬虫应用
3. scrapy list #展示爬虫应用列表
4. scrapy crawl 爬虫应用名称 #运行单独爬虫应用
PS:
scrapy genspider -l #查看所有命令
scrapy genspider -d 模板名称 #查看模板命令
来俩实例练练手:
练手之前了解下HtmlXPathSelector,HtmlXPathSelector用于结构化HTML代码并提供选择器功能,比BeautifulSoup快
#_*_coding:utf-8_*_
# Author:Topaz
from scrapy.selector import Selector, HtmlXPathSelector
from scrapy.http import HtmlResponse
html = """<!DOCTYPE html>
<html>
<head lang="en">
<meta charset="UTF-8">
<title></title>
</head>
<body>
<ul>
<li class="item-"><a id='i12' href="link.html">first item</a></li>
<li class="item-0"><a id='i2' href="llink.html">first item</a></li>
<li class="item-1"><a href="llink2.html">second item<span>vv</span></a></li>
</ul>
<div><a href="llink2.html">second item</a></div>
</body>
</html>
"""
response = HtmlResponse(url='http://example.com', body=html,encoding='utf-8')
hxs = Selector(response) # ==> <Selector xpath=None data='<html>\n <head lang="en">\n <met'>
hxs = Selector(response=response).xpath('//a') #拿到了所有a标签
hxs = Selector(response=response).xpath('//a[@id]') #拿到所有有id属性的标签
hxs = Selector(response=response).xpath('//a[starts-with(@href,"link")]' ) #拿到开头为link的href标签
hxs = Selector(response=response).xpath('//a[contains(@href, "link")]') #拿到链接包含link的href标签
hxs = Selector(response=response).xpath('//a[re:test(@id, "i\d+")]') #正则 取出i开头后边是数字的
hxs = Selector(response=response).xpath('//a[re:test(@id, "i\d+")]/text()').extract() #===> ['first item', 'first item']
hxs = Selector(response=response).xpath('//a[re:test(@id, "i\d+")]/@href').extract() #==> ['link.html', 'llink.html']
hxs = Selector(response=response).xpath('/html/body/ul/li/a/@href').extract() #==> ['link.html', 'llink.html', 'llink2.html']
hxs = Selector(response=response).xpath('//body/ul/li/a/@href').extract_first() #==> link.html
print(hxs)
参考:https://doc.scrapy.org/en/0.12/topics/selectors.html
实例1:
import scrapy
from scrapy.selector import Selector #一会儿结构化html用,跟BeautifulSoup一个作用
from scrapy.http.request import Request #Request是一个封装用户请求的类,在回调函数中yield该对象表示继续访问
class DigSpider(scrapy.Spider):
name = "dig" # 爬虫应用的名称,通过此名称启动爬虫命令
allowed_domains = ["chouti.com"] # 允许的域名
start_urls = ['http://dig.chouti.com/',] # 起始URL
has_request_set = {}
def parse(self, response):
# print('url:',response.url)
page_list = Selector(response=response).xpath('//div[@id="dig_lcpage"]//a[re:test(@href, "/all/hot/recent/\d+")]/@href').extract()
for page in page_list: #循环拿到的uri列表
page_url = 'http://dig.chouti.com%s' % page #拼接
key = self.md5(page_url) #送它去加密
if key in self.has_request_set:
pass
else:
self.has_request_set[key] = page_url #把key加到列表里
obj = Request(url=page_url, method='GET', callback=self.parse)
yield obj
@staticmethod
def md5(val): #封装成静态方法,不让它访问类变量和实例变量
import hashlib
ha = hashlib.md5()
ha.update(bytes(val, encoding='utf-8'))
key = ha.hexdigest()
return key
实例2:scrapy自动登录抽屉并点赞:
import scrapy
import hashlib
from scrapy.selector import Selector
from scrapy.http.request import Request
from scrapy.http.cookies import CookieJar
from scrapy import FormRequest
class MyLittleSpider(scrapy.Spider):
name = 'topaz'
allowed_domains = ['chouti.com']
cookie_dict = {}
has_request_set = {}
def start_requests(self):
url = 'http://dig.chouti.com/'
obj = Request(url=url, callback=self.login)
# print('obj',obj) #==><GET http://dig.chouti.com/>
yield obj
def login(self,response):
# print(response,response.request) # request==> <200 http://dig.chouti.com/>, response.request ==> <GET http://dig.chouti.com/>
my_cookies = CookieJar()
my_cookies.extract_cookies(response,response.request)
# print('cookies!!!!',my_cookies._cookies) # ==> 社会主义cookie,有用的cookie,想要的都有,取就是了
for k, v in my_cookies._cookies.items():
# print('随意拿,不要害羞',v) #==> 分成了两部分, 捂污吴~~
for i,j in v.items():
# print('来宝贝跟稳了我们一起看jj',j) # ==>有包含gpsd的部分哟~
for m,n in j.items():
# print('只是个M啦',m) #==>gpsd等
# print('n',n.value) #==>gpsd的值等
self.cookie_dict[m] = n.value
# print('看看大字典',self.cookie_dict) #==>{'gpsd': 'a460c7e96329f9b6257ebe805f54d9dc', 'route': '249e9500f56e96c9681c6db3bc475cbf', 'JSESSIONID': 'aaaqYBsRkE1JRa77_hH5v'}
req = Request(
url = 'http://dig.chouti.com/login',
method ='POST',
headers = {'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'},
body='phone=8618310703270&password=123456',
cookies = self.cookie_dict,
callback = self.check_login
)
yield req
def check_login(self,response):
req = Request(
url = 'http://dig.chouti.com/',
method='GET',
cookies=self.cookie_dict,
dont_filter=True,
callback=self.show)
yield req
def show(self,response): #第一页是check_login传给他的,后面的都是自己循环出来传给自己的
print(response.url)
new_list = Selector(response=response).xpath('//div[@id="content-list"]/div[@class="item"]') #取出传进来的reponse页面上的全部段子div
for new in new_list:
link_id = new.xpath('*/div[@class="part2"]/@share-linkid').extract_first() #==> 取出每条赞的id
req = Request(
url='http://dig.chouti.com/link/vote?linksId=%s' %(link_id,),
method='POST',
cookies=self.cookie_dict,
callback=self.do_favor,
)
yield req
page_list = Selector(response=response).xpath('//div[@id="dig_lcpage"]//a[re:test(@href, "/all/hot/recent/\d+")]/@href').extract()
for page in page_list:
page_url = 'http://dig.chouti.com%s' %page
hash = hashlib.md5()
hash.update(bytes(page_url,encoding='utf-8'))
key = hash.hexdigest()
if key in self.has_request_set:
# print(self.has_request_set)
pass
else:
# print('调用自己',page_url) http://dig.chouti.com/all/hot/recent/9
self.has_request_set[key] = page_url
req = Request(
url=page_url,
method='GET',
callback=self.show
)
yield req
def do_favor(self, reponse):
    """Callback for a vote request: dump the response body to stdout."""
    body = reponse.text
    print(body)
项目结构以及爬虫应用简介
cutetopaz/
scrapy.cfg #项目的主配置信息(真正爬虫相关的配置信息在settings.py文件中)
cutetopaz/
__init__.py
items.py #设置数据存储模板,用于结构化数据 如:Django的Model
pipelines.py #数据处理行为 如:一般结构化的数据持久化
settings.py #配置文件 如:递归的层数、并发数,延迟下载等
spiders/ #爬虫目录 如:创建文件,编写爬虫规则
__init__.py
爬虫1.py #一般创建爬虫文件时,以网站域名命名
settings.py
# -*- coding: utf-8 -*-
# Scrapy settings for step8_king project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
# 1. Crawler bot name
BOT_NAME = 'step8_king'
# 2. Package paths where spider modules live
SPIDER_MODULES = ['step8_king.spiders']
NEWSPIDER_MODULE = 'step8_king.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
# 3. 客户端 user-agent请求头
# USER_AGENT = 'step8_king (+http://www.yourdomain.com)'
# Obey robots.txt rules
# 4. 禁止爬虫配置
# ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
# 5. 并发请求数
# CONCURRENT_REQUESTS = 4
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# 6. 延迟下载秒数
# DOWNLOAD_DELAY = 2
# The download delay setting will honor only one of:
# 7. 单域名访问并发数,并且延迟下次秒数也应用在每个域名
# CONCURRENT_REQUESTS_PER_DOMAIN = 2
# 单IP访问并发数,如果有值则忽略:CONCURRENT_REQUESTS_PER_DOMAIN,并且延迟下次秒数也应用在每个IP
# CONCURRENT_REQUESTS_PER_IP = 3
# Disable cookies (enabled by default)
# 8. 是否支持cookie,cookiejar进行操作cookie
# COOKIES_ENABLED = True
# COOKIES_DEBUG = True
# Disable Telnet Console (enabled by default)
# 9. Telnet用于查看当前爬虫的信息,操作爬虫等...
# 使用telnet ip port ,然后通过命令操作
# TELNETCONSOLE_ENABLED = True
# TELNETCONSOLE_HOST = '127.0.0.1'
# TELNETCONSOLE_PORT = [6023,]
# 10. 默认请求头
# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
# }
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
# 11. 定义pipeline处理请求
# ITEM_PIPELINES = {
# 'step8_king.pipelines.JsonPipeline': 700,
# 'step8_king.pipelines.FilePipeline': 500,
# }
# 12. 自定义扩展,基于信号进行调用
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
# EXTENSIONS = {
# # 'step8_king.extensions.MyExtension': 500,
# }
# 13. 爬虫允许的最大深度,可以通过meta查看当前深度;0表示无深度
# DEPTH_LIMIT = 3
# 14. 爬取时,0表示深度优先Lifo(默认);1表示广度优先FiFo
# 后进先出,深度优先
# DEPTH_PRIORITY = 0
# SCHEDULER_DISK_QUEUE = 'scrapy.squeue.PickleLifoDiskQueue'
# SCHEDULER_MEMORY_QUEUE = 'scrapy.squeue.LifoMemoryQueue'
# 先进先出,广度优先
# DEPTH_PRIORITY = 1
# SCHEDULER_DISK_QUEUE = 'scrapy.squeue.PickleFifoDiskQueue'
# SCHEDULER_MEMORY_QUEUE = 'scrapy.squeue.FifoMemoryQueue'
# 15. 调度器队列
# SCHEDULER = 'scrapy.core.scheduler.Scheduler'
# from scrapy.core.scheduler import Scheduler
# 16. 访问URL去重
# DUPEFILTER_CLASS = 'step8_king.duplication.RepeatUrl'
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
"""
17. 自动限速算法
from scrapy.contrib.throttle import AutoThrottle
自动限速设置
1. 获取最小延迟 DOWNLOAD_DELAY
2. 获取最大延迟 AUTOTHROTTLE_MAX_DELAY
3. 设置初始下载延迟 AUTOTHROTTLE_START_DELAY
4. 当请求下载完成后,获取其"连接"时间 latency,即:请求连接到接受到响应头之间的时间
5. 用于计算的... AUTOTHROTTLE_TARGET_CONCURRENCY
target_delay = latency / self.target_concurrency
new_delay = (slot.delay + target_delay) / 2.0 # 表示上一次的延迟时间
new_delay = max(target_delay, new_delay)
new_delay = min(max(self.mindelay, new_delay), self.maxdelay)
slot.delay = new_delay
"""
# 开始自动限速
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# 初始下载延迟
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# 最大下载延迟
# AUTOTHROTTLE_MAX_DELAY = 10
# The average number of requests Scrapy should be sending in parallel to each remote server
# 平均每秒并发数
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# 是否显示
# AUTOTHROTTLE_DEBUG = True
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
"""
18. 启用缓存
目的用于将已经发送的请求或相应缓存下来,以便以后使用
from scrapy.downloadermiddlewares.httpcache import HttpCacheMiddleware
from scrapy.extensions.httpcache import DummyPolicy
from scrapy.extensions.httpcache import FilesystemCacheStorage
"""
# 是否启用缓存策略
# HTTPCACHE_ENABLED = True
# 缓存策略:所有请求均缓存,下次在请求直接访问原来的缓存即可
# HTTPCACHE_POLICY = "scrapy.extensions.httpcache.DummyPolicy"
# 缓存策略:根据Http响应头:Cache-Control、Last-Modified 等进行缓存的策略
# HTTPCACHE_POLICY = "scrapy.extensions.httpcache.RFC2616Policy"
# 缓存超时时间
# HTTPCACHE_EXPIRATION_SECS = 0
# 缓存保存路径
# HTTPCACHE_DIR = 'httpcache'
# 缓存忽略的Http状态码
# HTTPCACHE_IGNORE_HTTP_CODES = []
# 缓存存储的插件
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
"""
19. 代理,需要在环境变量中设置
from scrapy.contrib.downloadermiddleware.httpproxy import HttpProxyMiddleware
方式一:使用默认
os.environ
{
http_proxy:http://root:woshiniba@192.168.11.11:9999/
https_proxy:http://192.168.11.11:9999/
}
方式二:使用自定义下载中间件
def to_bytes(text, encoding=None, errors='strict'):
if isinstance(text, bytes):
return text
if not isinstance(text, six.string_types):
raise TypeError('to_bytes must receive a unicode, str or bytes '
'object, got %s' % type(text).__name__)
if encoding is None:
encoding = 'utf-8'
return text.encode(encoding, errors)
class ProxyMiddleware(object):
def process_request(self, request, spider):
PROXIES = [
{'ip_port': '111.11.228.75:80', 'user_pass': ''},
{'ip_port': '120.198.243.22:80', 'user_pass': ''},
{'ip_port': '111.8.60.9:8123', 'user_pass': ''},
{'ip_port': '101.71.27.120:80', 'user_pass': ''},
{'ip_port': '122.96.59.104:80', 'user_pass': ''},
{'ip_port': '122.224.249.122:8088', 'user_pass': ''},
]
proxy = random.choice(PROXIES)
if proxy['user_pass'] is not None:
request.meta['proxy'] = to_bytes("http://%s" % proxy['ip_port'])
encoded_user_pass = base64.encodestring(to_bytes(proxy['user_pass']))
request.headers['Proxy-Authorization'] = to_bytes('Basic ' + encoded_user_pass)
print "**************ProxyMiddleware have pass************" + proxy['ip_port']
else:
print "**************ProxyMiddleware no pass************" + proxy['ip_port']
request.meta['proxy'] = to_bytes("http://%s" % proxy['ip_port'])
DOWNLOADER_MIDDLEWARES = {
'step8_king.middlewares.ProxyMiddleware': 500,
}
"""
"""
20. Https访问
Https访问时有两种情况:
1. 要爬取网站使用的可信任证书(默认支持)
DOWNLOADER_HTTPCLIENTFACTORY = "scrapy.core.downloader.webclient.ScrapyHTTPClientFactory"
DOWNLOADER_CLIENTCONTEXTFACTORY = "scrapy.core.downloader.contextfactory.ScrapyClientContextFactory"
2. 要爬取网站使用的自定义证书
DOWNLOADER_HTTPCLIENTFACTORY = "scrapy.core.downloader.webclient.ScrapyHTTPClientFactory"
DOWNLOADER_CLIENTCONTEXTFACTORY = "step8_king.https.MySSLFactory"
# https.py
from scrapy.core.downloader.contextfactory import ScrapyClientContextFactory
from twisted.internet.ssl import (optionsForClientTLS, CertificateOptions, PrivateCertificate)
class MySSLFactory(ScrapyClientContextFactory):
def getCertificateOptions(self):
from OpenSSL import crypto
v1 = crypto.load_privatekey(crypto.FILETYPE_PEM, open('/Users/wupeiqi/client.key.unsecure', mode='r').read())
v2 = crypto.load_certificate(crypto.FILETYPE_PEM, open('/Users/wupeiqi/client.pem', mode='r').read())
return CertificateOptions(
privateKey=v1, # pKey对象
certificate=v2, # X509对象
verify=False,
method=getattr(self, 'method', getattr(self, '_ssl_method', None))
)
其他:
相关类
scrapy.core.downloader.handlers.http.HttpDownloadHandler
scrapy.core.downloader.webclient.ScrapyHTTPClientFactory
scrapy.core.downloader.contextfactory.ScrapyClientContextFactory
相关配置
DOWNLOADER_HTTPCLIENTFACTORY
DOWNLOADER_CLIENTCONTEXTFACTORY
"""
"""
21. 爬虫中间件
class SpiderMiddleware(object):
def process_spider_input(self,response, spider):
'''
下载完成,执行,然后交给parse处理
:param response:
:param spider:
:return:
'''
pass
def process_spider_output(self,response, result, spider):
'''
spider处理完成,返回时调用
:param response:
:param result:
:param spider:
:return: 必须返回包含 Request 或 Item 对象的可迭代对象(iterable)
'''
return result
def process_spider_exception(self,response, exception, spider):
'''
异常调用
:param response:
:param exception:
:param spider:
:return: None,继续交给后续中间件处理异常;含 Response 或 Item 的可迭代对象(iterable),交给调度器或pipeline
'''
return None
def process_start_requests(self,start_requests, spider):
'''
爬虫启动时调用
:param start_requests:
:param spider:
:return: 包含 Request 对象的可迭代对象
'''
return start_requests
内置爬虫中间件:
'scrapy.contrib.spidermiddleware.httperror.HttpErrorMiddleware': 50,
'scrapy.contrib.spidermiddleware.offsite.OffsiteMiddleware': 500,
'scrapy.contrib.spidermiddleware.referer.RefererMiddleware': 700,
'scrapy.contrib.spidermiddleware.urllength.UrlLengthMiddleware': 800,
'scrapy.contrib.spidermiddleware.depth.DepthMiddleware': 900,
"""
# from scrapy.contrib.spidermiddleware.referer import RefererMiddleware
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
# Enabled spider middlewares (none active; the example entry is commented out).
SPIDER_MIDDLEWARES = {
    # 'step8_king.middlewares.SpiderMiddleware': 543,
}
"""
22. 下载中间件
class DownMiddleware1(object):
def process_request(self, request, spider):
'''
请求需要被下载时,经过所有下载器中间件的process_request调用
:param request:
:param spider:
:return:
None,继续后续中间件去下载;
Response对象,停止process_request的执行,开始执行process_response
Request对象,停止中间件的执行,将Request重新调度器
raise IgnoreRequest异常,停止process_request的执行,开始执行process_exception
'''
pass
def process_response(self, request, response, spider):
'''
spider处理完成,返回时调用
:param response:
:param result:
:param spider:
:return:
Response 对象:转交给其他中间件process_response
Request 对象:停止中间件,request会被重新调度下载
raise IgnoreRequest 异常:调用Request.errback
'''
print('response1')
return response
def process_exception(self, request, exception, spider):
'''
当下载处理器(download handler)或 process_request() (下载中间件)抛出异常
:param response:
:param exception:
:param spider:
:return:
None:继续交给后续中间件处理异常;
Response对象:停止后续process_exception方法
Request对象:停止中间件,request将会被重新调用下载
'''
return None
默认下载中间件
{
'scrapy.contrib.downloadermiddleware.robotstxt.RobotsTxtMiddleware': 100,
'scrapy.contrib.downloadermiddleware.httpauth.HttpAuthMiddleware': 300,
'scrapy.contrib.downloadermiddleware.downloadtimeout.DownloadTimeoutMiddleware': 350,
'scrapy.contrib.downloadermiddleware.useragent.UserAgentMiddleware': 400,
'scrapy.contrib.downloadermiddleware.retry.RetryMiddleware': 500,
'scrapy.contrib.downloadermiddleware.defaultheaders.DefaultHeadersMiddleware': 550,
'scrapy.contrib.downloadermiddleware.redirect.MetaRefreshMiddleware': 580,
'scrapy.contrib.downloadermiddleware.httpcompression.HttpCompressionMiddleware': 590,
'scrapy.contrib.downloadermiddleware.redirect.RedirectMiddleware': 600,
'scrapy.contrib.downloadermiddleware.cookies.CookiesMiddleware': 700,
'scrapy.contrib.downloadermiddleware.httpproxy.HttpProxyMiddleware': 750,
'scrapy.contrib.downloadermiddleware.chunked.ChunkedTransferMiddleware': 830,
'scrapy.contrib.downloadermiddleware.stats.DownloaderStats': 850,
'scrapy.contrib.downloadermiddleware.httpcache.HttpCacheMiddleware': 900,
}
"""
# from scrapy.contrib.downloadermiddleware.httpauth import HttpAuthMiddleware
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
# 'step8_king.middlewares.DownMiddleware1': 100,
# 'step8_king.middlewares.DownMiddleware2': 500,
# }
pipelines.py
中间件
自定制命令
避免重复访问
自定义扩展
真的の实例
|
import operator
from functools import reduce
import os
import math
import pyautogui
from PIL import Image
from random import randint
import time
import fileinput
# Endlessly capture four fixed screen regions (card slots) and save each
# crop under a randomized 4-digit file name.
fileparent=["C:/Users/d0","C:/Users/d1","C:/Users/d2","C:/Users/d3"]
filelocation=['','','','']
while(True):
    for i in range(0,4):
        # Four random digits form the file-name suffix; collisions are
        # possible and would silently overwrite earlier captures.
        rand1=randint(0,9)
        rand2=randint(0,9)
        rand3=randint(0,9)
        rand4=randint(0,9)
        # NOTE(review): no '/' between the parent dir and 'img...' -- this
        # produces paths like 'C:/Users/d0img0card1234.png'; confirm intended.
        filelocation[i]=fileparent[i]+'img'+str(i)+'card'+str(rand1)+str(rand2)+str(rand3)+str(rand4)+'.png'
        print(filelocation[i])
    # Fixed screen coordinates (left, top, width, height) for each slot.
    pyautogui.screenshot(filelocation[0],region=(287,503,53,40))
    pyautogui.screenshot(filelocation[1],region=(342,503,53,40))
    pyautogui.screenshot(filelocation[2],region=(965,503,53,40))
    pyautogui.screenshot(filelocation[3],region=(1020,503,53,40))
    time.sleep(2)
|
import time
import sys
import stomp
class MyListener(stomp.ConnectionListener):
    """STOMP listener that prints error frames, messages and connect events."""

    def on_error(self, headers, message):
        # Broker-side ERROR frame.
        print('received an error "%s"' % message)

    def on_message(self, headers, message):
        print('received a message "%s"' % message)

    def on_connected(self, headers, body):
        print('Connected to broker')
# Connect over STOMP 1.2 with a TLS client certificate, subscribe to the
# queue, echo the CLI arguments onto it, then disconnect.
conn = stomp.Connection12([('broker-amq-stomp-ssl-amq-stomp.cloudapps.nocosetest.com', 443)])
ssl_result = conn.set_ssl([('broker-amq-stomp-ssl-amq-stomp.cloudapps.nocosetest.com',443)],
                          key_file="broker.key",
                          cert_file="certificate.pem")
conn.set_listener('', MyListener())
# NOTE(review): Connection.start() was removed in stomp.py >= 5 --
# confirm the pinned stomp.py version still provides it.
conn.start()
conn.connect('admin', 'admin', wait=True)
conn.subscribe(destination='/queue/noctestQ', id=1, ack='auto')
conn.send(body=' '.join(sys.argv[1:]), destination='/queue/noctestQ')
time.sleep(2)
conn.disconnect()
|
from chess.board import Board
import pytest
@pytest.fixture
def board():
    """Board with a lone white queen at (4, 2) on an otherwise near-standard setup."""
    layout = [
        "br bn bb bq bk bb bn br",
        "bp bp bp bp bp bp bp bp",
        "-- -- -- -- -- -- -- --",
        "-- -- -- -- -- -- -- --",
        "-- -- wq -- -- -- -- --",
        "-- -- -- -- -- -- -- --",
        "-- wp wp wp wp wp wp wp",
        "wr wn wb -- wk wb wn wr",
    ]
    return Board(array=[row.split() for row in layout])
@pytest.mark.parametrize(
    "coord, piece, moves", [
        ((0, 3), 'bq', []),  # black queen is fully blocked
        ((4, 2), 'wq', [(4, 1), (4, 0), (3, 2), (2, 2), (1, 2), (4, 3), (4, 4),
                        (4, 5), (4, 6), (4, 7), (5, 2), (3, 1), (2, 0), (3, 3),
                        (2, 4), (1, 5), (5, 1), (6, 0), (5, 3)])
    ]
)
def test_queen_moves(board, coord, piece, moves):
    """The queen at `coord` has the expected name and legal move set."""
    found = board[coord]
    assert found.name == piece
    assert set(moves) == set(found.get_moves(board))
|
## 1. Overview ##
## 1. Overview ##
# Read the raw CSV and split every line into a list of fields.
# Fix: the original left the file handle open; use a context manager.
# NOTE: naive comma split -- quoted fields containing commas will break.
with open("movie_metadata.csv", "r") as f:
    rows = f.read().split("\n")
movie_data = [row.split(',') for row in rows]
print(movie_data[0:5])
## 3. Writing Our Own Functions ##
def first_elts(movies):
    """Return the first element of every row (e.g. the movie titles)."""
    return [row[0] for row in movies]
# Sanity check: show the first five extracted titles.
movie_names = first_elts(movie_data)
print(movie_names[0:5])
## 4. Functions with Multiple Return Paths ##
# Sample movie record; index 6 holds the country (see is_usa below).
wonder_woman = ['Wonder Woman','Patty Jenkins','Color',141,'Gal Gadot','English','USA',2017]
def is_usa(movie):
    """True iff the record's country field (index 6) is 'USA'."""
    return movie[6] == 'USA'
wonder_woman_usa = is_usa(wonder_woman)
## 5. Functions with Multiple Arguments ##
# Same sample record, redeclared for this section of the tutorial.
wonder_woman = ['Wonder Woman','Patty Jenkins','Color',141,'Gal Gadot','English','USA',2017]
def is_usa(input_lst):
    """True iff the record's country field (index 6) is "USA"."""
    made_in_usa = (input_lst[6] == "USA")
    return made_in_usa
def index_equals_str(input_lst, index, input_str):
    """True iff input_lst[index] equals input_str."""
    return input_lst[index] == input_str
# Index 2 of the record is the color/BW flag.
wonder_woman_in_color = index_equals_str(wonder_woman, 2, "Color")
## 6. Optional Arguments ##
def index_equals_str(input_lst, index, input_str):
    """True iff input_lst[index] equals input_str."""
    return True if input_lst[index] == input_str else False
def counter(input_lst, header_row = False):
    """Count the elements of input_lst, optionally skipping a header row."""
    # Keep the original `== True` test so only an exact True skips the header.
    data = input_lst[1:len(input_lst)] if header_row == True else input_lst
    return len(data)
def feature_counter(movies, input_str, index, header_row = False):
    """Count rows whose `index` column equals `input_str`."""
    data = movies[1:] if header_row == True else movies
    return sum(1 for row in data if row[index] == input_str)
# Count how many records have country == "USA" (column 6), skipping the header.
num_of_us_movies = feature_counter(movie_data, "USA", 6, header_row=True)
## 7. Calling a Function inside another Function ##
def feature_counter(input_lst, index, input_str, header_row = False):
    """Count rows whose `index` column equals `input_str`.

    Note the argument order differs from the earlier section's version:
    here it is (list, index, value).
    """
    rows = input_lst[1:len(input_lst)] if header_row == True else input_lst
    matches = [1 for row in rows if row[index] == input_str]
    return len(matches)
def summary_statistics(movies):
    """Return counts of Japanese, color and English-language films."""
    return {
        'japan_films': feature_counter(movies, 6, "Japan", header_row=True),
        'color_films': feature_counter(movies, 2, "Color", header_row=True),
        'films_in_english': feature_counter(movies, 5, "English", header_row=True),
    }
summary = summary_statistics(movie_data)
# class WaterHeater:
# "热水器:战胜寒冬的有利武器"
# def __init__(self):
# self.__observers = []
# self.__temperature = 25
# def getTemperature(self):
# return self.__temperature
# def setTemperature(self, temperature):
# self.__temperature = temperature
# print("current temperature is:", self.__temperature)
# self.notifies()
# def addObserver(self, observer):
# self.__observers.append(observer)
# def notifies(self):
# for o in self.__observers:
# o.update(self)
# class Observer:
# "洗澡模式和饮用模式的父类"
# def update(self, waterHeater):
# pass
# class WashingMode(Observer):
# "该模式用于洗澡用"
# def update(self, waterHeater):
# if waterHeater.getTemperature() >= 50 and waterHeater.getTemperature() < 70:
# print("水已烧好,温度正好!可以用来洗澡了。")
# class DrinkingMode(Observer):
# "该模式用于饮用"
# def update(self, waterHeater):
# if waterHeater.getTemperature() >= 100:
# print("水b已烧开!可以用来饮用了。")
#
#
# def testWaterHeater():
# heater = WaterHeater()
# washingObser = WashingMode()
# drinkingObser = DrinkingMode()
# heater.addObserver(washingObser)
# heater.addObserver(drinkingObser)
# heater.setTemperature(40)
# heater.setTemperature(60)
# heater.setTemperature(100)
#
#
# class Observer:
# "观察者的基类"
# def update(self, observer, object):
# pass
# class Observable:
# "被观察者的基类"
# def __init__(self):
# self.__observers = []
# def addObserver(self, observer):
# self.__observers.append(observer)
# def removeObserver(self, observer):
# self.__observers.remove(observer)
# def notifyObservers(self, object=0):
# for o in self.__observers:
# o.update(self, object)
#
#
# class WaterHeater(Observable):
# "热水器:战胜寒冬的有利武器"
# def __init__(self):
# super().__init__()
# self.__temperature = 25
# def getTemperature(self):
# return self.__temperature
# def setTemperature(self, temperature):
# self.__temperature = temperature
# print("current temperature is:", self.__temperature)
# self.notifyObservers()
# class WashingMode(Observer):
# "该模式用于洗澡用"
# def update(self, observable, object):
# if isinstance(observable,WaterHeater) and observable.getTemperature() >= 50 and observable.getTemperature() < 70:
# print("水已烧好,温度正好!可以用来洗澡了。")
# class DrinkingMode(Observer):
# "该模式用于饮用"
# def update(self, observable, object):
# if isinstance(observable, WaterHeater) and observable.getTemperature() >= 100:
# print("水已烧开!可以用来饮用了。")
#
# class WaterHeater:
# def __init__(self):
# self.__observers = []
# self.__temperature = 25
# def get_temperature(self):
# return self.__temperature
# def set_temperature(self, temparature):
# self.__temperature = temparature
# print('现在温度是:', self.__temperature)
# self.notifies()
# def add_observer(self, observer):
# self.__observers.append(observer)
# def remove_observer(self, observer):
# self.__observers.remove(observer)
# def notifies(self):
# for o in self.__observers:
# o.update(self)
#
# class Observer:
# def update(self, waterHeater):
# pass
#
# class HeatMode(Observer):
# def update(self, waterHeater):
# if waterHeater.get_temperature() > 40 and waterHeater.get_temperature() < 70:
# print('可以洗澡了')
#
# class DrinkMode(Observer):
# def update(self, waterHeater):
# if waterHeater.get_temperature() > 100:
# print('可以喝水了')
#
# heater = WaterHeater()
# heatMode = HeatMode()
# drinkMode = DrinkMode()
# heater.add_observer(heatMode)
# heater.add_observer(drinkMode)
# heater.set_temperature(50)
# heater.set_temperature(101)
class Observer:
    """Abstract observer: subclasses override update() to react to changes."""

    def update(self, observer, object):
        """Receive a change notification; the default does nothing."""
        return None
class Observerable:
    """Subject base class: keeps observers and broadcasts updates.

    (Name kept as in the original source, sic.)
    """

    def __init__(self):
        self.__observers = []

    def add_observer(self, observer):
        """Register an observer."""
        self.__observers.append(observer)

    def remove_observer(self, observer):
        """Unregister a previously added observer."""
        self.__observers.remove(observer)

    def notifies(self, object=0):
        """Call update(subject, object) on every registered observer."""
        for subscriber in self.__observers:
            subscriber.update(self, object)
class WaterHeater(Observerable):
    """Concrete subject: broadcasts whenever the temperature is set."""

    def __init__(self):
        super().__init__()
        self.__temperature = 25

    def get_temperature(self):
        """Current temperature in degrees."""
        return self.__temperature

    def set_temperature(self, temperature):
        """Store the new temperature and notify all observers."""
        self.__temperature = temperature
        self.notifies()
class HeatMode(Observer):
    """Announces bathing readiness when the heater is strictly between 40 and 70."""

    def update(self, waterHeater, object):
        if not isinstance(waterHeater, WaterHeater):
            return
        if 40 < waterHeater.get_temperature() < 70:
            print('可以洗澡了哟!')
class DrinkMode(Observer):
    """Announces drinkability when the heater exceeds 100 degrees."""

    def update(self, waterHeater, object):
        if not isinstance(waterHeater, WaterHeater):
            return
        if waterHeater.get_temperature() > 100:
            print('可以喝水了')
# Demo: attach both observers, then trigger two notifications.
heater = WaterHeater()
heatMode = HeatMode()
drinkMode = DrinkMode()
heater.add_observer(heatMode)
heater.add_observer(drinkMode)
heater.set_temperature(50)   # HeatMode fires (40 < t < 70)
heater.set_temperature(101)  # DrinkMode fires (t > 100)
|
"""
@File: thread_process.py
@CreateTime: 2020/1/11 上午9:59
@Desc: 多线程, 多进程
对于任务数量过多的,可以采用队列,一次取100条任务,执行结束后,再次取用, 在一个类中调用装饰器
"""
import time
import logging
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures import ProcessPoolExecutor, wait, ALL_COMPLETED, FIRST_COMPLETED
logging.basicConfig(level=logging.INFO,
format="%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s")
def time_count(parameter):
    """Decorator factory: log the wall-clock runtime of the wrapped method.

    ARGS:
        parameter: label used in the log line.

    Fixes over the original:
    - the wrapper now returns the wrapped function's result instead of
      silently discarding it;
    - functools.wraps preserves the wrapped function's name/docstring.
    """
    def wrapper(func):
        import functools

        @functools.wraps(func)
        def wrap(self, *args, **kwargs):
            start_time = time.time()
            result = func(self, *args, **kwargs)
            end_time = time.time()
            logging.info(f"{parameter}的执行时间为{end_time - start_time}")
            return result
        return wrap
    return wrapper
class Base(object):
    """Shared task list plus helpers for submitting jobs to an executor."""

    def __init__(self):
        # Five dummy task names; enumerate() supplies each task's sleep time.
        self.task_list = ["task_one", "task_two", "task_three", "task_four", "task_five"]

    def time_count(self, parameter):
        # NOTE(review): this inner decorator appears unused and looks broken
        # (wrapper takes (self, func) but wrap() calls self.func()); the
        # module-level time_count decorator is the one actually applied.
        def wrapper(self, func):
            def wrap():
                start_time = time.time()
                self.func()
                end_time = time.time()
                logging.info(f"{parameter}的执行时间为{end_time - start_time}")
            return wrap
        return wrapper

    @staticmethod
    def main_job(name, num):
        # Clamp the wait to 2 seconds (num == 0 also becomes 2).
        if num > 2 or num == 0:
            num = 2
        time.sleep(num)
        logging.info(f"{name}的执行等待时间为{num}")

    def add_more_job_list(self, ex) -> list:
        """Submit every task in self.task_list to executor `ex`; return the futures."""
        task_all_list = list()
        for num, task in enumerate(self.task_list):
            generated_task = ex.submit(self.main_job, task, num)
            task_all_list.append(generated_task)
        return task_all_list

    def run(self):
        # Overridden by subclasses.
        pass
class ProcessMore(Base):
    """Run the task list in a process pool and log the total runtime."""

    def __init__(self):
        super().__init__()

    @time_count("process")
    def run(self):
        # shutdown(wait=True) blocks until all submitted jobs finish.
        ex = ProcessPoolExecutor(max_workers=5)
        self.add_more_job_list(ex)
        ex.shutdown(wait=True)
class ThreadMore(Base):
    """Run the task list in a thread pool and log the total runtime."""

    def __init__(self):
        super().__init__()

    @time_count("thread")
    def run(self):
        executor = ThreadPoolExecutor(max_workers=5)
        task_all = self.add_more_job_list(executor)
        # Block until every future completes.
        wait(task_all, return_when=ALL_COMPLETED)
if __name__ == '__main__':
    # Compare process-pool vs thread-pool timings for the same job list.
    pm = ProcessMore()
    pm.run()
    td = ThreadMore()
    td.run()
|
import random
import sys
import os
# Beginner Python walkthrough: printing, variables, arithmetic, strings,
# lists and tuples.  (Trailing semicolons are redundant in Python.)
#simply printing the message
print("hello world");
# writing a single line comment
'''
multiline comments
'''
# in python u can store any type of variables
name="gokuljs"
print(name);
value1=10
value2=1.5
print(value1);
print(value2);
# numbers , lists ,tuples, dictionary ,Strings
# there are seven different arithmetic operators
# +, -, *, /, % ,**(exponential calculations), // floor division
#Floor division is a normal division operation except that it returns the largest possible integer. This integer is either less than or equal to the normal division result. Floor function is mathematically denoted by this ⌊ ⌋ symbol.
print("=======================================================================")
print("5+2",5+2)
print("5-2",5-2)
print("5*2",5*2)
print("5/2",5/2)
print("5%2",5%2)
print("5//2",5//2)
print("5**2",5**2)
### order of operation
'''
multiplication and division is going to happen fisrt then addition and subtraction
'''
print("=======================================================================")
quote="\" i am a better gamer" # escape a quote inside a string with a backslash
print(quote);
## adding multiline string here
multi_line_quote='''Lorem Ipsum is simply dummy text of the printing and typesetting industry.
Lorem Ipsum has been the industry's standard dummy text ever since the 1500s,
when an unknown printer took a galley of type and scrambled it to make a type specimen
book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged. It was popularised in the 1960s with the release of Letraset sheets containing Lorem Ipsum passages, and more recently with desktop publishing software like Aldus PageMaker including versions of Lorem Ipsum.'''
print(multi_line_quote);
print("=======================================================================")
## if u dont need new line to work everytime
print("hello welcome to this new ",end="")
print("world")
## printing a newline character
print(" \ngokul \n is a good a coading" * 5) # *5 is going to print this line 5 times in out put
# getting started with lists
grocery_list=['juice','fruits','vegetables','alcholal']
# list indexing starts from 0 and goes to n-1
print("printing first item in the list",grocery_list[0])
# printing subsets of the list (slicing)
print(grocery_list[0:3])
# creating todolist
todolist=['gaming','breakfast','coading']
print(todolist)
# combining two lists
complete_list=[grocery_list,todolist]
print(complete_list) ## also an example for a list inside a list
## now there are two lists inside a list
## now i want to use second item in the second list
print(complete_list[1][0])
print(complete_list[1][2])
print(complete_list[0][3])
# appending items into the list
grocery_list.append('oninons') ## normally this inserts value at the end of the list
print(grocery_list)
## inserting value at very particular index in the list
##listname.insert(indexvalue,"item you want to insert")
grocery_list.insert(1,"pickle")
print(grocery_list)
# removing an item in the list with .remove
grocery_list.remove("pickle")
print(grocery_list)
# sorting items in the list (in place)
grocery_list.sort();
print(grocery_list)
# reverse the list (in place)
grocery_list.reverse();
print(grocery_list)
# deleting an item
# del grocery_list
# print(grocery_list)
# deleting a specific item in the list
del grocery_list[4]
print(grocery_list)
todolist2=todolist+grocery_list
print(todolist2)
# getting length of the list
print(len(todolist2))
# len is the built-in function to get the length
# getting maximum element in the list
print(max(todolist2))
'''
The function is applied to each item on the iterable. If max() is called with an iterable, it returns the largest item in it. If the iterable is empty then the default value is returned, otherwise, a ValueError exception is raised. If max() is called with multiple arguments, it returns the largest one.Jan 7, 2020
'''
## getting minimum element in the list
print(min(todolist2))
print("=======================================================================")
## getting started with tuples
## tuples are simliar to list
## but tuples cannot be changed once inserted
## they use parthensis instead of square brackets
# tuples can be converted to lists and viceversa
'''
Tuples are useful for representing what other languages often call records —
some related information that belongs together, like your student record. ... So like
strings, tuples are immutable. Once Python has created a tuple in memory, it cannot be
changed.
'''
tuple2=(1,2,3,4,5,6,7,9)
print(tuple2)
# converting tuple into list
list1=list(tuple2)
print(list1)
# converting list to tuple
tuple1=tuple(list1)
print(tuple1)
print(len(tuple1))
print(min(tuple1))
print(max(tuple1))
print(tuple1[0])
print(tuple1[1:4])
|
from functools import cached_property
from onegov.ballot import Vote
from onegov.core.i18n import SiteLocale
from onegov.election_day import _
from onegov.election_day.layouts.default import DefaultLayout
class MailLayout(DefaultLayout):
    """ A special layout for creating HTML E-Mails. """

    @cached_property
    def base(self):
        # Template used as the e-mail skeleton.
        return self.template_loader['mail_layout.pt']

    @cached_property
    def primary_color(self):
        # Falls back to white when the theme defines no primary color.
        return self.app.theme_options.get('primary-color', '#fff')

    def model_title(self, model):
        """ Returns the translated title of the given election or vote. Falls
        back to the title of the default fallback, if no translated title is
        available. """
        return model.get_title(
            self.request.locale,
            self.request.default_locale
        )

    def model_url(self, model):
        """ Returns the localized link to the given election of vote. """
        return SiteLocale(self.request.locale).link(
            self.request, self.request.link(model)
        )

    def subject(self, model):
        """ Returns a nice subject for the given model. """
        # Completed models get "Final results", otherwise intermediate ones.
        result = _("New intermediate results")
        if model.completed:
            result = _("Final results")
        if isinstance(model, Vote):
            # NOTE(review): answer == 'proposal' maps to "Accepted" here --
            # confirm that is the intended mapping.
            if model.answer == 'accepted' or model.answer == 'proposal':
                result = _("Accepted")
            if model.answer == 'rejected':
                result = _("Rejected")
            if model.answer == 'counter-proposal':
                result = _("Counter proposal accepted")
        # Join title and status, dropping empty parts.
        parts = [self.model_title(model), self.request.translate(result)]
        parts = [part for part in parts if part]
        return ' - '.join(parts)

    @cached_property
    def optout_link(self):
        """ Returns the opt-out link of the principal. """
        return self.request.link(
            self.request.app.principal, 'unsubscribe-email'
        )
|
#!/usr/bin/env python
# ENCODE DCC fingerprint/JSD plot wrapper
# Author: Jin Lee (leepc12@gmail.com)
import sys
import os
import argparse
from encode_lib_common import (
log, ls_l, mkdir_p, rm_f, run_shell_cmd, strip_ext_bam)
from encode_lib_genomic import (
samtools_index)
from encode_lib_blacklist_filter import blacklist_filter_bam
def parse_arguments():
    """Parse CLI arguments for the fingerprint/JSD plot wrapper.

    Fix: the --log-level choices listed 'CRITICAL' twice; deduplicated and
    put in severity order.
    """
    parser = argparse.ArgumentParser(
        prog='ENCODE DCC Fingerprint/JSD plot.')
    parser.add_argument(
        'bams', nargs='+', type=str,
        help='List of paths for filtered experiment BAM files.')
    parser.add_argument('--ctl-bam', type=str, default='',
                        help='Path for filtered control BAM file.')
    parser.add_argument('--blacklist', type=str, default='',
                        help='Blacklist BED file.')
    parser.add_argument('--mapq-thresh', default=30, type=int,
                        help='Threshold for low MAPQ reads removal.')
    parser.add_argument('--nth', type=int, default=1,
                        help='Number of threads to parallelize.')
    parser.add_argument('--out-dir', default='', type=str,
                        help='Output directory.')
    parser.add_argument('--log-level', default='INFO',
                        choices=['NOTSET', 'DEBUG', 'INFO',
                                 'WARNING', 'ERROR', 'CRITICAL'],
                        help='Log level')
    args = parser.parse_args()
    log.setLevel(args.log_level)
    log.info(sys.argv)
    return args
def fingerprint(bams, ctl_bam, blacklist, mapq_thresh, nth, out_dir):
    """Run deepTools plotFingerprint on blacklist-filtered BAMs.

    Returns (plot_png, jsd_qcs): the fingerprint plot path and one JSD QC
    file path per experiment replicate.
    """
    # make bam index (.bai) first
    # filter bams with blacklist
    filtered_bams = []
    for bam in bams:
        filtered_bam = blacklist_filter_bam(bam, blacklist, out_dir)
        samtools_index(filtered_bam, nth)
        filtered_bams.append(filtered_bam)
    filtered_ctl_bam = None
    if ctl_bam:
        filtered_ctl_bam = blacklist_filter_bam(ctl_bam, blacklist, out_dir)
        samtools_index(filtered_ctl_bam, nth)
    # Output names are derived from the first BAM's basename.
    prefix = os.path.join(out_dir,
                          os.path.basename(strip_ext_bam(bams[0])))
    plot_png = '{}.jsd_plot.png'.format(prefix)
    tmp_log = '{}.jsd.tmp'.format(prefix)
    labels = []
    bam_paths = []
    jsd_qcs = []
    for i, bam in enumerate(filtered_bams):
        prefix_ = os.path.join(out_dir,
                               os.path.basename(strip_ext_bam(bam)))
        # NOTE(review): this yields names like 'rep1./out/dir/name.jsd.qc'
        # ('repN.' prepended to a full path) -- confirm the intended pattern
        # was not '{}.rep{}.jsd.qc'.
        jsd_qcs.append('rep{}.{}.jsd.qc'.format(i+1, prefix_))
        labels.append('rep{}'.format(i+1))  # repN
        bam_paths.append(bam)
    # add control
    if filtered_ctl_bam:
        labels.append('ctl1')
        bam_paths.append(filtered_ctl_bam)
    # Build the plotFingerprint command; locale env vars avoid unicode issues.
    cmd = 'LC_ALL=en_US.UTF-8 LANG=en_US.UTF-8 plotFingerprint -b {} '
    if filtered_ctl_bam:
        cmd += '--JSDsample {} '.format(filtered_ctl_bam)
    cmd += '--labels {} '
    cmd += '--outQualityMetrics {} '
    cmd += '--minMappingQuality {} '
    cmd += '-T "Fingerprints of different samples" '
    cmd += '--numberOfProcessors {} '
    cmd += '--plotFile {}'
    cmd = cmd.format(
        ' '.join(bam_paths),
        ' '.join(labels),
        tmp_log,
        mapq_thresh,
        nth,
        plot_png)
    run_shell_cmd(cmd)
    # remove intermediate files (blacklist-filtered BAM)
    if filtered_ctl_bam:
        rm_f(filtered_ctl_bam)
    rm_f(filtered_bams)
    # parse tmp_log to get jsd_qc for each exp replicate
    with open(tmp_log, 'r') as fp:
        for i, line in enumerate(fp.readlines()):  # i is rep_id-1
            if i == 0:
                continue  # skip the header line
            if i > len(jsd_qcs):
                break  # past the experiment replicates (e.g. control row)
            with open(jsd_qcs[i-1], 'w') as fp2:
                # removing repN from lines
                fp2.write('\t'.join(line.strip().split('\t')[1:]))
    rm_f(tmp_log)
    return plot_png, jsd_qcs
def main():
    """CLI entry point: parse arguments, run fingerprint/JSD, list outputs."""
    args = parse_arguments()

    log.info('Initializing and making output directory...')
    mkdir_p(args.out_dir)

    log.info('Plotting Fingerprint on BAMs and calculating JSD...')
    plot_png, jsd_qcs = fingerprint(
        args.bams, args.ctl_bam, args.blacklist, args.mapq_thresh,
        args.nth, args.out_dir)

    log.info('List all files in output directory...')
    ls_l(args.out_dir)

    log.info('All done.')


if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 15 19:31:34 2017
@author: justjay
"""
#LIVE2 split images randomly
#Train: 17, Validation: 6, Test: 6
#Train: 23, Test: 6; train:test ~= 8:2
import numpy as np
import scipy.io as sio

# LIVE2 IQA dataset: refnames_all.mat maps each distorted image to its
# reference image name; dmos.mat holds the DMOS quality score per image.
names_mat = sio.loadmat('./refnames_all.mat')
dmos_mat = sio.loadmat('./dmos.mat')
dmos_dmos = dmos_mat['dmos']
all_imgs = names_mat['refnames_all']

# index ranges of the five distortion types within the flat 982-image list
index_jpg2k = np.arange(227)
index_jpg = np.arange(227, 460)
index_wn = np.arange(460, 634)
index_blur = np.arange(634, 808)
index_ff = np.arange(808, 982)
index = [index_jpg2k, index_jpg, index_wn, index_blur, index_ff]

# the 29 reference images; the split is done per reference to keep all
# distorted versions of one reference on the same side of the split
names = ('coinsinfountain.bmp', 'ocean.bmp', 'statue.bmp', 'dancers.bmp',
         'paintedhouse.bmp', 'stream.bmp', 'bikes.bmp', 'flowersonih35.bmp',
         'parrots.bmp', 'studentsculpture.bmp', 'building2.bmp', 'plane.bmp',
         'woman.bmp', 'buildings.bmp', 'house.bmp', 'rapids.bmp', 'womanhat.bmp',
         'caps.bmp', 'lighthouse.bmp', 'sailing1.bmp', 'carnivaldolls.bmp',
         'lighthouse2.bmp', 'sailing2.bmp', 'cemetry.bmp', 'manfishing.bmp',
         'sailing3.bmp', 'churchandcapitol.bmp', 'monarch.bmp', 'sailing4.bmp')

train_dir = './train_label.txt'
#val_dir = './val_label.txt'
test_dir = './test.txt'
train_ptr = open(train_dir, 'w')
#val_ptr = open(val_dir, 'w')
test_ptr = open(test_dir, 'w')

# train/test over all distortion types together
for m in range(5):
    # group image IDs by reference name: 29 groups for this distortion type
    imgbynames = [[] for i in range(len(names))]
    for j in range(len(names)):
        for i in index[m]:
            if all_imgs[0][i][0] == names[j]:
                imgbynames[j].append(i)
    # NOTE(review): a fresh permutation per distortion type means the same
    # reference can land in train for one type and test for another —
    # confirm this cross-type leakage is intended.
    new_order = np.random.permutation(len(names))
    # write train images (no validation split: 17 -> 23 references)
    for i in new_order[0:23]:
        for j in range(len(imgbynames[i])):
            train_ptr.write('{:06d}.bmp {:d}\n'.format(int(imgbynames[i][j])+1, int(np.round(dmos_dmos[0][imgbynames[i][j]]))))
    # (validation split disabled)
    # for i in new_order[17:23]:
    #     for j in range(len(imgbynames[i])):
    #         val_ptr.write('{:06d}.bmp {:d}\n'.format(int(imgbynames[i][j])+1, int(np.round(dmos_dmos[0][imgbynames[i][j]]))))
    # write test images (last 6 references; labels withheld)
    for i in new_order[23:29]:
        for j in range(len(imgbynames[i])):
            test_ptr.write('{:06d}.bmp\n'.format(int(imgbynames[i][j])+1))

# per-distortion-type train/test variant (disabled)
#m = 4
## group image IDs by reference name: 29 groups
#imgbynames = [[] for i in range(len(names))]
#for j in range(len(names)):
#    for i in index[m]:
#        if all_imgs[0][i][0] == names[j]:
#            imgbynames[j].append(i)
#
#
## write train images (no validation split: 17 -> 23)
#for i in new_order[0:23]:
#    for j in range(len(imgbynames[i])):
#        train_ptr.write('{:06d}.bmp {:d}\n'.format(int(imgbynames[i][j])+1, int(np.round(dmos_dmos[0][imgbynames[i][j]]))))
#
## write test images
#for i in new_order[23:29]:
#    for j in range(len(imgbynames[i])):
#        test_ptr.write('{:06d}.bmp\n'.format(int(imgbynames[i][j])+1))

train_ptr.close()
#val_ptr.close()
test_ptr.close()
|
from Paragraphs.AccordionParagraph import AccordionParagraph
import pytest
@pytest.allure.feature('Paragraphs')
@pytest.allure.story('Accordion paragraph')
@pytest.mark.usefixtures('init_solution_page')
class TestAccordionParagraph:
    """UI tests for the accordion paragraph type on solution pages."""

    @pytest.allure.title('VDM-??? Accordion paragraph - creation')
    def test_accordion_creating(self):
        """Create an accordion with mandatory fields only and verify it renders."""
        self.node.fill_solution_page_mandatory()
        self.node.add_paragraph('accordion')
        paragraph = AccordionParagraph(self.driver)
        paragraph.fill_accordion_paragraph_mandatory()
        edit_url = self.driver.current_url
        self.node.save_node()
        # a successful save navigates away from the edit form
        assert self.driver.current_url != edit_url
        assert paragraph.accordion_test_data['accordion_item_title'] in self.driver.page_source
        assert paragraph.accordion_test_data['accordion_item_content'] in self.driver.page_source
        self.node.delete_node()

    @pytest.allure.title('VDM-??? Accordion paragraph - creation with all fields')
    def test_accordion_creating_all_fields(self):
        """Create an accordion with every field filled and verify it renders."""
        self.node.fill_solution_page_mandatory()
        self.node.add_paragraph('accordion')
        paragraph = AccordionParagraph(self.driver)
        paragraph.fill_accordion_paragraph()
        edit_url = self.driver.current_url
        self.node.save_node()
        assert self.driver.current_url != edit_url
        assert paragraph.accordion_test_data['accordion_item_title'] in self.driver.page_source
        assert paragraph.accordion_test_data['accordion_item_content'] in self.driver.page_source
        self.node.delete_node()

    @pytest.allure.title('VDM-??? Accordion paragraph - empty fields validation')
    def test_accordion_empty_fields_validation(self):
        """Saving with an empty accordion must keep the user on the edit form."""
        self.node.fill_solution_page_mandatory()
        self.node.add_paragraph('accordion')
        paragraph = AccordionParagraph(self.driver)
        paragraph.get_accordion_item_title()
        edit_url = self.driver.current_url
        self.node.save_node()
        # validation failure: URL unchanged
        assert self.driver.current_url == edit_url

    @pytest.allure.title('VDM-??? Accordion paragraph - check fields existing')
    def test_accordion_fields_existing(self):
        """All accordion form controls are present on the edit form."""
        self.node.add_paragraph('accordion')
        paragraph = AccordionParagraph(self.driver)
        assert paragraph.get_admin_title().is_displayed()
        assert paragraph.get_background().first_selected_option
        assert paragraph.get_accordion_colour().first_selected_option
        assert paragraph.get_accordion_open_first_element().is_displayed()
        assert paragraph.get_accordion_item_title().is_displayed()
        assert paragraph.get_accordion_item_content().is_displayed()
        assert paragraph.get_accordion_add_item_button().is_displayed()
from django.contrib import admin
from .models import DataSchema, DataSet, Field
@admin.register(DataSchema)
class DataSchemaAdmin(admin.ModelAdmin):
    """Admin changelist for data schemas."""
    # columns shown in the changelist
    list_display = ('title', 'created', 'updated',)
@admin.register(DataSet)
class DataSetAdmin(admin.ModelAdmin):
    """Admin changelist for generated data sets."""
    # columns shown in the changelist
    list_display = ('file', 'schema', 'created', 'status', )
@admin.register(Field)
class FieldAdmin(admin.ModelAdmin):
    """Admin changelist for schema fields."""
    # columns shown in the changelist
    list_display = ('type', 'order', 'schema', )
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-21 22:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adjust field defaults on History.score and UserProfile avatar/type."""

    dependencies = [
        ('userprofiles', '0001_initial'),
    ]

    operations = [
        # default score for a history entry
        migrations.AlterField(
            model_name='history',
            name='score',
            field=models.PositiveIntegerField(blank=True, default=5),
        ),
        # NOTE(review): 'default_app_avatar.pgn' looks like a typo for '.png';
        # if so, fix it in the model and a *new* migration — do not rewrite
        # this historical migration.
        migrations.AlterField(
            model_name='userprofile',
            name='avatar',
            field=models.ImageField(blank=True, default='default_app_avatar.pgn', upload_to='avatars'),
        ),
        # 1 = Premium, 2 = Estandar (default)
        migrations.AlterField(
            model_name='userprofile',
            name='type_of_user',
            field=models.IntegerField(choices=[(1, 'Premium'), (2, 'Estandar')], default=2),
        ),
    ]
|
from stdmodandoption import *
import time
import multiprocessing as mp
# --- select the simulation run and load its gas snapshot ---
# assumes SSF and RSS are provided by `from stdmodandoption import *` — TODO confirm
runtodo='m12fmhdcvhr'  # run label resolved by SSF.outdirname
i=580                  # snapshot number
info=SSF.outdirname(runtodo, i)
rundir=info['rundir']
Nsnapstring=info['Nsnapstring']
havecr=info['havecr']  # cosmic-ray flag for this run
haveB=info['haveB']    # magnetic-field flag for this run
cutcold=0              # 0: keep cold gas in the pressure calculation
dx=dy=dz=1             # cell sizes passed through to CRTF.pressureXYZ
commonpath='/home/tkc004/scratch/snipshot/philruns/'
#SSF.mkdir_p(commonpath+rundir+'/output/withinr200spno100')
fname=commonpath+rundir+'/output/withinr20G/snipshot_'+Nsnapstring+'.hdf5'
# read particle type 0 (gas) only; G holds the gas-particle data
data = RSS.readsnipshot(fname,ptypelist = [0])
G = data[0];
def pressureXYZlocal(G, pos, dx, dy, dz, havecr, haveB, cutcold, i):
    """Worker wrapper: compute the pressure fields for one chunk of grid
    positions and tag the output with its chunk index *i* so results can be
    re-ordered after the pool finishes."""
    chunk_result = CRTF.pressureXYZ(G, pos, dx, dy, dz,
                                    havecr=havecr, haveB=haveB, cutcold=cutcold)
    return [[i, chunk_result]]
def chunks(l, n):
    """Split sequence *l* into consecutive chunks of length *n*.

    Args:
        l: sequence to split.
        n: desired chunk size; values below 1 are clamped to 1, and floats
           (e.g. from Python 3 true division at the call site) are truncated.

    Returns:
        List of slices of *l*, in order; the last chunk may be shorter.
    """
    n = max(1, int(n))  # clamp and coerce so a float chunk size cannot crash range()
    # BUGFIX: xrange is Python 2 only (NameError on Python 3); range behaves
    # identically here on both.
    return [l[i:i + n] for i in range(0, len(l), n)]
def collect_results(chunk_result):
    """apply_async callback: fold one worker's [index, data] pairs into the
    module-level `results` list as jobs complete."""
    for item in chunk_result:
        results.append(item)
def joindata(results):
    """Merge per-chunk results into a single dict of flat arrays.

    Each entry of *results* is [chunk_index, dict_of_arrays]; entries are
    re-ordered by chunk index, then each key's arrays are concatenated in
    that order.
    """
    order = np.argsort([entry[0] for entry in results])
    dicts = np.array([entry[1] for entry in results])[order]
    merged = {}
    # keys are taken from the first chunk's dict; every chunk is assumed to
    # carry the same keys
    for key in dicts[0]:
        pieces = np.array([])
        for chunk_dict in dicts:
            pieces = np.append(pieces, chunk_dict[key])
        merged[key] = pieces.flatten()
    return merged
start = time.time()
withinr = 200; spno = 1

# Step 1: build the sampling grid — nogrid^3 points in a cube of side maxlength
maxlength = 20
nogrid = 40
zmax = maxlength/2.0
xlist = ylist = zlist = np.linspace(-zmax, zmax, num=nogrid)
xl, yl, zl = np.meshgrid(xlist, ylist, zlist)
xl = np.ravel(xl); yl = np.ravel(yl); zl = np.ravel(zl)
pos = []
for x, y, z in zip(xl, yl, zl):
    pos.append([x, y, z])

nocpu = 10
lenpos = len(pos)
# BUGFIX: use floor division — '/' yields a float under Python 3, which then
# breaks chunks()/range(); '//' gives the identical value under Python 2.
chunksize = lenpos // nocpu
listpos = chunks(pos, chunksize)

pool = mp.Pool(nocpu)
results = []
# Step 2: evaluate the pressure on each chunk in parallel; collect_results
# appends [chunk_index, data] pairs as workers finish
pxyz = [pool.apply_async(pressureXYZlocal,
                         args=(G, xyz, dx, dy, dz, havecr, haveB, cutcold, i),
                         callback=collect_results)
        for i, xyz in enumerate(listpos)]
# Step 3: close the pool and wait for all workers
pool.close()
pool.join()

end = time.time()
print(end - start)
# re-assemble the per-chunk outputs into one dict of flat arrays
comdict = joindata(results)
|
# program that prints all the even numbers from 2 to 100.
# author Angelina B
evenNum = 2
# BUGFIX: condition was `evenNum < 10`, which stopped at 8 despite the stated
# goal of printing up to 100
while evenNum <= 100:
    print(evenNum)
    evenNum += 2
|
# @Title: 找到所有数组中消失的数字 (Find All Numbers Disappeared in an Array)
# @Author: 2464512446@qq.com
# @Date: 2020-03-05 18:32:37
# @Runtime: 660 ms
# @Memory: 20.5 MB
class Solution:
    # BUGFIX: the annotations were bare `List[int]` with no
    # `from typing import List`, which raises NameError the moment the method
    # is defined; quoting them keeps the hint without needing the import.
    def findDisappearedNumbers(self, nums: "List[int]") -> "List[int]":
        """Return every value in [1, len(nums)] that does not appear in nums.

        O(n) time, O(1) extra space: seeing value v negates nums[v-1]; any
        index left positive afterwards corresponds to a missing value.
        NOTE: mutates the signs of entries in nums.
        """
        # mark each seen value v by negating nums[v-1]
        for i in range(len(nums)):
            seen_index = abs(nums[i]) - 1
            if nums[seen_index] > 0:
                nums[seen_index] *= -1
        # values whose slot was never negated are missing
        result = []
        for value in range(1, len(nums) + 1):
            if nums[value - 1] > 0:
                result.append(value)
        return result
|
from conans.model import Generator
from conans.paths import BUILD_INFO_VISUAL_STUDIO
class VisualStudioGenerator(Generator):
    """Conan generator emitting a Visual Studio property sheet (.props) that
    aggregates the include/lib/bin/resource dirs, defines and flags of all
    dependencies, plus a per-dependency root-dir macro."""

    # MSBuild property-sheet skeleton; {placeholders} are filled in `content`.
    # NOTE(review): 'exe_flags' is computed below but has no placeholder here —
    # str.format simply ignores the extra key; confirm whether it was meant to
    # be emitted.
    template = '''<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
  <ImportGroup Label="PropertySheets" />
  <PropertyGroup Label="UserMacros" />
  <PropertyGroup Label="Conan-RootDirs">{item_properties}
  </PropertyGroup>
  <PropertyGroup Label="ConanVariables">
    <ConanBinaryDirectories>{bin_dirs}</ConanBinaryDirectories>
    <ConanResourceDirectories>{res_dirs}</ConanResourceDirectories>
  </PropertyGroup>
  <PropertyGroup>
    <LocalDebuggerEnvironment>PATH=%PATH%;{bin_dirs}</LocalDebuggerEnvironment>
    <DebuggerFlavor>WindowsLocalDebugger</DebuggerFlavor>
  </PropertyGroup>
  <ItemDefinitionGroup>
    <ClCompile>
      <AdditionalIncludeDirectories>{include_dirs}%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
      <PreprocessorDefinitions>{definitions}%(PreprocessorDefinitions)</PreprocessorDefinitions>
      <AdditionalOptions>{compiler_flags} %(AdditionalOptions)</AdditionalOptions>
    </ClCompile>
    <Link>
      <AdditionalLibraryDirectories>{lib_dirs}%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
      <AdditionalDependencies>{libs}%(AdditionalDependencies)</AdditionalDependencies>
      <AdditionalOptions>{linker_flags} %(AdditionalOptions)</AdditionalOptions>
    </Link>
    <Midl>
      <AdditionalIncludeDirectories>{include_dirs}%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
    </Midl>
    <ResourceCompile>
      <AdditionalIncludeDirectories>{include_dirs}%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
    </ResourceCompile>
  </ItemDefinitionGroup>
  <ItemGroup />
</Project>'''

    # one per-dependency property, e.g. <Conan-zlib-Root>C:/...</Conan-zlib-Root>
    item_template = '''
    <Conan-{name}-Root>{root_dir}</Conan-{name}-Root>'''

    def _format_items(self):
        """Render one Conan-<name>-Root property per dependency ('.' in names
        is replaced with '-' to form a valid MSBuild property name)."""
        sections = []
        # NOTE(review): uses self.deps_build_info here but self._deps_build_info
        # in `content` — presumably both resolve to the same object on the
        # Generator base class; confirm.
        for dep_name, cpp_info in self.deps_build_info.dependencies:
            fields = {
                'root_dir': cpp_info.rootpath,
                'name': dep_name.replace(".", "-")
            }
            section = self.item_template.format(**fields)
            sections.append(section)
        return "".join(sections)

    @property
    def filename(self):
        # fixed output filename defined by conans.paths
        return BUILD_INFO_VISUAL_STUDIO

    @property
    def content(self):
        """Fill the property-sheet template with the aggregated dependency
        info; all paths are ;-joined and backslashes normalized to '/'."""
        per_item_props = self._format_items()
        fields = {
            'item_properties': per_item_props,
            'bin_dirs': "".join("%s;" % p for p in self._deps_build_info.bin_paths).replace("\\", "/"),
            'res_dirs': "".join("%s;" % p for p in self._deps_build_info.res_paths).replace("\\", "/"),
            'include_dirs': "".join("%s;" % p for p in self._deps_build_info.include_paths).replace("\\", "/"),
            'lib_dirs': "".join("%s;" % p for p in self._deps_build_info.lib_paths).replace("\\", "/"),
            # append '.lib' to bare library names; keep names already ending in '.lib'
            'libs': "".join(['%s.lib;' % lib if not lib.endswith(".lib")
                             else '%s;' % lib for lib in self._deps_build_info.libs]),
            'definitions': "".join("%s;" % d for d in self._deps_build_info.defines),
            'compiler_flags': " ".join(self._deps_build_info.cppflags + self._deps_build_info.cflags),
            'linker_flags': " ".join(self._deps_build_info.sharedlinkflags),
            'exe_flags': " ".join(self._deps_build_info.exelinkflags)
        }
        return self.template.format(**fields)
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 30 16:39:36 2018
@author: Joshua Ip - Work
"""
# Antimony model source for a two-nanobody reporter system: transcription and
# translation of an anchor binder and a dimerization binder, complex/dimer
# formation around a target molecule, nuclear transport, and a dimer-activated
# reporter gene. The string is consumed by an Antimony/tellurium loader at
# runtime — everything inside the triple quotes is model source, not Python.
antimonyString = ("""
J0: $AncDNA -> AncRNANuc ; a_rna * AncDNA
J1: $DimDNA -> DimRNANuc ; a_rna * DimDNA
# transcription
# units of (mRNA copies)/(sec)
J3: AncRNANuc -> AncRNACyt ; diffusion_rna * AncRNANuc - diffusion_rna * AncRNACyt
J2: DimRNANuc -> DimRNACyt ; diffusion_rna * DimRNANuc - diffusion_rna * DimRNACyt
# mRNA transport out of the nucleus into the cytoplasm
# units of (mRNA copies)/(sec)
J4: AncRNACyt -> ; d_rna * AncRNACyt
J5: DimRNACyt -> ; d_rna * DimRNACyt
J6: AncRNANuc -> ; d_rna * AncRNANuc
J7: DimRNANuc -> ; d_rna * DimRNANuc
# mRNA decay
# units of 1/(sec) * (mRNA copies) = (mRNA copies)/(sec)
J8: -> AncBinder ; a_nb * AncRNACyt
J9: -> DimBinder ; a_nb * DimRNACyt
# translation
# units of (protein copies)/(sec * mRNA copies) * (mRNA copies) = (protein copies / sec)
J10: AncBinder -> ; d_nb * AncBinder
J11: DimBinder -> ; d_nb * DimBinder
J12: DimerCyt -> ; d_nb * DimerCyt
J13: DimerNuc -> ; d_nb * DimerNuc
# protein decay
# units of (1 / sec) * (protein copies) = (protein copies / sec)
J14: Mol + AncBinder -> Complex ; k_on_anchor_binder * Mol * AncBinder - k_off_anchor_binder * Complex
# the anchor binder binds to molecule of interest to form a complex.
# nanobody complexes may dissociate over time
# units for forward reaction: (1 / (mols / liter) * sec) / (copies / mol) / liters * copies * copies = copies / sec
# units for backwards reaction: (1 / sec) * copies = copies / sec
J15: Complex + DimBinder -> DimerCyt ; k_on_dimerization_binder * DimBinder * Complex - k_off_dimerization_binder * DimerCyt
# dimerization binder binds to complex to form dimers
# dimers may dissociate, but much less often than complexes
# units for forward reaction: (1 / (mols / liter) * sec) / (copies / mol) / liters * copies * copies = copies / sec
# units for backwards reaction: (1 / sec) * copies = copies / sec
J16: DimerCyt -> DimerNuc; diffusion_nb * DimerCyt
J17: DimerNuc -> DimerCyt; diffusion_nb * DimerNuc
# dimer must be transported into the cell to act as a transcription factor
J18: DimerNuc + GeneOff -> GeneOn; k_on_transcription_factor * DimerNuc * GeneOff - k_off_transcription_factor * GeneOn
# dimer acts as transcription factor for a gene
# units: (copies) / (copies)
J19: -> RepRNANuc ; a_rna * GeneOn
J20: RepRNANuc -> RepRNACyt ; diffusion_rna * RepRNANuc - diffusion_rna * RepRNACyt
J22: RepRNANuc -> ; d_rna * RepRNANuc
J23: RepRNACyt -> ; d_rna * RepRNACyt
J24: -> Rep ; a_nb * RepRNACyt
J25: Rep -> ; d_nb * Rep
# the activated gene transcribes a reporter
# *****************************************************************************************************************************
# Parameters
AvoNum = 6.02 * 10^23;
TotalCellVol = 30.3 * 10^(-6);
NucleusVol = 4.3 * 10^(-6);
CytoplasmVol = TotalCellVol - NucleusVol;
# all volumes given in units of L,
# volumes from http://bionumbers.hms.harvard.edu/bionumber.aspx?id=106557&ver=1&trm=yeast%20cytoplasm%20volume&org=
scalingFactor = 60 * 60;
# since all our rates/rate constants are in seconds, we can scale time by multiplying each time-dependent parameter by a scaling factor
# this particular value scales the parameters for time units of hours
a_rna = (0.002) * scalingFactor;
# median transcription rate = 0.12 mRNA molecules/min = 0.002 mRNA molecules/sec
# median transcription rate from http://bionumbers.hms.harvard.edu/bionumber.aspx?id=106766&ver=3&trm=transcription%20rate%20yeast&org=
# KEY ASSUMPTION: the rate of transcription of our nanobody gene is constant.
# in reality, it may not be safe to assume that our molecule is transcribed by the median transcription rate
d_rna = 5.6 * 10^(-4) * scalingFactor;
# 5.6 * 10 ^ -4 = mRNA decay rate constant in units of sec^-1
# mRNA decay constant found from http://bionumbers.hms.harvard.edu/bionumber.aspx?id=105510&ver=5&trm=mrna%20s.%20cerevisiae&org=
a_nb = (0.0185) * scalingFactor;
# yeast has no rough ER, so translation occurs in the cytoplasm
# median time for translation initiation = 4.0 * 10^2 s * mRNA / protein
# median elongation rate = 9.5 aa/s
# nanobody average amino acids = 130 aa
# time for elongation = (130 aa / protein)/(9.5 aa/s) = 14 sec / protein
# total time for 1 mRNA transcript = 14 sec / protein + 40 sec = 54 sec
# rate at which mRNA is transcribed = 1 protein/(54 sec * 1 mRNA) / ~ 0.0185 protein/(sec mRNA)
# it is notable that translation initiation rate can vary between mRNA by orders of magnitude
# all data from https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3694300/
d_nb = 2.6 * 10^(-4) * scalingFactor;
# which shows that the median half-life of a protein in a budding yeast cell is 43 minutes
# median rate constant of degradation of proteins in a yeast cell = 2.6e-4 1/sec
# data from http://www.pnas.org/content/103/35/13004 (doi: https://doi.org/10.1073/pnas.0605420103) https://www.nature.com/articles/nature10098,
k_on_anchor_binder = 4.0 * 10^5 * scalingFactor;
k_off_anchor_binder = 80 * 10^(-1) *scalingFactor;
# k_on of antibody-binding to cytochrome C = (4.0 +- 0.3) * 10^5 1/(M * sec)
# From gu's data, K_d of anchor binder binding = 20 * 10^-6, units of M
# K_d = k_off / k_on, therefore k_off = K_d * k_on
# 4.0 * 10^5 1/(M * sec) * (20 * 10^-6 M) = 80 * 10^-1 (sec^-1)
# this is one of the binding affinities that we will do a parameter sweep to learn more about
k_on_dimerization_binder = 4.0 * 10^5 * scalingFactor;
k_off_dimerization_binder = 400 * 10^(-1) * scalingFactor;
# k_on of antibody-binding to cytochrome C = (4.0 +- 0.3) * 10^5, units of 1/(M * sec)
# from Gu's data, K_d of dimerization binder binding = 100 * 10^-9, units of M
# K_d = k_off / k_on, therefore k_off = K_d * k_on
# 4.0 * 10^5 1/(M * sec) * (100 * 10^-6 M) = 400 * 10^-1 (sec^-1)
# this is one of the binding affinities that we will do a parameter sweep to learn more about
k_on_transcription_factor = 1.0 * 10^9 * scalingFactor;
k_off_transcription_factor = 1.11 * 10^(-3) * scalingFactor;
# k_on of Egr1 DNA binding domain = 1.0 * 10^9, units of 1/(sec * M)
# k_off of EGr1 DNA binding domain = 1.11 * 10^-3, units of 1/sec
# data from http://bionumbers.hms.harvard.edu/bionumber.aspx?s=n&v=5&id=104597
diffusion_rna = 1;
diffusion_nb = 3;
# Where do we get this?
# *****************************************************************************************************************************************
# Initial values
# These are all in copies
AncDNA = 1;
DimDNA = 1;
Mol = 0;
GeneOff = 1;
Setting = 50;
at time>=4: Mol=Setting;
""");
import sys, os
sys.path.insert(1, os.getcwd())
import json
from obi.db import *
from uuid import uuid4
# Seed JSON-Patch payload: 'items_to_create' adds a brand-new item; the
# per-record patches (keyed by record uuid) are appended in create_records().
patch_1 = {}
patch_1['items_to_create'] = [
    {"op": "add", "path": "/test", "value": ["a new item, cool"]}
]
def create_records():
    """Demo: build a JSON-Patch changeset against data bucket 1 and apply it."""
    user = User.get()
    bucket = DataBucket.get(id=1)
    print(bucket, bucket.data)
    rows = bucket.data.select()
    print(rows[0].json_store)
    print(user, bucket, rows)

    cs = ChangeSet()
    cs.bucket = bucket
    cs.user = user
    # patch operations keyed by the target record's uuid
    patch_1[str(rows[0].uuid)] = [
        {"op": "add", "path": "/hello", "value": ["world"]},
        {"op": "add", "path": "/news", "value": ["value"]},
    ]
    cs.changeset = patch_1
    cs.save()
    bucket.apply_changeset(cs)


if __name__ == '__main__':
    create_records()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import shutil
import nysol.mcmd as nm
import nysol.util as nu
import nysol.util.margs as margs
from nysol.util.mtemp import Mtemp
from nysol.util.mmkdir import mkDir
from nysol.util.mparallel import meach as meach
from nysol.util.mrecount import mrecount
class mccomp(object):
    """Compute connected components of an undirected graph via R/igraph,
    with pre/post-processing done through nysol mcmd pipelines."""

    # User-facing help text (Japanese), printed verbatim by help();
    # runtime string — left untranslated.
    helpMSG="""
----------------------------
#{$cmd} version #{$version}
----------------------------
概要) 連結成分を出力する
用法) #{$cmd} ei= ef= [ni=] [nf=] [o=] [T=] [-verbose] [-mcmdenv] [--help]
ファイル名指定
ei= : 枝データファイル
ef= : 枝データ上の2つの節点項目名(省略時は"node1,node2")
ni= : 節点データファイル
nf= : 節点データ上の節点項目名(省略時は"node")
o= : 出力ファイル(連結成分ID-枝:-nodeを指定することでクリークID-節点に変更可能,省略時は標準出力)
その他
T= : ワークディレクトリ(default:/tmp)
-verbose : Rの実行ログを出力
--help : ヘルプの表示
入力形式)
一般グラフを節点ペアで表現した形式。
o=の出力形式)
節点と連結成分IDを出力する。
出力項目は"id,node,size"の3項目である。
sizeは連結成分を構成する節点数である。
例)
$ cat data/edge.csv
n1,n2
a,d
a,e
b,f
d,e
f,g
g,b
g,h
$ #{$cmd} ei=edge.csv ef=n1,n2 o=output.csv
##END# #{$cmd} ei=edge.csv ef=n1,n2 -node o=output.csv
$ cat output.csv
id%0,node,size
1,a,3
1,d,3
1,e,3
2,b,4
2,f,4
2,g,4
2,h,4
例) 節点ファイルも指定した例
$ cat node.csv
n
a
b
c
d
e
f
g
h
$ #{$cmd} ei=edge.csv ef=n1,n2 ni=node.csv nf=n o=output.csv
#END# #{$cmd} ei=edge.csv o=output.csv ef=n1,n2 ni=node.csv nf=n
$ cat output.csv
id%0,node,size
1,a,3
1,d,3
1,e,3
2,b,4
2,f,4
2,g,4
2,h,4
3,c,1
4,i,1
# Copyright(c) NYSOL 2012- All Rights Reserved.
"""
    verInfo="version=0.1"

    # accepted keyword arguments and their (informal) types
    paramter = {
        "ei":"str",
        "ef":"str",
        "ni":"str",
        "nf":"str",
        "o":"str",
        "rp":"bool",
        "verbose":"bool"
    }
    # required ("hissu" = mandatory) parameters
    paramcond = {
        "hissu": ["ei","ef"]
    }

    # NOTE(review): help/ver are defined without self/cls — only callable as
    # mccomp.help() / mccomp.ver() on the class itself.
    def help():
        print(mccomp.helpMSG)

    def ver():
        print(mccomp.verInfo)

    def __param_check_set(self , kwd):
        """Validate the keyword arguments against `paramter` and store them
        as attributes on self."""
        # existence check: reject any unknown keyword
        for k,v in kwd.items():
            if not k in mccomp.paramter :
                raise( Exception("KeyError: {} in {} ".format(k,self.__class__.__name__) ) )

        self.msgoff = True
        self.oFile = kwd["o"] if "o" in kwd else None  # None -> standard output
        self.ei = kwd["ei"]
        # ef= holds the two edge endpoint field names, comma-separated
        ef0 = kwd["ef"].split(",")
        self.ef1 = ef0[0]
        self.ef2 = ef0[1]
        self.ni = kwd["ni"] if "ni" in kwd else None
        self.nf = kwd["nf"] if "nf" in kwd else None
        self.verbose = kwd["verbose"] if "verbose" in kwd else False
        self.rpf = kwd["rp"] if "rp" in kwd else False

    def __cmdline(self):
        """Reconstruct an equivalent command-line string (for end-of-run logging)."""
        cmdline = self.__class__.__name__
        for k,v in self.args.items():
            if type(v) is bool :
                if v == True :
                    cmdline += " -" + str(k)
            else:
                cmdline += " " + str(k) + "=" + str(v)
        return cmdline

    def __init__(self,**kwd):
        # parameter validation
        self.args = kwd
        self.__param_check_set(kwd)

    ####
    # generating the R script for graph features
    # pars: parameters for each graph feature
    def genRscript(self,eFile,oFile,cidFile,scpFile):
        """Write an R script (to scpFile) that reads the numbered edge list
        eFile, computes connected components with igraph, writes the
        id/nid/size table to oFile and the max component id to cidFile."""
        r_proc = '''
library(igraph)
## reading edge file
g=read.graph("{eFile}",format="edgelist",directed=FALSE)
c=components(g)
seq=0:(length(c$membership)-1)
dat=data.frame(id=c$membership,nid=seq,size=c$csize[c$membership])
write.csv(dat,file="{oFile}",quote=FALSE,row.names = FALSE)
write.table(max(c$membership),file="{cidFile}",col.names = FALSE,row.names = FALSE)
'''.format( eFile = eFile ,oFile = oFile,cidFile = cidFile)
        with open(scpFile,"w") as fpw:
            fpw.write(r_proc)

    def conv2num(self,ei,ni,ef1,ef2,nf,numFile,mapFile,isoFile):
        """Renumber node labels into the 0-based integer ids igraph expects.

        Writes: numFile (space-separated numbered edge list for R),
        mapFile (label<->number mapping), isoFile (isolated nodes that appear
        only in the node file, not in any edge)."""
        #MCMD::msgLog("converting graph files into a pair of numbered nodes ...")
        #wf=MCMD::Mtemp.new
        #wf1=wf.file
        #wf2=wf.file
        #wf3=wf.file
        allinObj =[]
        inobj1 = nm.mcut(f="%s:node"%(ef1),i=ei )
        inobj2 = nm.mcut(f="%s:node"%(ef2),i=ei )
        allinObj.append(inobj1)
        allinObj.append(inobj2)
        if nf :
            allinObj.append(nm.mcut(f="%s:node"%(nf),i=ni) )
        #nodes list that are included in edge
        ne_list = nm.muniq(k="node",i=[inobj1,inobj2]).msetstr(v=1,a="eNode")
        # isolate nodes list
        iso_f = nm.mcommon(i=allinObj ,k="node",m=ne_list,r=True).mcut(f="node",o=isoFile)
        iso_f.run()
        # create a mapping table between the original node label and the number iGraph will use
        map_f = nm.muniq(i=allinObj , k="node" )
        map_f <<= nm.mjoin(m=ne_list , k="node" ,f="eNode" )
        map_f <<= nm.mnullto( f="eNode",v=0 )
        map_f <<= nm.mnumber(s="eNode%r,node",a="nid",o=mapFile)
        map_f.run()
        # create a data file that R script read
        f=None
        f <<= nm.mjoin(k=ef1,K="node",m=mapFile,f="nid:nid1",i=ei)
        f <<= nm.mjoin(k=ef2,K="node",m=mapFile,f="nid:nid2")
        f <<= nm.mcut(f="nid1,nid2",nfno=True)
        f <<= nm.cmd("tr ',' ' ' " )
        f <<= nm.mwrite(o=numFile)
        f.run()

    # ============
    # entry point
    def run(self,**kw_args):
        """Execute the full pipeline: renumber nodes, run the R/igraph
        component computation, then merge component ids (and isolated nodes,
        each as its own singleton component) back onto the node labels."""
        os.environ['KG_ScpVerboseLevel'] = "2"
        if "msg" in kw_args:
            if kw_args["msg"] == "on":
                os.environ['KG_ScpVerboseLevel'] = "4"
        # convert the original graph to one igraph can handle
        temp=Mtemp()
        numFile = temp.file()
        mapFile = temp.file()
        isoFile = temp.file()
        cluFile = temp.file()
        cidFile = temp.file()
        scpFile = temp.file()
        self.conv2num(self.ei,self.ni,self.ef1,self.ef2,self.nf,numFile,mapFile,isoFile)
        self.genRscript(numFile,cluFile,cidFile,scpFile)
        # run the generated R script; -verbose keeps R's output visible
        if self.verbose :
            os.system("R --vanilla -q < %s"%(scpFile))
        else:
            os.system("R --vanilla -q --slave < %s 2>/dev/null "%(scpFile))
        # cid = largest component id produced by R; isolated nodes are
        # numbered starting from cid+1 below
        cid=0
        with open(cidFile) as rfp:
            xxx = rfp.read()
            cid = int(xxx.rstrip())
        isc = None
        isc <<= nm.mnumber(s="node", S=cid+1 ,a="id",i=isoFile)
        isc <<= nm.msetstr(v=1,a="size")
        isc <<= nm.mcut(f="id,node,size")
        if self.nf:
            isc <<= nm.mfldname(f="node:"+self.nf)
        # #{cluFile}
        # id,nid,size
        # 1,0,3
        # 2,1,4
        # 1,2,3
        # 1,3,3
        # 2,4,4
        # 2,5,4
        # 2,6,4
        cln = None
        cln <<= nm.mjoin(k="nid",m=mapFile,i=cluFile,f="node")
        cln <<= nm.mcut(f="id,node,size")
        if self.nf:
            cln <<= nm.mfldname(f="node:"+self.nf)
        # merge connected components and isolated singletons, sorted by id
        nm.msortf(f="id",i=[isc,cln],o=self.oFile,rp=self.rpf).run()
        nu.mmsg.endLog(self.__cmdline())
|
# discord-components
from discord_components import DiscordComponents, Button, ButtonStyle, Select, SelectOption
import asyncio
import discord
async def timeout_button(msg):
    """Replace the message's components with one disabled red 'Timed Out!' button."""
    timed_out = Button(style=4, label="Timed Out!", disabled=True, custom_id="timed_out")
    await msg.edit(components=[timed_out])
async def clear(msg):
    """Strip every interactive component from the message."""
    no_components = []
    await msg.edit(components=no_components)
async def cancel(msg):
    """Replace the message's components with one disabled red cancellation button."""
    cancelled = Button(style=4, label="The command was canceled", disabled=True, custom_id="cmd_canceled")
    await msg.edit(components=[cancelled])
def accept():
    """Label text for the confirm/accept button."""
    return "Accept"
def deny():
    """Label text for the decline button."""
    return "Run Away"
|
import cv2
import os
IMAGE_SIZE = (200, 200)
def init_image(path):
    """Load the image at *path* as grayscale, resize it to IMAGE_SIZE, and
    return its AKAZE (keypoints, descriptors)."""
    akaze = cv2.AKAZE_create()
    gray = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    resized = cv2.resize(gray, IMAGE_SIZE)
    return akaze.detectAndCompute(resized, None)
def compare(des1, des2):
    """Brute-force match two descriptor sets (Hamming norm) and return the
    list of match distances."""
    matcher = cv2.BFMatcher(cv2.NORM_HAMMING)
    distances = []
    for match in matcher.match(des1, des2):
        distances.append(match.distance)
    return distances
if __name__ == "__main__":
    # get the feature info (keypoints/descriptors) of the original file;
    # each print shows the mean match distance (lower = more similar)
    original_kp, original_des = init_image("./images/cat-original.jpg")
    # self-comparison baseline — should be 0.0
    ret = compare(original_des, original_des)
    print("オリジナル:{0}".format(sum(ret) / len(ret)))
    # original vs. resized copy
    size_kp, size_des = init_image("./images/cat-small.jpg")
    ret = compare(original_des, size_des)
    print("サイズ違い:{0}".format(sum(ret) / len(ret)))
    # original vs. recolored copy
    color_kp, color_des = init_image("./images/cat-color.jpg")
    ret = compare(original_des, color_des)
    print("色違い:{0}".format(sum(ret) / len(ret)))
    # original vs. lower-quality copy
    quality_kp, quality_des = init_image("./images/cat-quality.jpg")
    ret = compare(original_des, quality_des)
    print("画質違い:{0}".format(sum(ret) / len(ret)))
    # original vs. a completely different image — expected largest distance
    different_kp, different_des = init_image("./images/cat-different.jpg")
    ret = compare(original_des, different_des)
    print("全く異なる画像:{0}".format(sum(ret) / len(ret)))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.