| code (string, 2-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (1 class) | license (15 classes) | size (int32, 2-1.05M) |
|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
# CAPTCHA recognition with Python + Selenium
#
import re
import requests
import pytesseract
from selenium import webdriver
from PIL import Image, ImageEnhance
import time
#
driver = webdriver.Chrome()
driver.maximize_window()
driver.get("https://higo.flycua.com/hp/html/login.html")
driver.implicitly_wait(30)
# The username and password below are personal information, so they are masked here
driver.find_element_by_name('memberId').send_keys('xxxxxx')
driver.find_element_by_name('password').send_keys('xxxxxx')
# The CAPTCHA is rarely recognized correctly on the first try, so keep looping until login succeeds
while True:
    # Clear the CAPTCHA input box; it may still hold a wrong value from a previous attempt
    driver.find_element_by_name("verificationCode").clear()
    # Path where the page screenshot / CAPTCHA image is saved
    screenImg = r"H:\screenImg.png"
    # Take a screenshot of the browser page
    driver.get_screenshot_as_file(screenImg)
    # Locate the CAPTCHA element's position and size
    location = driver.find_element_by_name('authImage').location
    size = driver.find_element_by_name('authImage').size
    # The offsets below should not be needed in theory, but without them the crop misses
    # the CAPTCHA area; check the saved screenshot and adjust the coordinates to match
    left = location['x'] + 530
    top = location['y'] + 175
    right = location['x'] + size['width'] + 553
    bottom = location['y'] + size['height'] + 200
    # Re-open the screenshot, crop out the CAPTCHA area and save it again
    img = Image.open(screenImg).crop((left, top, right, bottom))
    # Some image processing that helps recognition
    img = img.convert('RGBA')          # conversion mode: L | RGB
    img = img.convert('L')             # conversion mode: L | RGB
    img = ImageEnhance.Contrast(img)   # build a contrast enhancer
    img = img.enhance(2.0)             # apply the contrast factor
    img.save(screenImg)
    # Read the processed image back and run OCR on it
    img = Image.open(screenImg)
    code = pytesseract.image_to_string(img)
    # Print the raw recognized CAPTCHA
    # print(code.strip())
    # Strip special characters from the recognized text, keeping only letters and digits
    b = ''
    for i in code.strip():
        pattern = re.compile(r'[a-zA-Z0-9]')
        m = pattern.search(i)
        if m is not None:
            b += i
    # Print the cleaned-up CAPTCHA
    print(b)
    # Type the cleaned value into the CAPTCHA input box
    driver.find_element_by_name("verificationCode").send_keys(b)
    # Click the login button
    driver.find_element_by_class_name('login-form-btn-submit').click()
    # Wait 5 seconds; after a wrong CAPTCHA the page shows an error and needs a moment
    # before the next attempt
    time.sleep(5)
    # Get the cookies and convert them to a string
    cookie1 = str(driver.get_cookies())
    print(cookie1)
    # Second regex use: check whether the cookie string contains "tokenId". If it does,
    # login succeeded, so break out of the loop and continue with the automated steps;
    # otherwise try to recognize the CAPTCHA again.
    matchObj = re.search(r'tokenId', cookie1, re.M | re.I)
    if matchObj:
        print(matchObj.group())
        break
    else:
        print("No match!!")
print('done')
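# Added note: the find_element_by_* helpers used above were removed in Selenium 4.
# A minimal sketch of the equivalent modern lookup, assuming the same element names
# ('memberId', 'verificationCode', ...) are still present on the page:
from selenium.webdriver.common.by import By

def find_by_name(drv, name):
    """Selenium 4 style equivalent of drv.find_element_by_name(name)."""
    return drv.find_element(By.NAME, name)

# The per-character regex filter above can also be written in a single pass:
#   ''.join(re.findall(r'[a-zA-Z0-9]', code))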
| 1065865483/0python_script | test/imag_test.py | Python | mit | 3,355 |
import argparse
import select


def no_piped_input(arguments):
    # select with a zero timeout reports whether the input file/stream has data ready
    inputs_ready, _, _ = select.select([arguments.file], [], [], 0)
    return not bool(inputs_ready)


def parse_args(args, input):
    parser = argparse.ArgumentParser()
    parser.add_argument('--url', help="URL of the target data-set",
                        required=True)
    parser.add_argument('--token', help="Bearer token for the target data-set",
                        required=True)
    parser.add_argument('--timeout', help="Request timeout. Default: 5 seconds",
                        required=False, default=5, type=float)
    parser.add_argument('--attempts', help="Number of times to attempt sending data. Default: 3",
                        required=False, default=3, type=int)
    parser.add_argument('--failfast', help="Don't retry sending data",
                        required=False, default=False, action='store_true')
    parser.add_argument('--sleep', help=argparse.SUPPRESS,
                        required=False, default=3, type=int)
    parser.add_argument('file', help="File containing JSON to send", nargs='?',
                        type=argparse.FileType('r'),
                        default=input)
    arguments = parser.parse_args(args)
    if arguments.failfast:
        arguments.attempts = 1
    if no_piped_input(arguments):
        parser.error("No input provided")
    return arguments
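# Illustrative usage only (added; the real CLI entry point for backdrop-send is not part
# of this file): parse_args takes the raw argument list plus a fallback input stream for
# the positional `file` argument.
if __name__ == '__main__':
    import sys
    options = parse_args(sys.argv[1:], sys.stdin)
    print("sending to %s (attempts=%d, timeout=%s)" % (options.url, options.attempts, options.timeout))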
| alphagov/backdropsend | backdropsend/argumentsparser.py | Python | mit | 1,401 |
# Define a function sum() and a function multiply()
# that sums and multiplies (respectively) all the numbers in a list of numbers.
# For example, sum([1, 2, 3, 4]) should return 10,
# and multiply([1, 2, 3, 4]) should return 24.
def check_list(num_list):
    """Check if input is list"""
    if num_list is None:
        return False
    if len(num_list) == 0:
        return False
    new_list = []
    for i in num_list:
        if i != '[' and i != ']' and i != ',':
            new_list.append(i)
    for x in new_list:
        if type(x) != int:
            return False
    return True


def sum(num_list):
    """Compute sum of list values"""
    if check_list(num_list):
        final_sum = 0
        for i in num_list:
            final_sum = final_sum + i
        return final_sum
    else:
        return False


def multiply(num_list):
    """Multiply list values"""
    if check_list(num_list):
        final_sum = 1
        for i in num_list:
            final_sum = final_sum * i
        return final_sum
    else:
        return False


def main():
    get_list = input("Enter list: ")
    operations = [sum, multiply]
    print map(lambda x: x(get_list), operations)


if __name__ == "__main__":
    main()
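# For comparison (added, not part of the original exercise): with a list that is known to
# hold numbers, the same two results come from functools.reduce. The built-in sum() is
# shadowed by the function above, so reduce() is used for both operations here.
import operator
from functools import reduce
assert reduce(operator.add, [1, 2, 3, 4], 0) == 10  # same as sum([1, 2, 3, 4])
assert reduce(operator.mul, [1, 2, 3, 4], 1) == 24  # same as multiply([1, 2, 3, 4])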
| giantas/minor-python-tests | Operate List/operate_list.py | Python | mit | 1,301 |
##############################################################################
# Part of the book "Introdução à Programação com Python"
# Author: Nilo Ney Coutinho Menezes
# Publisher: Editora Novatec (c) 2010-2014
# First edition - November/2010 - ISBN 978-85-7522-250-8
# First reprint - October/2011
# Second reprint - November/2012
# Third reprint - August/2013
# Second edition - June/2014 - ISBN 978-85-7522-408-3
# Site: http://python.nilo.pro.br/
#
# File: exercicios_resolvidos\capitulo 05\exercicio-05-17.py
##############################################################################
# The program stops right after printing the number of R$50.00 bills
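# Illustrative sketch only (added; the exercise's own code is not included in this file,
# and all names below are assumptions). One way to get the behaviour described above is
# a change-breaking script that stops after the first denomination:
#
#   valor = int(input("Valor a pagar: "))
#   print("Cédulas de R$50,00: %d" % (valor // 50))
#   # Bug described in the header: execution ends here instead of continuing with
#   # valor = valor % 50 and the remaining denominations (20, 10, 5, 1).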
| laenderoliveira/exerclivropy | exercicios_resolvidos/capitulo 05/exercicio-05-17.py | Python | mit | 695 |
#!/bin/python
# Solution for https://www.hackerrank.com/challenges/alien-username
import re


def is_valid_username(s):
    pattern = r'^[_\.][0-9]+[a-zA-Z]*_?$'
    match = re.match(pattern, s)
    return match


n = int(raw_input().strip())
for i in range(n):
    s = raw_input().strip()
    if is_valid_username(s):
        print 'VALID'
    else:
        print 'INVALID'
| ernestoalarcon/competitiveprogramming | alien-username.py | Python | mit | 373 |
# encoding: utf-8
"""Implementations for various useful completers.
These are all loaded by default by IPython.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team.
#
# Distributed under the terms of the BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
# Stdlib imports
import glob
import inspect
import os
import re
import sys
try:
# Python >= 3.3
from importlib.machinery import all_suffixes
_suffixes = all_suffixes()
except ImportError:
from imp import get_suffixes
_suffixes = [ s[0] for s in get_suffixes() ]
# Third-party imports
from time import time
from zipimport import zipimporter
# Our own imports
from IPython.core.completer import expand_user, compress_user
from IPython.core.error import TryNext
from IPython.utils._process_common import arg_split
from IPython.utils.py3compat import string_types
# FIXME: this should be pulled in with the right call via the component system
from IPython import get_ipython
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
# Time in seconds after which the rootmodules will be stored permanently in the
# ipython ip.db database (kept in the user's .ipython dir).
TIMEOUT_STORAGE = 2
# Time in seconds after which we give up
TIMEOUT_GIVEUP = 20
# Regular expression for the python import statement
import_re = re.compile(r'(?P<name>[a-zA-Z_][a-zA-Z0-9_]*?)'
r'(?P<package>[/\\]__init__)?'
r'(?P<suffix>%s)$' %
r'|'.join(re.escape(s) for s in _suffixes))
# RE for the ipython %run command (python + ipython scripts)
magic_run_re = re.compile(r'.*(\.ipy|\.ipynb|\.py[w]?)$')
#-----------------------------------------------------------------------------
# Local utilities
#-----------------------------------------------------------------------------
def module_list(path):
"""
Return the list containing the names of the modules available in the given
folder.
"""
# sys.path has the cwd as an empty string, but isdir/listdir need it as '.'
if path == '':
path = '.'
# A few local constants to be used in loops below
pjoin = os.path.join
if os.path.isdir(path):
# Build a list of all files in the directory and all files
# in its subdirectories. For performance reasons, do not
# recurse more than one level into subdirectories.
files = []
for root, dirs, nondirs in os.walk(path, followlinks=True):
subdir = root[len(path)+1:]
if subdir:
files.extend(pjoin(subdir, f) for f in nondirs)
dirs[:] = [] # Do not recurse into additional subdirectories.
else:
files.extend(nondirs)
else:
try:
files = list(zipimporter(path)._files.keys())
except:
files = []
# Build a list of modules which match the import_re regex.
modules = []
for f in files:
m = import_re.match(f)
if m:
modules.append(m.group('name'))
return list(set(modules))
def get_root_modules():
"""
Returns a list containing the names of all the modules available in the
folders of the pythonpath.
ip.db['rootmodules_cache'] maps sys.path entries to list of modules.
"""
ip = get_ipython()
rootmodules_cache = ip.db.get('rootmodules_cache', {})
rootmodules = list(sys.builtin_module_names)
start_time = time()
store = False
for path in sys.path:
try:
modules = rootmodules_cache[path]
except KeyError:
modules = module_list(path)
try:
modules.remove('__init__')
except ValueError:
pass
if path not in ('', '.'): # cwd modules should not be cached
rootmodules_cache[path] = modules
if time() - start_time > TIMEOUT_STORAGE and not store:
store = True
print("\nCaching the list of root modules, please wait!")
print("(This will only be done once - type '%rehashx' to "
"reset cache!)\n")
sys.stdout.flush()
if time() - start_time > TIMEOUT_GIVEUP:
print("This is taking too long, we give up.\n")
return []
rootmodules.extend(modules)
if store:
ip.db['rootmodules_cache'] = rootmodules_cache
rootmodules = list(set(rootmodules))
return rootmodules
def is_importable(module, attr, only_modules):
if only_modules:
return inspect.ismodule(getattr(module, attr))
else:
return not(attr[:2] == '__' and attr[-2:] == '__')
def try_import(mod, only_modules=False):
try:
m = __import__(mod)
except:
return []
mods = mod.split('.')
for module in mods[1:]:
m = getattr(m, module)
m_is_init = hasattr(m, '__file__') and '__init__' in m.__file__
completions = []
if (not hasattr(m, '__file__')) or (not only_modules) or m_is_init:
completions.extend( [attr for attr in dir(m) if
is_importable(m, attr, only_modules)])
completions.extend(getattr(m, '__all__', []))
if m_is_init:
completions.extend(module_list(os.path.dirname(m.__file__)))
completions = {c for c in completions if isinstance(c, string_types)}
completions.discard('__init__')
return list(completions)
#-----------------------------------------------------------------------------
# Completion-related functions.
#-----------------------------------------------------------------------------
def quick_completer(cmd, completions):
""" Easily create a trivial completer for a command.
Takes either a list of completions, or all completions in string (that will
be split on whitespace).
Example::
[d:\ipython]|1> import ipy_completers
[d:\ipython]|2> ipy_completers.quick_completer('foo', ['bar','baz'])
[d:\ipython]|3> foo b<TAB>
bar baz
[d:\ipython]|3> foo ba
"""
if isinstance(completions, string_types):
completions = completions.split()
def do_complete(self, event):
return completions
get_ipython().set_hook('complete_command',do_complete, str_key = cmd)
def module_completion(line):
"""
Returns a list containing the completion possibilities for an import line.
The line looks like this :
'import xml.d'
'from xml.dom import'
"""
words = line.split(' ')
nwords = len(words)
# from whatever <tab> -> 'import '
if nwords == 3 and words[0] == 'from':
return ['import ']
# 'from xy<tab>' or 'import xy<tab>'
if nwords < 3 and (words[0] in {'%aimport', 'import', 'from'}) :
if nwords == 1:
return get_root_modules()
mod = words[1].split('.')
if len(mod) < 2:
return get_root_modules()
completion_list = try_import('.'.join(mod[:-1]), True)
return ['.'.join(mod[:-1] + [el]) for el in completion_list]
# 'from xyz import abc<tab>'
if nwords >= 3 and words[0] == 'from':
mod = words[1]
return try_import(mod)
#-----------------------------------------------------------------------------
# Completers
#-----------------------------------------------------------------------------
# These all have the func(self, event) signature to be used as custom
# completers
def module_completer(self,event):
"""Give completions after user has typed 'import ...' or 'from ...'"""
# This works in all versions of python. While 2.5 has
# pkgutil.walk_packages(), that particular routine is fairly dangerous,
# since it imports *EVERYTHING* on sys.path. That is: a) very slow b) full
# of possibly problematic side effects.
# This search the folders in the sys.path for available modules.
return module_completion(event.line)
# FIXME: there's a lot of logic common to the run, cd and builtin file
# completers, that is currently reimplemented in each.
def magic_run_completer(self, event):
"""Complete files that end in .py or .ipy or .ipynb for the %run command.
"""
comps = arg_split(event.line, strict=False)
# relpath should be the current token that we need to complete.
if (len(comps) > 1) and (not event.line.endswith(' ')):
relpath = comps[-1].strip("'\"")
else:
relpath = ''
#print("\nev=", event) # dbg
#print("rp=", relpath) # dbg
#print('comps=', comps) # dbg
lglob = glob.glob
isdir = os.path.isdir
relpath, tilde_expand, tilde_val = expand_user(relpath)
# Find if the user has already typed the first filename, after which we
# should complete on all files, since after the first one other files may
# be arguments to the input script.
if any(magic_run_re.match(c) for c in comps):
matches = [f.replace('\\','/') + ('/' if isdir(f) else '')
for f in lglob(relpath+'*')]
else:
dirs = [f.replace('\\','/') + "/" for f in lglob(relpath+'*') if isdir(f)]
pys = [f.replace('\\','/')
for f in lglob(relpath+'*.py') + lglob(relpath+'*.ipy') +
lglob(relpath+'*.ipynb') + lglob(relpath + '*.pyw')]
matches = dirs + pys
#print('run comp:', dirs+pys) # dbg
return [compress_user(p, tilde_expand, tilde_val) for p in matches]
def cd_completer(self, event):
"""Completer function for cd, which only returns directories."""
ip = get_ipython()
relpath = event.symbol
#print(event) # dbg
if event.line.endswith('-b') or ' -b ' in event.line:
# return only bookmark completions
bkms = self.db.get('bookmarks', None)
if bkms:
return bkms.keys()
else:
return []
if event.symbol == '-':
width_dh = str(len(str(len(ip.user_ns['_dh']) + 1)))
# jump in directory history by number
fmt = '-%0' + width_dh +'d [%s]'
ents = [ fmt % (i,s) for i,s in enumerate(ip.user_ns['_dh'])]
if len(ents) > 1:
return ents
return []
if event.symbol.startswith('--'):
return ["--" + os.path.basename(d) for d in ip.user_ns['_dh']]
# Expand ~ in path and normalize directory separators.
relpath, tilde_expand, tilde_val = expand_user(relpath)
relpath = relpath.replace('\\','/')
found = []
for d in [f.replace('\\','/') + '/' for f in glob.glob(relpath+'*')
if os.path.isdir(f)]:
if ' ' in d:
# we don't want to deal with any of that, complex code
# for this is elsewhere
raise TryNext
found.append(d)
if not found:
if os.path.isdir(relpath):
return [compress_user(relpath, tilde_expand, tilde_val)]
# if no completions so far, try bookmarks
bks = self.db.get('bookmarks',{})
bkmatches = [s for s in bks if s.startswith(event.symbol)]
if bkmatches:
return bkmatches
raise TryNext
return [compress_user(p, tilde_expand, tilde_val) for p in found]
def reset_completer(self, event):
"A completer for %reset magic"
return '-f -s in out array dhist'.split()
| lancezlin/ml_template_py | lib/python2.7/site-packages/IPython/core/completerlib.py | Python | mit | 11,780 |
#! /usr/bin/python


class Indexer:
    def __getitem__(self, index):
        return index ** 2


x = Indexer()
for i in range(5):
    print x[i],


class Stepper:
    def __getitem__(self, index):
        return self.data[index]


s = Stepper()
s.data = "spam"
for x in s:
    print x,
print s.data[0]
| yuweijun/learning-programming | language-python/getitem.py | Python | mit | 281 |
#! /usr/bin/python
# -*- coding: utf-8 -*-

__author__ = "Osman Baskaya"

"""
Some utility functions for entailment project
"""

from collections import defaultdict as dd
from metrics import *


def get_eval_metric(metric_name):
    if metric_name == "jaccard":
        return jaccard_index
    elif metric_name == "1":
        return entail_score1
    elif metric_name == "2":
        return entail_score2
    elif metric_name == "3":
        return entail_score3


def get_test_pairs(test_pairs):
    pairs = []
    for line in open(test_pairs):
        w1, w2, tag = line.split()
        pairs.append((w1, w2, tag))
    return pairs


def get_contexts_above_threshold(test_set, subs_file, threshold):
    words = dd(set)
    for line_num, line in enumerate(subs_file):
        line = line.split()
        # tw = line[0]
        for i in xrange(1, len(line) - 1, 2):
            word = line[i]
            if word in test_set:
                prob = float(line[i + 1])
                if prob >= threshold:
                    words[word].add(line_num)
    return words, line_num + 1
| osmanbaskaya/text-entail | run/entail_utils.py | Python | mit | 1,076 |
import numpy as np
import cv2
from matplotlib import pyplot as plt

face_cascade = cv2.CascadeClassifier('/home/tianyiz/user/601project/c/haarcascade_frontalface_alt.xml')
cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2()

while 1:
    ret, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    # Background reduce
    fgmask = fgbg.apply(img)
    cv2.imshow('Reduce', fgmask)
    for (x, y, w, h) in faces:
        print(x, y, w, h)
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = img[y:y + h, x:x + w]
    cv2.imshow('img', img)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break

cap.release()
cv2.destroyAllWindows()
| Tianyi94/EC601Project_Somatic-Parkour-Game-based-on-OpenCV | Old Code/ControlPart/FaceDetection+BackgroundReduction.py | Python | mit | 723 |
a = "1"
b = 1
print("Arvud on " + 5 * a + " ja " + str(5 * b))
| captainhungrykaboom/MTAT.TK.006 | 6. märts - 12. märts ülesanded/harjutus ülesanne 6.py | Python | mit | 62 |
# coding:utf8


class Plugin(object):
    __doc__ = '''This plugin stops the bot.
Using it requires access level {protection} or higher
Keywords: [{keywords}]
Usage: {keyword}
Example: {keyword}'''

    name = 'stop'
    keywords = (u'стоп', name, '!')  # u'стоп' is Russian for 'stop'
    protection = 3
    argument_required = False

    def respond(self, msg, rsp, utils, *args, **kwargs):
        utils.stop_bot()
        # Reply text in Russian: "Shutting down. Have a nice day!"
        rsp.text = u'Завершаю работу. Удачного времени суток!'
        return rsp
| Fogapod/VKBot | bot/plugins/plugin_stop.py | Python | mit | 684 |
import math
from pathlib import Path
from tkinter import W, N, E, StringVar, PhotoImage
from tkinter.ttk import Button, Label, LabelFrame
from overrides import overrides
from pyminutiaeviewer.gui_common import NotebookTabBase
from pyminutiaeviewer.minutia import Minutia, MinutiaType
class MinutiaeEditorFrame(NotebookTabBase):
# TODO: I'd like to remove the <minutiae> parameter
def __init__(self, parent, load_fingerprint_func, load_minutiae_func, save_minutiae_file):
super(self.__class__, self).__init__(parent, load_fingerprint_func)
self.root = parent
self.minutiae_count = StringVar()
self._update_minutiae_count()
self.current_minutiae = None
self.load_minutiae_btn = Button(self, text="Load Minutiae", command=load_minutiae_func)
self.load_minutiae_btn.grid(row=1, column=0, sticky=N + W + E)
self.export_minutiae_btn = Button(self, text="Export Minutiae", command=save_minutiae_file)
self.export_minutiae_btn.grid(row=2, column=0, sticky=N + W + E)
self.info_frame = InfoFrame(self, "Info", self.minutiae_count)
self.info_frame.grid(row=3, column=0, padx=4, sticky=N + W + E)
@overrides
def load_fingerprint_image(self, image):
self._update_minutiae_count()
@overrides
def load_minutiae_file(self):
self._update_minutiae_count()
def _update_minutiae_count(self):
self.minutiae_count.set("Minutiae: {}".format(self.root.number_of_minutiae()))
@overrides
def on_canvas_mouse_left_click(self, event):
"""
Adds a new bifurcation at the mouse click.
"""
x, y = event.x, event.y
if not self.root.is_point_in_canvas_image(x, y):
return
self.current_minutiae = ((x, y), MinutiaType.RIDGE_ENDING)
@overrides
def on_canvas_ctrl_mouse_left_click(self, event):
"""
Adds a new ridge ending at the mouse click.
"""
x, y = event.x, event.y
if not self.root.is_point_in_canvas_image(x, y):
return
self.current_minutiae = ((x, y), MinutiaType.BIFURCATION)
@overrides
def on_canvas_mouse_right_click(self, event):
"""
Removes a minutiae close to the mouse click.
"""
x, y = event.x, event.y
if not self.root.is_point_in_canvas_image(x, y):
return
scale_factor = self.root.canvas_image_scale_factor()
x, y = x * scale_factor, y * scale_factor
possible_minutiae = []
for i in range(self.root.number_of_minutiae()):
m = self.root.minutiae[i]
dist = abs(m.x - x) + abs(m.y - y)
if dist < 10:
possible_minutiae.append((dist, i))
# Sort ascending, in-place.
possible_minutiae.sort(key=lambda tup: tup[0])
if len(possible_minutiae) == 0:
return
else:
del self.root.minutiae[possible_minutiae[0][1]]
self.root.draw_minutiae()
self._update_minutiae_count()
@overrides
def on_canvas_mouse_left_drag(self, event):
"""
Sets the angle of the minutiae being placed.
"""
x, y = event.x, event.y
((sx, sy), minutiae_type) = self.current_minutiae
angle = math.degrees(math.atan2(y - sy, x - sx)) + 90
minutia = Minutia(round(sx), round(sy), angle, minutiae_type, 1.0)
self.root.draw_single_minutia(minutia)
@overrides
def on_canvas_mouse_left_release(self, event):
"""
Places the minutiae currently being edited..
"""
x, y = event.x, event.y
scale_factor = self.root.canvas_image_scale_factor()
((px, py), minutiae_type) = self.current_minutiae
angle = math.degrees(math.atan2(y - py, x - px)) + 90
self.root.minutiae.append(Minutia(round(px * scale_factor), round(py * scale_factor), angle, minutiae_type, 1.0))
self.current_minutiae = None
self.root.draw_minutiae()
self._update_minutiae_count()
class InfoFrame(LabelFrame):
def __init__(self, parent, title, minutiae_count):
super(self.__class__, self).__init__(parent, text=title)
self.current_number_minutiae_label = Label(self, textvariable=minutiae_count)
self.current_number_minutiae_label.grid(row=0, column=0, sticky=N + W + E)
self.bifurcation_label = Label(self, text="Bifurcation (LMB):")
self.bifurcation_label.grid(row=1, column=0, sticky=W)
self.bifurcation_image = PhotoImage(file=Path(__file__).resolve().parent / 'images' / 'bifurcation.png')
self.bifurcation_image_label = Label(self, image=self.bifurcation_image)
self.bifurcation_image_label.grid(row=2, column=0, sticky=W)
self.ridge_ending_label = Label(self, text="Ridge Ending (CTRL + LMB):")
self.ridge_ending_label.grid(row=3, column=0, sticky=W)
self.ridge_ending_image = PhotoImage(file=Path(__file__).resolve().parent / 'images' / 'ridge_ending.png')
self.ridge_ending_image_label = Label(self, image=self.ridge_ending_image)
self.ridge_ending_image_label.grid(row=4, column=0, sticky=W)
| IgniparousTempest/py-minutiae-viewer | pyminutiaeviewer/gui_editor.py | Python | mit | 5,203 |
"""This screws up visualize.py"""
"""
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.lines import Line2D
from torch.autograd import Variable as Var
from torch import Tensor
class RealtimePlot():
def __init__(self, style='ggplot'):
plt.style.use(style)
plt.ion()
self.fig, self.ax = plt.subplots()
self.xlim = 0
self.yvals = []
self.line = Line2D([], [])
self.ax.add_line(self.line)
def config(self, ylabel, xlabel):
self.ax.set_ylabel(ylabel)
self.ax.set_xlabel(xlabel)
self.fig.tight_layout()
def plot(self, y):
self.yvals.append(y)
self.line.set_data(np.arange(len(self.yvals)), self.yvals)
self.ax.relim()
self.ax.autoscale_view()
self.ax.set_xlim(0, self.xlim)
self.xlim += 1
self.fig.canvas.flush_events()
def done(self):
plt.ioff()
plt.show()
def policyplot(env, policy, trj_len):
obs_dim = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]
y = np.zeros((trj_len, action_dim))
X = np.zeros((trj_len, obs_dim))
obs = env.reset()
for t in range(trj_len):
X[t, :] = obs
action = policy(Var(Tensor(obs[None, :]))).data.numpy()[0]
y[t, :] = action
obs = env.step(action)[0]
fig, axes = plt.subplots(1, action_dim)
for a in range(action_dim):
axes[a].plot(np.arange(trj_len), y[:, a])
plt.show()
"""
| p-morais/rl | rl/utils/plotting.py | Python | mit | 1,503 |
# coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import unittest
import time
from msrestazure.azure_exceptions import CloudError
import azure.mgmt.relay.models
from azure.mgmt.relay.models import RelayNamespace, Sku, SkuTier, Relaytype, AuthorizationRule, AccessRights, AccessKeys, WcfRelay, ErrorResponseException, ErrorResponse
from azure.common.credentials import ServicePrincipalCredentials
from devtools_testutils import AzureMgmtTestCase, ResourceGroupPreparer
class MgmtWcfRelayTest(AzureMgmtTestCase):
def setUp(self):
super(MgmtWcfRelayTest, self).setUp()
self.relay_client = self.create_mgmt_client(
azure.mgmt.relay.RelayManagementClient
)
@ResourceGroupPreparer()
def test_wcfrelay_curd(self, resource_group, location):
resource_group_name = resource_group.name
#Create a Namespace
namespace_name = "testingpythontestcaseeventhubnamespaceEventhub"
namespaceparameter = RelayNamespace(location, {'tag1': 'value1', 'tag2': 'value2'}, Sku(SkuTier.standard))
creatednamespace = self.relay_client.namespaces.create_or_update(resource_group_name, namespace_name, namespaceparameter).result()
self.assertEqual(creatednamespace.name, namespace_name)
#
# # Get created Namespace
#
getnamespaceresponse = self.relay_client.namespaces.get(resource_group_name, namespace_name)
self.assertEqual(getnamespaceresponse.name, namespace_name)
# Create a WcfRelay
wcfrelay_name = "testingpythontestcasewcfrelay"
wcfrelayparameter = WcfRelay(
relay_type=Relaytype.net_tcp,
requires_client_authorization=True,
requires_transport_security=True,
user_metadata="User data for WcfRelay"
)
createdwcfrelayresponse = self.relay_client.wcf_relays.create_or_update(resource_group_name, namespace_name, wcfrelay_name, wcfrelayparameter)
self.assertEqual(createdwcfrelayresponse.name, wcfrelay_name)
self.assertEqual(createdwcfrelayresponse.requires_client_authorization, True)
#Get the created wcfRelay
geteventhubresponse = self.relay_client.wcf_relays.get(resource_group_name, namespace_name, wcfrelay_name)
self.assertEqual(geteventhubresponse.name, wcfrelay_name)
self.assertEqual(geteventhubresponse.requires_transport_security, True)
self.assertEqual(geteventhubresponse.user_metadata, "User data for WcfRelay")
#Get the List of wcfRealy by namespaces
getlistbynamespacewcfrelayresponse = list(self.relay_client.wcf_relays.list_by_namespace(resource_group_name, namespace_name))
self.assertGreater(len(getlistbynamespacewcfrelayresponse), 0)
# update the Created eventhub
wcfrelayupdateparameter = WcfRelay(
relay_type=Relaytype.net_tcp,
user_metadata="User data for WcfRelay updated"
)
updatewcfrelayresponse = self.relay_client.wcf_relays.create_or_update(resource_group_name, namespace_name,
wcfrelay_name, wcfrelayupdateparameter)
self.assertEqual(updatewcfrelayresponse.name, wcfrelay_name)
self.assertEqual(updatewcfrelayresponse.requires_transport_security, True)
self.assertEqual(updatewcfrelayresponse.requires_client_authorization, True)
self.assertEqual(updatewcfrelayresponse.user_metadata, "User data for WcfRelay updated")
# Create a new authorizationrule
authoRule_name = "testingauthrulepy"
createwcfrelayauthorule = self.relay_client.wcf_relays.create_or_update_authorization_rule(resource_group_name, namespace_name, wcfrelay_name, authoRule_name,[AccessRights('Send'),AccessRights('Listen')])
self.assertEqual(createwcfrelayauthorule.name, authoRule_name, "Authorization rule name not as created - create_or_update_authorization_rule ")
self.assertEqual(len(createwcfrelayauthorule.rights), 2)
# Get the created authorizationrule
getwcfrelayauthorule = self.relay_client.wcf_relays.get_authorization_rule(resource_group_name, namespace_name, wcfrelay_name, authoRule_name)
self.assertEqual(getwcfrelayauthorule.name, authoRule_name, "Authorization rule name not as passed as parameter - get_authorization_rule ")
self.assertEqual(len(getwcfrelayauthorule.rights), 2, "Access rights mis match as created - get_authorization_rule ")
# update the rights of the authorizatiorule
getwcfrelayauthorule.rights.append('Manage')
updatewcfrelayauthorule = self.relay_client.wcf_relays.create_or_update_authorization_rule(resource_group_name, namespace_name, wcfrelay_name, authoRule_name, getwcfrelayauthorule.rights)
self.assertEqual(updatewcfrelayauthorule.name, authoRule_name, "Authorization rule name not as passed as parameter for update call - create_or_update_authorization_rule ")
self.assertEqual(len(updatewcfrelayauthorule.rights), 3, "Access rights mis match as updated - create_or_update_authorization_rule ")
#list all the authorization ruels for the given namespace
wcfrelayauthorulelist = list(self.relay_client.wcf_relays.list_authorization_rules(resource_group_name, namespace_name, wcfrelay_name))
self.assertEqual(len(wcfrelayauthorulelist), 1, "number of authorization rule mismatch with the created + default = 2 - list_authorization_rules")
#List keys for the authorization rule
listkeysauthorizationrule = self.relay_client.wcf_relays.list_keys(resource_group_name, namespace_name, wcfrelay_name, authoRule_name)
self.assertIsNotNone(listkeysauthorizationrule)
# regenerate Keys for authorizationrule - Primary
regenratePrimarykeyauthorizationrule = self.relay_client.wcf_relays.regenerate_keys(resource_group_name, namespace_name, wcfrelay_name, authoRule_name, 'PrimaryKey')
self.assertNotEqual(listkeysauthorizationrule.primary_key,regenratePrimarykeyauthorizationrule.primary_key)
# regenerate Keys for authorizationrule - Primary
regenrateSecondarykeyauthorizationrule = self.relay_client.wcf_relays.regenerate_keys(resource_group_name,namespace_name, wcfrelay_name, authoRule_name, 'SecondaryKey')
self.assertNotEqual(listkeysauthorizationrule.secondary_key, regenrateSecondarykeyauthorizationrule.secondary_key)
# delete the authorizationrule
self.relay_client.wcf_relays.delete_authorization_rule(resource_group_name, namespace_name, wcfrelay_name, authoRule_name)
# Delete the created WcfRelay
getwcfrelayresponse = self.relay_client.wcf_relays.delete(resource_group_name, namespace_name, wcfrelay_name)
# Delete the create namespace
self.relay_client.namespaces.delete(resource_group_name, namespace_name).result()
# ------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
| lmazuel/azure-sdk-for-python | azure-mgmt-relay/tests/test_azure_mgmt_wcfrelay.py | Python | mit | 7,326 |
from tkinter import *
from io import BytesIO              # needed for BytesIO(raw_data) below
from urllib.request import urlopen  # needed for urlopen(web + URL) below
import mysql.connector as mysql
from MySQLdb import dbConnect
from HomeOOP import *
import datetime
from PIL import Image, ImageTk
class MainMenu(Frame):
def __init__(self, parent): #The very first screen of the web app
Frame.__init__(self, parent)
w, h = parent.winfo_screenwidth(), parent.winfo_screenheight()
#parent.overrideredirect(1)
parent.geometry("%dx%d+0+0" % (w, h))
frame = Frame(parent, width=w, height=h).place(x=350, y=450)
# frame.pack(expand=True)
# canvas = Canvas(parent, width=w, height=h)
# scale_width = w / 3900
# scale_height = h / 2613
web = "https://raw.githubusercontent.com/ACBL-Bridge/Bridge-Application/master/Login/"
URL = "login_background_resized.jpg"
u = urlopen(web + URL)
raw_data = u.read()
u.close()
im = Image.open(BytesIO(raw_data))
bckgrd = ImageTk.PhotoImage(im)
login_bckgrd = Label(frame, image=bckgrd)
login_bckgrd.image = bckgrd
login_bckgrd.place(x=0, y=0, relwidth=1, relheight=1)
titleLabel = Label(frame, text="LET'S PLAY BRIDGE", fg="black", font='Arial 36')
titleLabel.pack(side="top", pady=150)
loginButton = Button(frame, text="Existing User", fg="blue", font="Arial 14", command=lambda: self.LoginScreen(parent))
loginButton.pack(side='top')
signupButton = Button(frame, text="Sign up", fg="blue", font="Arial 14", command=self.SignupScreen)
signupButton.pack(side="top")
quitButton = Button(frame, text="Quit", font="Arial 14", command=self.SignupScreen)
quitButton.pack(side="top")
####################################Login - GUI ###########################
def LoginScreen(self,parent):
global entry_user
global entry_pass
top = Toplevel(self)
top.title("Log In - ABCL")
w, h = top.winfo_screenwidth(), top.winfo_screenheight()
top.overrideredirect(1)
top.geometry("550x400+%d+%d" % (w/2-275, h/2-125)) #250
#top.configure(background = 'white')
quitButton = Button(top, text="Go Back", font="Arial 14", command= top.destroy).pack(side="bottom", padx=20)
#entry_user = StringVar()
#entry_pass = StringVar()
# Frames to divide the window into three parts.. makes it easier to organize the widgets
topFrame = Frame(top)
topFrame.pack()
middleFrame = Frame(top)
middleFrame.pack(pady=50)
bottomFrame = Frame(top)
bottomFrame.pack(side=BOTTOM)
# Widgets and which frame they are in
#label = Label(topFrame, text="LET'S PLAY BRIDGE")
userLabel = Label(middleFrame, text='Username:', font="Arial 14")
passLabel = Label(middleFrame, text='Password:', font="Arial 14")
entry_user = Entry(middleFrame) # For DB
entry_pass = Entry(middleFrame, show ='*') # For DB
b = Button(bottomFrame, text="Log In",fg ="blue", font ="Arial 14", command=lambda: get_Login_input(self, parent))
#Location of the Widgets in their frames
#label.pack(side="top", fill="both", expand=True, padx=20, pady=20)
userLabel.grid(row=10, column=0, sticky=W, padx=20)
entry_user.grid(row=10, column=1, padx=20)
passLabel.grid(row=11, column=0, sticky=W, padx=20)
entry_pass.grid(row=11, column=1, padx=20)
b.grid(row=12, columnspan=2)
###############################################DATABASE Check Login!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def go_to_HomePage(user):
root = Tk()
app = Home(root,user)
root.mainloop()
def get_Login_input(self, parent):
var = dbConnect()
dbconn = mysql.connect(host=var.host, user=var.user, password=var.password, db=var.db)
cur = dbconn.cursor() # Cursor object - required to execute all queries
cur.execute("SELECT username FROM playerinfo WHERE username = '%s' AND password = '%s'" % (entry_user.get(), entry_pass.get()))
rows = cur.fetchall()
if rows:
cur.execute("SELECT firstname, lastname, username FROM playerinfo WHERE username = '%s' AND password = '%s'" % (entry_user.get(), entry_pass.get()))
for namerow in cur.fetchall(): # print all the first cell
fn = namerow[0] #store firstname
ln = namerow[1] #store lastname
user = namerow[2]
self.destroy()
parent.destroy()
go_to_HomePage(user)
'''top = Toplevel(self)
w, h = top.winfo_screenwidth(), top.winfo_screenheight()
top.overrideredirect(1)
top.geometry("%dx%d+0+0" % (w, h))
# Frames to divide the window into three parts.. makes it easier to organize the widgets
topFrame = Frame(top)
topFrame.pack()
middleFrame = Frame(top)
middleFrame.pack(pady=250)
bottomFrame = Frame(top)
bottomFrame.pack(side=BOTTOM)
myProfileButton = Button(middleFrame, text="My Profile", fg="blue", font="Arial 14", command=self.myProfileScreen)
myProfileButton.pack()
quitButton = Button(top, text="Log Out", font="Arial 14", command=top.destroy).pack(side="bottom", padx=20)
#top.title(':D')
#top.geometry('250x200')
#get first name and last name of current player
cur.execute("SELECT firstname, lastname FROM playerinfo WHERE username = '%s' AND password = '%s'" % (entry_user.get(), entry_pass.get()))
for namerow in cur.fetchall(): # print all the first cell
fn = namerow[0] #store firstname
ln = namerow[1] #store lastname
rlb1 = Label(middleFrame, text='\nWelcome %s %s\n' % (fn, ln), font="Arial 14")
rlb1.pack()
rlb2 = Label(middleFrame, text='\nUserName: %s' % entry_user.get(), font="Arial 14")
rlb2.pack()
top.mainloop()
self.destroy()
parent.destroy()
go_to_HomePage()'''
else:
r = Tk()
r.title(':D')
r.geometry('150x150')
rlbl = Label(r, text='\n[!] Invalid Login')
rlbl.pack()
r.mainloop()
dbconn.close()
########################################## SIGN UP SCREEN - GUI ####################################################
def SignupScreen(self):
global entry_fname
global entry_lname
global entry_user
global entry_pass
global entry_repass
global entry_email
global entry_ACBL
global entry_disID
top = Toplevel(self)
w, h = top.winfo_screenwidth(), top.winfo_screenheight()
top.overrideredirect(1)
top.geometry("550x450+%d+%d" % (w / 2 - 275, h / 2 - 140)) # 250
#top.configure(background='white')
quitButton = Button(top, text="Go Back", font="Arial 14", command= top.destroy).pack(side="bottom", padx=20)
#topFrame = Frame(top)
#topFrame.pack()
middleFrame = Frame(top)
middleFrame.pack(pady=50)
bottomFrame = Frame(top)
bottomFrame.pack(side=BOTTOM)
# Widgets and which frame they are in
#label = Label(topFrame, text="LET'S PLAY BRIDGE")
fnameLabel = Label(middleFrame,text = 'First Name:',font="Arial 14")
lnameLabel = Label(middleFrame, text='Last Name:',font="Arial 14")
userLabel = Label(middleFrame, text='Username:',font="Arial 14")
passLabel = Label(middleFrame, text='Password:',font="Arial 14")
repassLabel = Label(middleFrame, text='Re-Enter Password:',font="Arial 14")
emailLabel = Label(middleFrame, text='Email(optional):',font="Arial 14")
ACBLnumLabel = Label(middleFrame, text='ACBLnum(optional):',font="Arial 14")
disIDLabel = Label(middleFrame, text='DistrictID(optional):',font="Arial 14")
entry_fname = Entry(middleFrame) #For DB
entry_lname = Entry(middleFrame) #For DB
entry_user = Entry(middleFrame)#For DB
entry_pass = Entry(middleFrame, show = '*')#For DB
entry_repass = Entry(middleFrame, show = '*')#For DB
entry_email = Entry(middleFrame)#For DB
entry_ACBL = Entry(middleFrame)#For DB
entry_disID = Entry(middleFrame)#For DB
b = Button(bottomFrame, text="Sign up", font="Arial 14", command=lambda : combined_Functions(self))
# Location of the Widgets in their frames
#label.pack(side="top", fill="both", expand=True, padx=20, pady=20)
fnameLabel.grid(row=1, column=0, sticky=W)
entry_fname.grid(row=1, column=1)
lnameLabel.grid(row=2, column=0, sticky=W)
entry_lname.grid(row=2, column=1)
userLabel.grid(row=3, column=0, sticky=W)
entry_user.grid(row=3, column=1)
passLabel.grid(row=4, column=0, sticky=W)
entry_pass.grid(row=4, column=1)
repassLabel.grid(row=5, column=0, sticky=W)
entry_repass.grid(row=5, column=1)
emailLabel.grid(row=6, column=0, sticky=W)
entry_email.grid(row=6, column=1, padx=20, sticky= W)
ACBLnumLabel.grid(row=7, column=0, sticky=W)
entry_ACBL.grid(row=7, column=1, padx=20)
disIDLabel.grid(row=8, column=0, sticky=W)
entry_disID.grid(row=8, column=1)
b.grid(row=10, columnspan=2)
####################################DATABASE Check if Username is available, check if passwords Match -> if so SIGN UP!!!!!!!!!!!!!!!
def get_Signup_input():
var = dbConnect()
dbconn = mysql.connect(host=var.host, user=var.user, password=var.password, db=var.db)
cur = dbconn.cursor() # Cursor object - required to execute all queries
cur.execute("SELECT username FROM playerinfo WHERE username = '%s'" % entry_user.get())
rows = cur.fetchall()
if not rows:
# print(userInput + " is available")
if (entry_pass.get() == entry_repass.get()) and (entry_pass.get()!= "") and (entry_repass.get()!= ""):
# print("passwords match, good job brotha")
# INSERT new player ... playerinfo check
todaysdate = datetime.datetime.today().strftime('%Y-%m-%d') # current date
cur.execute("INSERT INTO playerinfo(username, password, signUpDate, firstname, lastname, email, ACLnum, districtID) VALUES('%s','%s','%s','%s','%s','%s','%s','%s')" % (
entry_user.get(), entry_pass.get(), todaysdate, entry_fname.get(), entry_lname.get(), entry_email.get(),entry_ACBL.get(), entry_disID.get()))
#get new player's ID
cur.execute("SELECT ID FROM playerinfo WHERE username='%s'" % entry_user.get())
for namerow in cur.fetchall(): # print all the first cell
idNum = namerow[0] # store ID number
# new player's...playerstats inserted by ID
cur.execute("INSERT INTO playerstats(ID) VALUES('%s')" % idNum)
dbconn.commit() #database commit aka save
r = Tk()
r.title(':D')
r.geometry('150x150')
rlbl = Label(r, text='\n[+] Signed Up!')
rlbl.pack()
r.mainloop()
else:
# print("passwords don't match bruh or are NULL")
r = Tk()
r.title(':D')
r.geometry('150x150')
rlbl = Label(r, text='\n[!] Retype your passwords')
rlbl.pack()
r.mainloop()
else:
r = Tk()
r.title(':D')
r.geometry('150x150')
rlbl = Label(r, text='\n[!] Username Not Available ')
rlbl.pack()
r.mainloop()
dbconn.close()
def go_to_Tutorial():
window = Toplevel()
window.geometry("600x500")
quitButton = Button(window, text="Cancel", font="Arial 14", command= window.destroy).pack(side="bottom", padx=20)
top_Frame = Frame(window)
top_Frame.pack()
tLabel = Label(top_Frame, text="TUTORIAL", font="Arial 36").pack(side="top", fill="both", expand=True, padx=20, pady=20)
def combined_Functions(self): # for the Sign Up button - store data, exits Sign Up screen, goes to Tutorial screen
get_Signup_input()
# top.destroy()
#go_to_Tutorial()
#####################################My Profile - GUI #########################################
def myProfileScreen(self):
top = Toplevel(self)
w, h = top.winfo_screenwidth(), top.winfo_screenheight()
top.overrideredirect(1)
w, h = self.winfo_screenwidth(), self.winfo_screenheight()
top.overrideredirect(1)
top.geometry("%dx%d+0+0" % (w, h))
topFrame = Frame(top)
topFrame.pack()
bottomFrame = Frame(top)
bottomFrame.pack(side=BOTTOM)
rightFrame = Frame(top)
rightFrame.pack(side= RIGHT)
leftFrame = Frame(top)
leftFrame.pack(side=LEFT)
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@DB stuff@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
#entry_user.get() //username
var = dbConnect()
dbconn = mysql.connect(host=var.host, user=var.user, password=var.password, db=var.db)
cur = dbconn.cursor() # Cursor object - required to execute all queries
global data
data=[]
# get all info from playerinfo and playerstats using current username
cur.execute(
"SELECT playerinfo.firstname, playerinfo.lastname, playerinfo.username, playerinfo.email, playerinfo.signUpDate, playerinfo.districtID, playerinfo.ACLnum, playerstats.dealsplayed, playerstats.level, playerstats.exp, playerstats.coins, playerstats.tournys FROM playerstats INNER JOIN playerinfo ON playerinfo.ID=playerstats.ID AND playerinfo.username='%s'" % entry_user.get())
for namerow in cur.fetchall(): # print all info
fn = namerow[0] # firstname
ln = namerow[1] # lastname
un = namerow[2] #username
em = namerow[3] # email
sData = namerow[4] # signUpDate
districtID = namerow[5] # District ID
acblNumba = namerow[6] # ACBL Number
dPlay = namerow[7] #deals played
lvl = namerow[8] # level
exp = namerow[9] # experience
coins = namerow[10] # coins
tornys = namerow[11] # tournaments
dbconn.close() #close db connection
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
label = Label(topFrame, text="LET'S PLAY BRIDGE",font =('Coralva', 42)).pack(side="top", fill="both", expand=True)
mpLabel = Label(rightFrame, text='My Profile: ', font = ('Comic Sans MS',24)).grid(ipadx = 200, columnspan = 2)
nameLabel = Label(rightFrame, text="Name: %s %s" % (fn, ln), font = ('Comic Sans MS',14)).grid(row=1, column=0, sticky = W)
userLabel = Label(rightFrame, text='Username: %s' % un, font = ('Comic Sans MS',14)).grid(row=2, column=0, sticky = W)
emailLabel = Label (rightFrame, text='Email: %s' % em, font = ('Comic Sans MS',14)).grid(row=3, column=0, sticky = W)
sLabel = Label(rightFrame, text='Signup Date: %s' %sData, font = ('Comic Sans MS',14)).grid(row=4, column=0, sticky = W)
disIDLabel = Label(rightFrame, text='DistrictID: %s' % districtID , font = ('Comic Sans MS',14)).grid(row=5, column=0, sticky = W)
ACBLnumLabel = Label(rightFrame, text='ACBL #: %s' % acblNumba, font = ('Comic Sans MS',14)).grid(row=6, column=0, sticky = W)
nothing = Label(rightFrame).grid(row=7, column=0)
msLabel= Label(rightFrame, text='My Stats', font = ('Comic Sans MS',14, 'bold')).grid(row=8, column=0, sticky = W)
dpLabel = Label(rightFrame, text='Deals Played: %s' %dPlay, font = ('Comic Sans MS',14)).grid(row=9, column=0, sticky = W)
levelLabel = Label(rightFrame, text='Level: %s' % lvl, font = ('Comic Sans MS',14)).grid(row=10, column=0, sticky = W)
expLabel = Label(rightFrame, text='Experience: %s' % exp, font = ('Comic Sans MS',14)).grid(row=11, column=0, sticky = W)
coinsLabel = Label(rightFrame, text='Coins: %s' % coins, font = ('Comic Sans MS',14)).grid(row=12, column=0, sticky = W)
tourLabel = Label(rightFrame, text='Tournaments: %s' % tornys, font = ('Comic Sans MS',14)).grid(row=13, column=0, sticky = W)
#b = Button(bottomFrame, text="HOME",font = 'Arial 12').pack(side=LEFT) #FIND A IMAGE OF A HOUSE
quitButton = Button(bottomFrame, text="Go Back", command=top.destroy, font = 'Arial 12').pack(side = RIGHT)
root = Tk()
MainMenu(root).pack(fill="both", expand=True)
root.mainloop()
| ACBL-Bridge/Bridge-Application | Home Files/LoginandSignupV10.py | Python | mit | 17,362 |
# -*- coding: utf-8 -*-
from youtrack import YouTrackException
def utf8encode(source):
if isinstance(source, str):
source = source.encode('utf-8')
return source
def _create_custom_field_prototype(connection, cf_type, cf_name, auto_attached=False, additional_params=None):
if additional_params is None:
additional_params = dict([])
field = _get_custom_field(connection, cf_name)
if field is not None:
if field.type != cf_type:
msg = "Custom field with name [ %s ] already exists. It has type [ %s ] instead of [ %s ]" % \
(utf8encode(cf_name), field.type, cf_type)
raise LogicException(msg)
else:
connection.create_custom_field_detailed(cf_name, cf_type, False, True, auto_attached, additional_params)
def _get_custom_field(connection, cf_name):
existing_fields = [item for item in connection.get_custom_fields() if utf8encode(item.name).lower() ==
utf8encode(cf_name).lower()]
if len(existing_fields):
return existing_fields[0]
return None
def create_custom_field(connection, cf_type, cf_name, auto_attached, value_names=None, bundle_policy="0"):
"""
Creates custom field prototype(if not exist) and sets default values bundle if needed
Args:
connection: An opened Connection instance.
cf_type: Type of custom field to be created
cf_name: Name of custom field that should be created (if not exists)
auto_attached: If this field should be auto attached or not.
value_names: Values, that should be attached with this cf by default.
If None, no bundle is created to this field, if empty, empty bundle is created.
bundle_policy: ???
Raises:
LogicException: If custom field already exists, but has wrong type.
YouTrackException: If something is wrong with queries.
"""
if (value_names is None) and (not auto_attached or "[" not in cf_type):
_create_custom_field_prototype(connection, cf_type, cf_name, auto_attached)
return
if value_names is None:
value_names = set([])
else:
value_names = set(value_names)
field = _get_custom_field(connection, cf_name)
if field is not None:
if hasattr(field, "defaultBundle"):
bundle = connection.get_bundle(field.type, field.defaultBundle)
elif field.autoAttached:
return
else:
bundle = create_bundle_safe(connection, cf_name + "_bundle", cf_type)
else:
bundle = create_bundle_safe(connection, cf_name + "_bundle", cf_type)
_create_custom_field_prototype(connection, cf_type, cf_name, auto_attached,
{"defaultBundle": bundle.name,
"attachBundlePolicy": bundle_policy})
for value_name in value_names:
try:
connection.add_value_to_bundle(bundle, value_name)
except YouTrackException:
pass
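# Illustrative call only (added): the field name and values here are made up, and the
# 'enum[1]' type string is an assumption about YouTrack's custom-field type syntax.
#
#   create_custom_field(connection, 'enum[1]', 'Subsystem', auto_attached=False,
#                       value_names=['UI', 'Backend'])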
#
# values_to_add = calculate_missing_value_names(bundle, value_names)
# [connection.addValueToBundle(bundle, name) for name in values_to_add]
# if field is None:
# bundle_name = cf_name + "_bundle"
# _create_bundle_safe(connection, bundle_name, cf_type)
# bundle = connection.getBundle(cf_type, bundle_name)
# values_to_add = calculate_missing_value_names(bundle, value_names)
#
#
# for value in values_to_add:
# connection.addValueToBundle(bundle, value)
#
#
def process_custom_field(connection, project_id, cf_type, cf_name, value_names=None):
"""
Creates custom field and attaches it to the project. If custom field already exists and has type
cf_type it is attached to the project. If it has another type, LogicException is raised. If project field already
exists, uses it and bundle from it. If not, creates project field and bundle with name
<cf_name>_bundle_<project_id> for it.
Adds value_names to bundle.
Args:
connection: An opened Connection instance.
project_id: Id of the project to attach CF to.
cf_type: Type of cf to be created.
cf_name: Name of cf that should be created (if not exists) and attached to the project (if not yet attached)
value_names: Values, that cf must have. If None, does not create any bundle for the field. If empty list,
creates bundle, but does not create any value_names in it. If bundle already contains
some value_names, only value_names that do not already exist are added.
Raises:
LogicException: If custom field already exists, but has wrong type.
YouTrackException: If something is wrong with queries.
"""
_create_custom_field_prototype(connection, cf_type, cf_name)
if cf_type[0:-3] not in connection.bundle_types:
value_names = None
elif value_names is None:
value_names = []
existing_project_fields = [item for item in connection.getProjectCustomFields(project_id) if
utf8encode(item.name) == cf_name]
if len(existing_project_fields):
if value_names is None:
return
bundle = connection.getBundle(cf_type, existing_project_fields[0].bundle)
values_to_add = calculate_missing_value_names(bundle, value_names)
else:
if value_names is None:
connection.createProjectCustomFieldDetailed(project_id, cf_name, "No " + cf_name)
return
bundle = create_bundle_safe(connection, cf_name + "_bundle_" + project_id, cf_type)
values_to_add = calculate_missing_value_names(bundle, value_names)
connection.createProjectCustomFieldDetailed(project_id, cf_name, "No " + cf_name,
params={"bundle": bundle.name})
for name in values_to_add:
connection.addValueToBundle(bundle, bundle.createElement(name))
def add_values_to_bundle_safe(connection, bundle, values):
"""
Adds values to specified bundle. Checks, whether each value already contains in bundle. If yes, it is not added.
Args:
connection: An opened Connection instance.
bundle: Bundle instance to add values in.
values: Values, that should be added in bundle.
Raises:
YouTrackException: if something is wrong with queries.
"""
for value in values:
try:
connection.addValueToBundle(bundle, value)
except YouTrackException as e:
if e.response.status == 409:
print("Value with name [ %s ] already exists in bundle [ %s ]" %
(utf8encode(value.name), utf8encode(bundle.name)))
else:
raise e
def create_bundle_safe(connection, bundle_name, bundle_type):
bundle = connection.bundle_types[bundle_type[0:-3]](None, None)
bundle.name = bundle_name
try:
connection.createBundle(bundle)
except YouTrackException as e:
if e.response.status == 409:
print("Bundle with name [ %s ] already exists" % bundle_name)
else:
raise e
return connection.getBundle(bundle_type, bundle_name)
def calculate_missing_value_names(bundle, value_names):
bundle_elements_names = [elem.name.lower() for elem in bundle.values]
return [value for value in value_names if value.lower() not in bundle_elements_names]
class LogicException(Exception):
def __init__(self, msg):
Exception.__init__(self, msg)
| devopshq/youtrack | youtrack/import_helper.py | Python | mit | 7,567 |
from __future__ import division
| laowantong/mocodo | mocodo/tests/__init__.py | Python | mit | 33 |
import inspect
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping
import six
from .reducers import tuple_reducer, path_reducer, dot_reducer, underscore_reducer
from .splitters import tuple_splitter, path_splitter, dot_splitter, underscore_splitter
REDUCER_DICT = {
"tuple": tuple_reducer,
"path": path_reducer,
"dot": dot_reducer,
"underscore": underscore_reducer,
}
SPLITTER_DICT = {
"tuple": tuple_splitter,
"path": path_splitter,
"dot": dot_splitter,
"underscore": underscore_splitter,
}
def flatten(
d,
reducer="tuple",
inverse=False,
max_flatten_depth=None,
enumerate_types=(),
keep_empty_types=(),
):
"""Flatten `Mapping` object.
Parameters
----------
d : dict-like object
The dict that will be flattened.
reducer : {'tuple', 'path', 'underscore', 'dot', Callable}
The key joining method. If a `Callable` is given, the `Callable` will be
used to reduce.
'tuple': The resulting key will be tuple of the original keys.
'path': Use `os.path.join` to join keys.
'underscore': Use underscores to join keys.
'dot': Use dots to join keys.
inverse : bool
Whether you want invert the resulting key and value.
max_flatten_depth : Optional[int]
Maximum depth to merge.
enumerate_types : Sequence[type]
Flatten these types using `enumerate`.
For example, if we set `enumerate_types` to ``(list,)``,
`list` indices become keys: ``{'a': ['b', 'c']}`` -> ``{('a', 0): 'b', ('a', 1): 'c'}``.
keep_empty_types : Sequence[type]
By default, ``flatten({1: 2, 3: {}})`` will give you ``{(1,): 2}``, that is, the key ``3``
will disappear.
This is also applied for the types in `enumerate_types`, that is,
``flatten({1: 2, 3: []}, enumerate_types=(list,))`` will give you ``{(1,): 2}``.
If you want to keep those empty values, you can specify the types in `keep_empty_types`:
>>> flatten({1: 2, 3: {}}, keep_empty_types=(dict,))
{(1,): 2, (3,): {}}
Returns
-------
flat_dict : dict
"""
enumerate_types = tuple(enumerate_types)
flattenable_types = (Mapping,) + enumerate_types
if not isinstance(d, flattenable_types):
raise ValueError(
"argument type %s is not in the flattenalbe types %s"
% (type(d), flattenable_types)
)
# check max_flatten_depth
if max_flatten_depth is not None and max_flatten_depth < 1:
raise ValueError("max_flatten_depth should not be less than 1.")
if isinstance(reducer, str):
reducer = REDUCER_DICT[reducer]
try:
# Python 3
reducer_accepts_parent_obj = len(inspect.signature(reducer).parameters) == 3
except AttributeError:
# Python 2
reducer_accepts_parent_obj = len(inspect.getargspec(reducer)[0]) == 3
flat_dict = {}
def _flatten(_d, depth, parent=None):
key_value_iterable = (
enumerate(_d) if isinstance(_d, enumerate_types) else six.viewitems(_d)
)
has_item = False
for key, value in key_value_iterable:
has_item = True
if reducer_accepts_parent_obj:
flat_key = reducer(parent, key, _d)
else:
flat_key = reducer(parent, key)
if isinstance(value, flattenable_types) and (
max_flatten_depth is None or depth < max_flatten_depth
):
# recursively build the result
has_child = _flatten(value, depth=depth + 1, parent=flat_key)
if has_child or not isinstance(value, keep_empty_types):
# ignore the key in this level because it already has child key
# or its value is empty
continue
# add an item to the result
if inverse:
flat_key, value = value, flat_key
if flat_key in flat_dict:
raise ValueError("duplicated key '{}'".format(flat_key))
flat_dict[flat_key] = value
return has_item
_flatten(d, depth=1)
return flat_dict
def nested_set_dict(d, keys, value):
"""Set a value to a sequence of nested keys.
Parameters
----------
d : Mapping
keys : Sequence[str]
value : Any
"""
assert keys
key = keys[0]
if len(keys) == 1:
if key in d:
raise ValueError("duplicated key '{}'".format(key))
d[key] = value
return
d = d.setdefault(key, {})
nested_set_dict(d, keys[1:], value)
def unflatten(d, splitter="tuple", inverse=False):
"""Unflatten dict-like object.
Parameters
----------
d : dict-like object
The dict that will be unflattened.
splitter : {'tuple', 'path', 'underscore', 'dot', Callable}
The key splitting method. If a Callable is given, the Callable will be
used to split `d`.
'tuple': Use each element in the tuple key as the key of the unflattened dict.
'path': Use `pathlib.Path.parts` to split keys.
'underscore': Use underscores to split keys.
'dot': Use underscores to split keys.
inverse : bool
Whether you want to invert the key and value before flattening.
Returns
-------
unflattened_dict : dict
"""
if isinstance(splitter, str):
splitter = SPLITTER_DICT[splitter]
unflattened_dict = {}
for flat_key, value in six.viewitems(d):
if inverse:
flat_key, value = value, flat_key
key_tuple = splitter(flat_key)
nested_set_dict(unflattened_dict, key_tuple, value)
return unflattened_dict
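# Example round-trip (added for clarity), using the default tuple reducer/splitter
# defined above:
#
#   >>> flatten({"a": {"b": 1, "c": 2}})
#   {('a', 'b'): 1, ('a', 'c'): 2}
#   >>> unflatten({('a', 'b'): 1, ('a', 'c'): 2})
#   {'a': {'b': 1, 'c': 2}}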
| ianlini/flatten-dict | src/flatten_dict/flatten_dict.py | Python | mit | 5,769 |
from IPython.display import HTML
from bs4 import BeautifulSoup
import urllib
f = open('chars.txt', 'w')
r = urllib.urlopen('http://www.eventhubs.com/tiers/ssb4/').read()
soup = BeautifulSoup(r, "lxml")
characters = soup.find_all("td", class_="tierstdnorm")
count = 1
tierCharList=[]
for element in characters:
if count==1:
tier = element.get_text()
elif count==2:
character = element.get_text()
tierChar = tier + "," + character
tierCharList.append(tierChar)
elif count%12==1:
tier = element.get_text()
elif count%12==2:
character = element.get_text()
tierChar = tier + "," + character
tierCharList.append(tierChar)
count+=1
tierCharList.pop()
for x in range(0,len(tierCharList)):
    f.write(tierCharList[x])
    f.write("\n")
f.close()
|
bumshakabum/Kim_CSCI2270_FinalProject
|
websiteParser.py
|
Python
|
mit
| 852
|
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Miscellaneous utility functions."""
import os
import sys
import errno
import locale
import re
import tempfile
import shutil
import fnmatch
import functools
from collections import Counter, namedtuple
from multiprocessing.pool import ThreadPool
import traceback
import subprocess
import platform
import shlex
from beets.util import hidden
from unidecode import unidecode
from enum import Enum
MAX_FILENAME_LENGTH = 200
WINDOWS_MAGIC_PREFIX = '\\\\?\\'
class HumanReadableException(Exception):
"""An Exception that can include a human-readable error message to
be logged without a traceback. Can preserve a traceback for
debugging purposes as well.
Has at least two fields: `reason`, the underlying exception or a
string describing the problem; and `verb`, the action being
performed during the error.
If `tb` is provided, it is a string containing a traceback for the
associated exception. (Note that this is not necessary in Python 3.x
and should be removed when we make the transition.)
"""
error_kind = 'Error' # Human-readable description of error type.
def __init__(self, reason, verb, tb=None):
self.reason = reason
self.verb = verb
self.tb = tb
super().__init__(self.get_message())
def _gerund(self):
"""Generate a (likely) gerund form of the English verb.
"""
if ' ' in self.verb:
return self.verb
gerund = self.verb[:-1] if self.verb.endswith('e') else self.verb
gerund += 'ing'
return gerund
def _reasonstr(self):
"""Get the reason as a string."""
if isinstance(self.reason, str):
return self.reason
elif isinstance(self.reason, bytes):
return self.reason.decode('utf-8', 'ignore')
elif hasattr(self.reason, 'strerror'): # i.e., EnvironmentError
return self.reason.strerror
else:
return '"{}"'.format(str(self.reason))
def get_message(self):
"""Create the human-readable description of the error, sans
introduction.
"""
raise NotImplementedError
def log(self, logger):
"""Log to the provided `logger` a human-readable message as an
error and a verbose traceback as a debug message.
"""
if self.tb:
logger.debug(self.tb)
logger.error('{0}: {1}', self.error_kind, self.args[0])
class FilesystemError(HumanReadableException):
"""An error that occurred while performing a filesystem manipulation
via a function in this module. The `paths` field is a sequence of
pathnames involved in the operation.
"""
def __init__(self, reason, verb, paths, tb=None):
self.paths = paths
super().__init__(reason, verb, tb)
def get_message(self):
# Use a nicer English phrasing for some specific verbs.
if self.verb in ('move', 'copy', 'rename'):
clause = 'while {} {} to {}'.format(
self._gerund(),
displayable_path(self.paths[0]),
displayable_path(self.paths[1])
)
elif self.verb in ('delete', 'write', 'create', 'read'):
clause = 'while {} {}'.format(
self._gerund(),
displayable_path(self.paths[0])
)
else:
clause = 'during {} of paths {}'.format(
self.verb, ', '.join(displayable_path(p) for p in self.paths)
)
return f'{self._reasonstr()} {clause}'
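# Example (illustrative sketch): the filesystem helpers below wrap low-level
# OSErrors in FilesystemError so callers can log a readable message; the path
# variable here is only a placeholder.
#
#     try:
#         os.remove(some_path)
#     except OSError as exc:
#         raise FilesystemError(exc, 'delete', (some_path,),
#                               traceback.format_exc())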
class MoveOperation(Enum):
"""The file operations that e.g. various move functions can carry out.
"""
MOVE = 0
COPY = 1
LINK = 2
HARDLINK = 3
REFLINK = 4
REFLINK_AUTO = 5
def normpath(path):
"""Provide the canonical form of the path suitable for storing in
the database.
"""
path = syspath(path, prefix=False)
path = os.path.normpath(os.path.abspath(os.path.expanduser(path)))
return bytestring_path(path)
def ancestry(path):
"""Return a list consisting of path's parent directory, its
grandparent, and so on. For instance:
>>> ancestry('/a/b/c')
['/', '/a', '/a/b']
The argument should *not* be the result of a call to `syspath`.
"""
out = []
last_path = None
while path:
path = os.path.dirname(path)
if path == last_path:
break
last_path = path
if path:
# don't yield ''
out.insert(0, path)
return out
def sorted_walk(path, ignore=(), ignore_hidden=False, logger=None):
"""Like `os.walk`, but yields things in case-insensitive sorted,
breadth-first order. Directory and file names matching any glob
pattern in `ignore` are skipped. If `logger` is provided, then
warning messages are logged there when a directory cannot be listed.
"""
    # Make sure the paths aren't Unicode strings.
path = bytestring_path(path)
ignore = [bytestring_path(i) for i in ignore]
# Get all the directories and files at this level.
try:
contents = os.listdir(syspath(path))
except OSError as exc:
if logger:
logger.warning('could not list directory {}: {}'.format(
displayable_path(path), exc.strerror
))
return
dirs = []
files = []
for base in contents:
base = bytestring_path(base)
# Skip ignored filenames.
skip = False
for pat in ignore:
if fnmatch.fnmatch(base, pat):
if logger:
logger.debug('ignoring {} due to ignore rule {}'.format(
base, pat
))
skip = True
break
if skip:
continue
# Add to output as either a file or a directory.
cur = os.path.join(path, base)
if (ignore_hidden and not hidden.is_hidden(cur)) or not ignore_hidden:
if os.path.isdir(syspath(cur)):
dirs.append(base)
else:
files.append(base)
# Sort lists (case-insensitive) and yield the current level.
dirs.sort(key=bytes.lower)
files.sort(key=bytes.lower)
yield (path, dirs, files)
# Recurse into directories.
for base in dirs:
cur = os.path.join(path, base)
# yield from sorted_walk(...)
yield from sorted_walk(cur, ignore, ignore_hidden, logger)
def path_as_posix(path):
"""Return the string representation of the path with forward (/)
slashes.
"""
return path.replace(b'\\', b'/')
def mkdirall(path):
"""Make all the enclosing directories of path (like mkdir -p on the
parent).
"""
for ancestor in ancestry(path):
if not os.path.isdir(syspath(ancestor)):
try:
os.mkdir(syspath(ancestor))
except OSError as exc:
raise FilesystemError(exc, 'create', (ancestor,),
traceback.format_exc())
def fnmatch_all(names, patterns):
"""Determine whether all strings in `names` match at least one of
the `patterns`, which should be shell glob expressions.
"""
for name in names:
matches = False
for pattern in patterns:
matches = fnmatch.fnmatch(name, pattern)
if matches:
break
if not matches:
return False
return True
def prune_dirs(path, root=None, clutter=('.DS_Store', 'Thumbs.db')):
"""If path is an empty directory, then remove it. Recursively remove
path's ancestry up to root (which is never removed) where there are
empty directories. If path is not contained in root, then nothing is
removed. Glob patterns in clutter are ignored when determining
emptiness. If root is not provided, then only path may be removed
(i.e., no recursive removal).
"""
path = normpath(path)
if root is not None:
root = normpath(root)
ancestors = ancestry(path)
if root is None:
# Only remove the top directory.
ancestors = []
elif root in ancestors:
# Only remove directories below the root.
ancestors = ancestors[ancestors.index(root) + 1:]
else:
# Remove nothing.
return
# Traverse upward from path.
ancestors.append(path)
ancestors.reverse()
for directory in ancestors:
directory = syspath(directory)
if not os.path.exists(directory):
# Directory gone already.
continue
clutter = [bytestring_path(c) for c in clutter]
match_paths = [bytestring_path(d) for d in os.listdir(directory)]
try:
if fnmatch_all(match_paths, clutter):
# Directory contains only clutter (or nothing).
shutil.rmtree(directory)
else:
break
except OSError:
break
def components(path):
"""Return a list of the path components in path. For instance:
>>> components('/a/b/c')
['a', 'b', 'c']
The argument should *not* be the result of a call to `syspath`.
"""
comps = []
ances = ancestry(path)
for anc in ances:
comp = os.path.basename(anc)
if comp:
comps.append(comp)
else: # root
comps.append(anc)
last = os.path.basename(path)
if last:
comps.append(last)
return comps
def arg_encoding():
"""Get the encoding for command-line arguments (and other OS
locale-sensitive strings).
"""
try:
return locale.getdefaultlocale()[1] or 'utf-8'
except ValueError:
# Invalid locale environment variable setting. To avoid
# failing entirely for no good reason, assume UTF-8.
return 'utf-8'
def _fsencoding():
"""Get the system's filesystem encoding. On Windows, this is always
UTF-8 (not MBCS).
"""
encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
if encoding == 'mbcs':
# On Windows, a broken encoding known to Python as "MBCS" is
# used for the filesystem. However, we only use the Unicode API
# for Windows paths, so the encoding is actually immaterial so
# we can avoid dealing with this nastiness. We arbitrarily
# choose UTF-8.
encoding = 'utf-8'
return encoding
def bytestring_path(path):
"""Given a path, which is either a bytes or a unicode, returns a str
path (ensuring that we never deal with Unicode pathnames).
"""
# Pass through bytestrings.
if isinstance(path, bytes):
return path
# On Windows, remove the magic prefix added by `syspath`. This makes
# ``bytestring_path(syspath(X)) == X``, i.e., we can safely
# round-trip through `syspath`.
if os.path.__name__ == 'ntpath' and path.startswith(WINDOWS_MAGIC_PREFIX):
path = path[len(WINDOWS_MAGIC_PREFIX):]
# Try to encode with default encodings, but fall back to utf-8.
try:
return path.encode(_fsencoding())
except (UnicodeError, LookupError):
return path.encode('utf-8')
PATH_SEP = bytestring_path(os.sep)
def displayable_path(path, separator='; '):
"""Attempts to decode a bytestring path to a unicode object for the
purpose of displaying it to the user. If the `path` argument is a
list or a tuple, the elements are joined with `separator`.
"""
if isinstance(path, (list, tuple)):
return separator.join(displayable_path(p) for p in path)
elif isinstance(path, str):
return path
elif not isinstance(path, bytes):
# A non-string object: just get its unicode representation.
return str(path)
try:
return path.decode(_fsencoding(), 'ignore')
except (UnicodeError, LookupError):
return path.decode('utf-8', 'ignore')
def syspath(path, prefix=True):
"""Convert a path for use by the operating system. In particular,
paths on Windows must receive a magic prefix and must be converted
to Unicode before they are sent to the OS. To disable the magic
prefix on Windows, set `prefix` to False---but only do this if you
*really* know what you're doing.
"""
# Don't do anything if we're not on windows
if os.path.__name__ != 'ntpath':
return path
if not isinstance(path, str):
# Beets currently represents Windows paths internally with UTF-8
# arbitrarily. But earlier versions used MBCS because it is
# reported as the FS encoding by Windows. Try both.
try:
path = path.decode('utf-8')
except UnicodeError:
# The encoding should always be MBCS, Windows' broken
# Unicode representation.
encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
path = path.decode(encoding, 'replace')
# Add the magic prefix if it isn't already there.
# https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247.aspx
if prefix and not path.startswith(WINDOWS_MAGIC_PREFIX):
if path.startswith('\\\\'):
# UNC path. Final path should look like \\?\UNC\...
path = 'UNC' + path[1:]
path = WINDOWS_MAGIC_PREFIX + path
return path
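# Example (illustrative): syspath() and bytestring_path() round-trip, so a
# database path survives conversion for OS calls; the path below is made up.
#
#     >>> p = bytestring_path('/music/song.mp3')
#     >>> bytestring_path(syspath(p)) == p
#     True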
def samefile(p1, p2):
"""Safer equality for paths."""
if p1 == p2:
return True
return shutil._samefile(syspath(p1), syspath(p2))
def remove(path, soft=True):
"""Remove the file. If `soft`, then no error will be raised if the
file does not exist.
"""
path = syspath(path)
if soft and not os.path.exists(path):
return
try:
os.remove(path)
except OSError as exc:
raise FilesystemError(exc, 'delete', (path,), traceback.format_exc())
def copy(path, dest, replace=False):
"""Copy a plain file. Permissions are not copied. If `dest` already
exists, raises a FilesystemError unless `replace` is True. Has no
effect if `path` is the same as `dest`. Paths are translated to
system paths before the syscall.
"""
if samefile(path, dest):
return
path = syspath(path)
dest = syspath(dest)
if not replace and os.path.exists(dest):
raise FilesystemError('file exists', 'copy', (path, dest))
try:
shutil.copyfile(path, dest)
except OSError as exc:
raise FilesystemError(exc, 'copy', (path, dest),
traceback.format_exc())
def move(path, dest, replace=False):
"""Rename a file. `dest` may not be a directory. If `dest` already
exists, raises an OSError unless `replace` is True. Has no effect if
`path` is the same as `dest`. If the paths are on different
filesystems (or the rename otherwise fails), a copy is attempted
instead, in which case metadata will *not* be preserved. Paths are
translated to system paths.
"""
if os.path.isdir(syspath(path)):
raise FilesystemError(u'source is directory', 'move', (path, dest))
if os.path.isdir(syspath(dest)):
raise FilesystemError(u'destination is directory', 'move',
(path, dest))
if samefile(path, dest):
return
if os.path.exists(syspath(dest)) and not replace:
raise FilesystemError('file exists', 'rename', (path, dest))
# First, try renaming the file.
try:
os.replace(syspath(path), syspath(dest))
except OSError:
# Copy the file to a temporary destination.
basename = os.path.basename(bytestring_path(dest))
dirname = os.path.dirname(bytestring_path(dest))
tmp = tempfile.NamedTemporaryFile(
suffix=syspath(b'.beets', prefix=False),
prefix=syspath(b'.' + basename, prefix=False),
dir=syspath(dirname),
delete=False,
)
try:
with open(syspath(path), 'rb') as f:
shutil.copyfileobj(f, tmp)
finally:
tmp.close()
# Move the copied file into place.
try:
os.replace(tmp.name, syspath(dest))
tmp = None
os.remove(syspath(path))
except OSError as exc:
raise FilesystemError(exc, 'move', (path, dest),
traceback.format_exc())
finally:
if tmp is not None:
                os.remove(tmp.name)
def link(path, dest, replace=False):
"""Create a symbolic link from path to `dest`. Raises an OSError if
`dest` already exists, unless `replace` is True. Does nothing if
`path` == `dest`.
"""
if samefile(path, dest):
return
if os.path.exists(syspath(dest)) and not replace:
raise FilesystemError('file exists', 'rename', (path, dest))
try:
os.symlink(syspath(path), syspath(dest))
except NotImplementedError:
# raised on python >= 3.2 and Windows versions before Vista
        raise FilesystemError('OS does not support symbolic links.',
                              'link', (path, dest), traceback.format_exc())
except OSError as exc:
# TODO: Windows version checks can be removed for python 3
        if hasattr(sys, 'getwindowsversion'):
if sys.getwindowsversion()[0] < 6: # is before Vista
exc = 'OS does not support symbolic links.'
raise FilesystemError(exc, 'link', (path, dest),
traceback.format_exc())
def hardlink(path, dest, replace=False):
"""Create a hard link from path to `dest`. Raises an OSError if
`dest` already exists, unless `replace` is True. Does nothing if
`path` == `dest`.
"""
if samefile(path, dest):
return
if os.path.exists(syspath(dest)) and not replace:
raise FilesystemError('file exists', 'rename', (path, dest))
try:
os.link(syspath(path), syspath(dest))
except NotImplementedError:
        raise FilesystemError('OS does not support hard links.',
                              'link', (path, dest), traceback.format_exc())
except OSError as exc:
if exc.errno == errno.EXDEV:
            raise FilesystemError('Cannot hard link across devices.',
                                  'link', (path, dest), traceback.format_exc())
else:
raise FilesystemError(exc, 'link', (path, dest),
traceback.format_exc())
def reflink(path, dest, replace=False, fallback=False):
"""Create a reflink from `dest` to `path`.
Raise an `OSError` if `dest` already exists, unless `replace` is
True. If `path` == `dest`, then do nothing.
If reflinking fails and `fallback` is enabled, try copying the file
instead. Otherwise, raise an error without trying a plain copy.
May raise an `ImportError` if the `reflink` module is not available.
"""
import reflink as pyreflink
if samefile(path, dest):
return
if os.path.exists(syspath(dest)) and not replace:
raise FilesystemError('file exists', 'rename', (path, dest))
try:
pyreflink.reflink(path, dest)
except (NotImplementedError, pyreflink.ReflinkImpossibleError):
if fallback:
copy(path, dest, replace)
else:
raise FilesystemError('OS/filesystem does not support reflinks.',
'link', (path, dest), traceback.format_exc())
def unique_path(path):
"""Returns a version of ``path`` that does not exist on the
    filesystem. Specifically, if ``path`` itself already exists, then
something unique is appended to the path.
"""
if not os.path.exists(syspath(path)):
return path
base, ext = os.path.splitext(path)
match = re.search(br'\.(\d)+$', base)
if match:
num = int(match.group(1))
base = base[:match.start()]
else:
num = 0
while True:
num += 1
suffix = f'.{num}'.encode() + ext
new_path = base + suffix
if not os.path.exists(new_path):
return new_path
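# Example (illustrative): if b'song.mp3' already exists, unique_path() proposes
# b'song.1.mp3'; if that exists too, it continues with b'song.2.mp3', reusing
# an existing numeric suffix as the starting point.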
# Note: The Windows "reserved characters" are, of course, allowed on
# Unix. They are forbidden here because they cause problems on Samba
# shares, which are sufficiently common as to cause frequent problems.
# https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247.aspx
CHAR_REPLACE = [
(re.compile(r'[\\/]'), '_'), # / and \ -- forbidden everywhere.
(re.compile(r'^\.'), '_'), # Leading dot (hidden files on Unix).
(re.compile(r'[\x00-\x1f]'), ''), # Control characters.
(re.compile(r'[<>:"\?\*\|]'), '_'), # Windows "reserved characters".
(re.compile(r'\.$'), '_'), # Trailing dots.
(re.compile(r'\s+$'), ''), # Trailing whitespace.
]
def sanitize_path(path, replacements=None):
"""Takes a path (as a Unicode string) and makes sure that it is
legal. Returns a new path. Only works with fragments; won't work
reliably on Windows when a path begins with a drive letter. Path
separators (including altsep!) should already be cleaned from the
path components. If replacements is specified, it is used *instead*
of the default set of replacements; it must be a list of (compiled
regex, replacement string) pairs.
"""
replacements = replacements or CHAR_REPLACE
comps = components(path)
if not comps:
return ''
for i, comp in enumerate(comps):
for regex, repl in replacements:
comp = regex.sub(repl, comp)
comps[i] = comp
return os.path.join(*comps)
def truncate_path(path, length=MAX_FILENAME_LENGTH):
"""Given a bytestring path or a Unicode path fragment, truncate the
components to a legal length. In the last component, the extension
is preserved.
"""
comps = components(path)
out = [c[:length] for c in comps]
base, ext = os.path.splitext(comps[-1])
if ext:
# Last component has an extension.
base = base[:length - len(ext)]
out[-1] = base + ext
return os.path.join(*out)
def _legalize_stage(path, replacements, length, extension, fragment):
"""Perform a single round of path legalization steps
(sanitation/replacement, encoding from Unicode to bytes,
extension-appending, and truncation). Return the path (Unicode if
`fragment` is set, `bytes` otherwise) and whether truncation was
required.
"""
# Perform an initial sanitization including user replacements.
path = sanitize_path(path, replacements)
# Encode for the filesystem.
if not fragment:
path = bytestring_path(path)
# Preserve extension.
path += extension.lower()
# Truncate too-long components.
pre_truncate_path = path
path = truncate_path(path, length)
return path, path != pre_truncate_path
def legalize_path(path, replacements, length, extension, fragment):
"""Given a path-like Unicode string, produce a legal path. Return
the path and a flag indicating whether some replacements had to be
ignored (see below).
The legalization process (see `_legalize_stage`) consists of
applying the sanitation rules in `replacements`, encoding the string
to bytes (unless `fragment` is set), truncating components to
`length`, appending the `extension`.
This function performs up to three calls to `_legalize_stage` in
case truncation conflicts with replacements (as can happen when
truncation creates whitespace at the end of the string, for
    example). The limited number of iterations avoids the
possibility of an infinite loop of sanitation and truncation
operations, which could be caused by replacement rules that make the
string longer. The flag returned from this function indicates that
the path has to be truncated twice (indicating that replacements
made the string longer again after it was truncated); the
application should probably log some sort of warning.
"""
if fragment:
# Outputting Unicode.
extension = extension.decode('utf-8', 'ignore')
first_stage_path, _ = _legalize_stage(
path, replacements, length, extension, fragment
)
# Convert back to Unicode with extension removed.
first_stage_path, _ = os.path.splitext(displayable_path(first_stage_path))
# Re-sanitize following truncation (including user replacements).
second_stage_path, retruncated = _legalize_stage(
first_stage_path, replacements, length, extension, fragment
)
# If the path was once again truncated, discard user replacements
# and run through one last legalization stage.
if retruncated:
second_stage_path, _ = _legalize_stage(
first_stage_path, None, length, extension, fragment
)
return second_stage_path, retruncated
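# Example (hedged sketch): a typical call site; the literal values below are
# placeholders, not defaults from this module.
#
#     dest, truncated = legalize_path(
#         'AC/DC: Back in Black',   # path fragment to legalize
#         CHAR_REPLACE,             # or the user's configured replacements
#         MAX_FILENAME_LENGTH,      # per-component length limit
#         b'.mp3',                  # extension, as bytes when fragment is False
#         False,                    # fragment: return bytes, not Unicode
#     )
#     if truncated:
#         pass  # replacements were ignored on the final pass; warn the user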
def py3_path(path):
"""Convert a bytestring path to Unicode on Python 3 only. On Python
2, return the bytestring path unchanged.
This helps deal with APIs on Python 3 that *only* accept Unicode
(i.e., `str` objects). I philosophically disagree with this
decision, because paths are sadly bytes on Unix, but that's the way
it is. So this function helps us "smuggle" the true bytes data
through APIs that took Python 3's Unicode mandate too seriously.
"""
if isinstance(path, str):
return path
assert isinstance(path, bytes)
return os.fsdecode(path)
def str2bool(value):
"""Returns a boolean reflecting a human-entered string."""
return value.lower() in ('yes', '1', 'true', 't', 'y')
def as_string(value):
"""Convert a value to a Unicode object for matching with a query.
None becomes the empty string. Bytestrings are silently decoded.
"""
if value is None:
return ''
elif isinstance(value, memoryview):
return bytes(value).decode('utf-8', 'ignore')
elif isinstance(value, bytes):
return value.decode('utf-8', 'ignore')
else:
return str(value)
def text_string(value, encoding='utf-8'):
"""Convert a string, which can either be bytes or unicode, to
unicode.
Text (unicode) is left untouched; bytes are decoded. This is useful
to convert from a "native string" (bytes on Python 2, str on Python
3) to a consistently unicode value.
"""
if isinstance(value, bytes):
return value.decode(encoding)
return value
def plurality(objs):
"""Given a sequence of hashble objects, returns the object that
is most common in the set and the its number of appearance. The
sequence must contain at least one object.
"""
c = Counter(objs)
if not c:
raise ValueError('sequence must be non-empty')
return c.most_common(1)[0]
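# Example (illustrative):
#
#     >>> plurality(['flac', 'mp3', 'flac'])
#     ('flac', 2)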
def cpu_count():
"""Return the number of hardware thread contexts (cores or SMT
threads) in the system.
"""
# Adapted from the soundconverter project:
# https://github.com/kassoulet/soundconverter
if sys.platform == 'win32':
try:
num = int(os.environ['NUMBER_OF_PROCESSORS'])
except (ValueError, KeyError):
num = 0
elif sys.platform == 'darwin':
try:
num = int(command_output([
'/usr/sbin/sysctl',
'-n',
'hw.ncpu',
]).stdout)
except (ValueError, OSError, subprocess.CalledProcessError):
num = 0
else:
try:
num = os.sysconf('SC_NPROCESSORS_ONLN')
except (ValueError, OSError, AttributeError):
num = 0
if num >= 1:
return num
else:
return 1
def convert_command_args(args):
"""Convert command arguments to bytestrings on Python 2 and
surrogate-escaped strings on Python 3."""
assert isinstance(args, list)
def convert(arg):
if isinstance(arg, bytes):
arg = arg.decode(arg_encoding(), 'surrogateescape')
return arg
return [convert(a) for a in args]
# stdout and stderr as bytes
CommandOutput = namedtuple("CommandOutput", ("stdout", "stderr"))
def command_output(cmd, shell=False):
"""Runs the command and returns its output after it has exited.
Returns a CommandOutput. The attributes ``stdout`` and ``stderr`` contain
byte strings of the respective output streams.
``cmd`` is a list of arguments starting with the command names. The
arguments are bytes on Unix and strings on Windows.
If ``shell`` is true, ``cmd`` is assumed to be a string and passed to a
shell to execute.
If the process exits with a non-zero return code
``subprocess.CalledProcessError`` is raised. May also raise
``OSError``.
This replaces `subprocess.check_output` which can have problems if lots of
output is sent to stderr.
"""
cmd = convert_command_args(cmd)
try: # python >= 3.3
devnull = subprocess.DEVNULL
except AttributeError:
devnull = open(os.devnull, 'r+b')
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=devnull,
close_fds=platform.system() != 'Windows',
shell=shell
)
stdout, stderr = proc.communicate()
if proc.returncode:
raise subprocess.CalledProcessError(
returncode=proc.returncode,
cmd=' '.join(cmd),
output=stdout + stderr,
)
return CommandOutput(stdout, stderr)
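# Example (hedged sketch): running an external tool; the command is a
# placeholder and must exist on the system for this to succeed.
#
#     try:
#         out = command_output(['ffmpeg', '-version'])
#     except (OSError, subprocess.CalledProcessError):
#         pass  # tool missing or exited non-zero
#     else:
#         version_line = out.stdout.splitlines()[0]  # bytes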
def max_filename_length(path, limit=MAX_FILENAME_LENGTH):
"""Attempt to determine the maximum filename length for the
filesystem containing `path`. If the value is greater than `limit`,
then `limit` is used instead (to prevent errors when a filesystem
misreports its capacity). If it cannot be determined (e.g., on
Windows), return `limit`.
"""
if hasattr(os, 'statvfs'):
try:
res = os.statvfs(path)
except OSError:
return limit
return min(res[9], limit)
else:
return limit
def open_anything():
"""Return the system command that dispatches execution to the correct
program.
"""
sys_name = platform.system()
if sys_name == 'Darwin':
base_cmd = 'open'
elif sys_name == 'Windows':
base_cmd = 'start'
else: # Assume Unix
base_cmd = 'xdg-open'
return base_cmd
def editor_command():
"""Get a command for opening a text file.
Use the `EDITOR` environment variable by default. If it is not
present, fall back to `open_anything()`, the platform-specific tool
for opening files in general.
"""
editor = os.environ.get('EDITOR')
if editor:
return editor
return open_anything()
def interactive_open(targets, command):
"""Open the files in `targets` by `exec`ing a new `command`, given
as a Unicode string. (The new program takes over, and Python
execution ends: this does not fork a subprocess.)
Can raise `OSError`.
"""
assert command
# Split the command string into its arguments.
try:
args = shlex.split(command)
except ValueError: # Malformed shell tokens.
args = [command]
args.insert(0, args[0]) # for argv[0]
args += targets
return os.execlp(*args)
def _windows_long_path_name(short_path):
"""Use Windows' `GetLongPathNameW` via ctypes to get the canonical,
long path given a short filename.
"""
if not isinstance(short_path, str):
short_path = short_path.decode(_fsencoding())
import ctypes
buf = ctypes.create_unicode_buffer(260)
get_long_path_name_w = ctypes.windll.kernel32.GetLongPathNameW
return_value = get_long_path_name_w(short_path, buf, 260)
if return_value == 0 or return_value > 260:
# An error occurred
return short_path
else:
long_path = buf.value
# GetLongPathNameW does not change the case of the drive
# letter.
if len(long_path) > 1 and long_path[1] == ':':
long_path = long_path[0].upper() + long_path[1:]
return long_path
def case_sensitive(path):
"""Check whether the filesystem at the given path is case sensitive.
To work best, the path should point to a file or a directory. If the path
does not exist, assume a case sensitive file system on every platform
except Windows.
"""
# A fallback in case the path does not exist.
if not os.path.exists(syspath(path)):
# By default, the case sensitivity depends on the platform.
return platform.system() != 'Windows'
# If an upper-case version of the path exists but a lower-case
# version does not, then the filesystem must be case-sensitive.
# (Otherwise, we have more work to do.)
if not (os.path.exists(syspath(path.lower())) and
os.path.exists(syspath(path.upper()))):
return True
# Both versions of the path exist on the file system. Check whether
# they refer to different files by their inodes. Alas,
# `os.path.samefile` is only available on Unix systems on Python 2.
if platform.system() != 'Windows':
return not os.path.samefile(syspath(path.lower()),
syspath(path.upper()))
# On Windows, we check whether the canonical, long filenames for the
# files are the same.
lower = _windows_long_path_name(path.lower())
upper = _windows_long_path_name(path.upper())
return lower != upper
def raw_seconds_short(string):
"""Formats a human-readable M:SS string as a float (number of seconds).
Raises ValueError if the conversion cannot take place due to `string` not
being in the right format.
"""
match = re.match(r'^(\d+):([0-5]\d)$', string)
if not match:
raise ValueError('String not in M:SS format')
minutes, seconds = map(int, match.groups())
return float(minutes * 60 + seconds)
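# Example (illustrative):
#
#     >>> raw_seconds_short('3:05')
#     185.0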
def asciify_path(path, sep_replace):
"""Decodes all unicode characters in a path into ASCII equivalents.
Substitutions are provided by the unidecode module. Path separators in the
input are preserved.
Keyword arguments:
path -- The path to be asciified.
sep_replace -- the string to be used to replace extraneous path separators.
"""
# if this platform has an os.altsep, change it to os.sep.
if os.altsep:
path = path.replace(os.altsep, os.sep)
path_components = path.split(os.sep)
for index, item in enumerate(path_components):
path_components[index] = unidecode(item).replace(os.sep, sep_replace)
if os.altsep:
path_components[index] = unidecode(item).replace(
os.altsep,
sep_replace
)
return os.sep.join(path_components)
def par_map(transform, items):
"""Apply the function `transform` to all the elements in the
iterable `items`, like `map(transform, items)` but with no return
    value. The map happens in parallel.
The parallelism uses threads (not processes), so this is only useful
for IO-bound `transform`s.
"""
pool = ThreadPool()
pool.map(transform, items)
pool.close()
pool.join()
def lazy_property(func):
"""A decorator that creates a lazily evaluated property. On first access,
the property is assigned the return value of `func`. This first value is
stored, so that future accesses do not have to evaluate `func` again.
This behaviour is useful when `func` is expensive to evaluate, and it is
not certain that the result will be needed.
"""
field_name = '_' + func.__name__
@property
@functools.wraps(func)
def wrapper(self):
if hasattr(self, field_name):
return getattr(self, field_name)
value = func(self)
setattr(self, field_name, value)
return value
return wrapper
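# Example (illustrative): the decorated method runs once per instance; later
# accesses return the cached value. `expensive_hash` is a placeholder.
#
#     class Item:
#         @lazy_property
#         def checksum(self):
#             return expensive_hash(self.path)
#
#     item = Item()
#     item.checksum  # computes and caches
#     item.checksum  # returns the cached value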
def decode_commandline_path(path):
"""Prepare a path for substitution into commandline template.
On Python 3, we need to construct the subprocess commands to invoke as a
Unicode string. On Unix, this is a little unfortunate---the OS is
expecting bytes---so we use surrogate escaping and decode with the
argument encoding, which is the same encoding that will then be
*reversed* to recover the same bytes before invoking the OS. On
Windows, we want to preserve the Unicode filename "as is."
"""
# On Python 3, the template is a Unicode string, which only supports
# substitution of Unicode variables.
if platform.system() == 'Windows':
return path.decode(_fsencoding())
else:
return path.decode(arg_encoding(), 'surrogateescape')
|
beetbox/beets
|
beets/util/__init__.py
|
Python
|
mit
| 37,269
|
import pandas as pd
import numpy as np
import re
from gensim import corpora, models, similarities
from gensim.parsing.preprocessing import STOPWORDS
def split(text):
'''
Split the input text into words/tokens; ignoring stopwords and empty strings
'''
delimiters = ".", ",", ";", ":", "-", "(", ")", " ", "\t"
regexPattern = '|'.join(map(re.escape, delimiters))
return [word for word in re.split(regexPattern, text.lower()) if word not in STOPWORDS and word != ""]
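# Example (illustrative; the product title is arbitrary):
#
#     >>> split("Angle Bracket, 2-Pack (Galvanized)")
#     ['angle', 'bracket', '2', 'pack', 'galvanized']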
def main():
# Load data
df_train = pd.read_csv('data/train.csv', encoding="ISO-8859-1")
df_desc = pd.read_csv('data/product_descriptions.csv', encoding="ISO-8859-1")
df_attr = pd.read_csv('data/attributes_combined.csv', encoding="ISO-8859-1")
# split the texts
titles = [split(line) for line in df_train["product_title"]]
descs = [split(line) for line in df_desc["product_description"]]
attrs = [[str(line)] if isinstance(line, float) else split(line) for line in df_attr["attr_value"]]
queries = [split(line) for line in df_train["search_term"]]
texts = np.concatenate((titles, descs, attrs, queries))
# remove infrequent words
from collections import defaultdict
frequency = defaultdict(int)
for text in texts:
for token in text:
frequency[token] += 1
texts = [[token for token in text if frequency[token] > 2] for text in texts]
# build dictionary
dictionary = corpora.Dictionary(texts)
dictionary.save('homedepot.dict')
print dictionary
# actually build a bag-of-words corpus
corpus = [dictionary.doc2bow(text) for text in texts]
corpora.MmCorpus.serialize('homedepot.mm', corpus)
# build Tf-idf model
tfidf = models.TfidfModel(corpus)
tfidf.save('homedepot.tfidf')
if __name__ == "__main__":
main()
|
CSC591ADBI-TeamProjects/Product-Search-Relevance
|
build_tfidf.py
|
Python
|
mit
| 1,752
|
# -*- coding: utf-8 -*-
import win32process
import win32api
import win32con
import ctypes
import os, sys, string
TH32CS_SNAPPROCESS = 0x00000002
class PROCESSENTRY32(ctypes.Structure):
_fields_ = [("dwSize", ctypes.c_ulong),
("cntUsage", ctypes.c_ulong),
("th32ProcessID", ctypes.c_ulong),
("th32DefaultHeapID", ctypes.c_ulong),
("th32ModuleID", ctypes.c_ulong),
("cntThreads", ctypes.c_ulong),
("th32ParentProcessID", ctypes.c_ulong),
("pcPriClassBase", ctypes.c_ulong),
("dwFlags", ctypes.c_ulong),
("szExeFile", ctypes.c_char * 260)]
def getProcList():
CreateToolhelp32Snapshot = ctypes.windll.kernel32.CreateToolhelp32Snapshot
Process32First = ctypes.windll.kernel32.Process32First
Process32Next = ctypes.windll.kernel32.Process32Next
CloseHandle = ctypes.windll.kernel32.CloseHandle
hProcessSnap = CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0)
pe32 = PROCESSENTRY32()
pe32.dwSize = ctypes.sizeof(PROCESSENTRY32)
if Process32First(hProcessSnap,ctypes.byref(pe32)) == False:
return
while True:
yield pe32
if Process32Next(hProcessSnap,ctypes.byref(pe32)) == False:
break
CloseHandle(hProcessSnap)
def GetProcessModules( pid ):
handle = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, False, pid )
hModule = win32process.EnumProcessModules(handle)
temp=[]
for i in hModule:
temp.append([hex(i),debugfile(win32process.GetModuleFileNameEx(handle,i))])
win32api.CloseHandle(handle)
return temp
def CloseProcess( pid ):
handle = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, False, pid )
exitcode = win32process.GetExitCodeProcess( handle )
win32api.TerminateProcess(handle, exitcode)
win32api.CloseHandle(handle)
def debugfile(file):
if (file.split("\\")[-1]=="smss.exe"):
file = "C:\\WINDOWS\\system32\\smss.exe"
return file
elif (file.split("\\")[-1]=="csrss.exe"):
file = "C:\\WINDOWS\\system32\\csrss.exe"
return file
elif (file.split("\\")[-1]=="winlogon.exe"):
file = "C:\\WINDOWS\\system32\\winlogon.exe"
return file
else:
return file
if __name__ =='__main__':
#调用procup.dll的enableDebugPriv函数对本进程提权
procupdll=ctypes.cdll.LoadLibrary("InjectAssist.dll")
self_pid = procupdll.GetPIDbyName('services.exe')
print self_pid
if procupdll.EnableOpenprocPriv()==0:
print "提权失败"
count = 0
procList = getProcList()
for proc in procList:
count+=1
print("name=%s\tfather=%d\tid=%d" % (proc.szExeFile, proc.th32ParentProcessID, proc.th32ProcessID))
try:
TempGet=GetProcessModules(proc.th32ProcessID)
except Exception, e:
print "pid:%d can't read"%(proc.th32ProcessID)
continue
#TempGet[0][1].split("\\")[-1] 路径的最后一部分
#'''
#枚举进程调用所有模块
for tempnum in range(0,len(TempGet)):
try:
print TempGet
except Exception,e:
print e
#'''
print "进程数:%d"%(count)
|
holdlg/PythonScript
|
Python2/bak_2014/sys3_process.py
|
Python
|
mit
| 3,492
|
from evosnap import constants
class POSDevice:
def __init__(self,**kwargs):
self.__order = [
'posDeviceType', 'posDeviceConnection', 'posDeviceColour', 'posDeviceQuantity',
]
self.__lower_camelcase = constants.ALL_FIELDS
self.pos_device_type = kwargs.get('pos_device_type')
self.pos_device_connection = kwargs.get('pos_device_connection')
self.pos_device_colour = kwargs.get('pos_device_colour')
self.pos_device_quantity = kwargs.get('pos_device_quantity')
@property
def hash_str(self):
required = [
'pos_device_type', 'pos_device_connection', 'pos_device_colour', 'pos_device_quantity',
]
return ''.join([str(getattr(self,f)).strip() for f in required if getattr(self,f) is not None])
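# Example (hedged sketch): hash_str concatenates the populated fields in the
# required order and skips None values; the device values below are made up.
#
#     device = POSDevice(pos_device_type='V400m', pos_device_connection='WIFI',
#                        pos_device_quantity=2)
#     device.hash_str  # -> 'V400mWIFI2' (pos_device_colour omitted, it is None)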
|
Zertifica/evosnap
|
evosnap/merchant_applications/pos_device.py
|
Python
|
mit
| 806
|
import numpy as np
import matplotlib.pyplot as plt
from snob import mixture_slf as slf
n_samples, n_features, n_clusters, rank = 1000, 50, 6, 1
sigma = 0.5
true_homo_specific_variances = sigma**2 * np.ones((1, n_features))
rng = np.random.RandomState(321)
U, _, _ = np.linalg.svd(rng.randn(n_features, n_features))
true_factor_loads = U[:, :rank].T
true_factor_scores = rng.randn(n_samples, rank)
X = np.dot(true_factor_scores, true_factor_loads)
# Assign objects to different clusters.
indices = rng.randint(0, n_clusters, size=n_samples)
true_weights = np.zeros(n_clusters)
true_means = rng.randn(n_clusters, n_features)
for index in range(n_clusters):
X[indices==index] += true_means[index]
true_weights[index] = (indices==index).sum()
true_weights = true_weights/n_samples
# Adding homoscedastic noise
bar = rng.randn(n_samples, n_features)
X_homo = X + sigma * bar
# Adding heteroscedastic noise
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X_hetero = X + rng.randn(n_samples, n_features) * sigmas
true_hetero_specific_variances = sigmas**2
data = X_hetero
model = slf.SLFGMM(n_clusters)
model.fit(data)
def scatter_common(x, y, title=None):
fig, ax = plt.subplots()
ax.scatter(x,y)
ax.set_title(title or "")
limits = np.array([ax.get_xlim(), ax.get_ylim()])
limits = (limits.min(), limits.max())
ax.plot(limits, limits, c="#666666", linestyle=":", linewidth=0.5, zorder=-1)
ax.set_xlim(limits)
ax.set_ylim(limits)
return fig
scatter_common(true_factor_loads, model.factor_loads, "factor loads")
scatter_common(true_factor_scores, model.factor_scores, "factor scores")
scatter_common(true_homo_specific_variances, model.specific_variances, "specific variances")
# means
# This one is tricky because the indices are not necessarily the same.
# So just take whichever is closest.
idx = np.zeros(n_clusters, dtype=int)
for index, true_mean in enumerate(true_means):
distance = np.sum(np.abs(model._means - true_mean), axis=1) \
+ np.abs(model.weights.flatten()[index] - true_weights)
idx[index] = np.argmin(distance)
assert len(idx) == len(set(idx))
true = true_means.flatten()
inferred = model._means[idx].flatten()
scatter_common(true, inferred, "means")
# Plot some data...
fig, ax = plt.subplots()
ax.scatter(data[:, 0], data[:, 1], facecolor="g")
raise a
# factor scores
ax = axes[1]
true = true_factor_scores.flatten()
inferred = model._factor_scores.flatten()
ax.scatter(true, inferred)
# factor loads
ax = axes[2]
true = true_factor_loads.flatten()
inferred = model._factor_loads.flatten()
ax.scatter(true, inferred)
raise a
true = np.hstack([each.flatten() for each in (true_means, true_factor_scores, true_factor_loads, true_specific_variances)])
inferred = np.hstack([each.flatten() for each in (model.means, model.factor_scores, model.factor_loads, model.specific_variances)])
fig, ax = plt.subplots()
ax.scatter(true, inferred, alpha=0.5)
raise a
|
andycasey/snob
|
sandbox_mixture_slf.py
|
Python
|
mit
| 2,941
|
import os
import shutil
from codecs import open as codecs_open
import numpy as np
from setuptools import setup, find_packages
from distutils.core import Distribution, Extension
from distutils.command.build_ext import build_ext
from distutils import errors
from Cython.Build import cythonize
from Cython.Compiler.Errors import CompileError
def check_for_openmp():
"""
There does not seem to be a cross platform and standard way to check for
OpenMP support. Attempt to compile a test script. Proceed with OpenMP
implementation if it works.
"""
distribution = Distribution()
ext_options = {
'extra_compile_args': ['-fopenmp'],
'extra_link_args': ['-fopenmp']
}
extensions = [
Extension('geoblend.openmp_check', ['geoblend/openmp_check.pyx'], **ext_options)
]
build_extension = build_ext(distribution)
build_extension.finalize_options()
build_extension.extensions = cythonize(extensions, force=True)
build_extension.run()
ext_options = {
'include_dirs': [ np.get_include() ]
}
extensions = [
Extension('geoblend.vector', ['geoblend/vector.pyx'], **ext_options),
Extension('geoblend.convolve', ['geoblend/convolve.pyx'], **ext_options)
]
pkg_dir = os.path.dirname(os.path.realpath(__file__))
dst = os.path.join(pkg_dir, 'geoblend', 'coefficients.pyx')
try:
check_for_openmp()
ext_options['extra_compile_args'] = ['-fopenmp']
ext_options['extra_link_args'] = ['-fopenmp']
src = os.path.join(pkg_dir, 'geoblend', '_coefficients_omp.pyx')
except (errors.LinkError, errors.CompileError, CompileError):
src = os.path.join(pkg_dir, 'geoblend', '_coefficients.pyx')
shutil.copy(src, dst)
extensions.append(
Extension('geoblend.coefficients', ['geoblend/coefficients.pyx'], **ext_options),
)
# Get the long description from the relevant file
with codecs_open('README.rst', encoding='utf-8') as f:
long_description = f.read()
setup(name='geoblend',
version='0.2.3',
description=u"Geo-aware poisson blending.",
long_description=long_description,
classifiers=[],
keywords='',
author=u"Amit Kapadia",
author_email='amit@planet.com',
url='https://github.com/kapadia/geoblend',
license='MIT',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
ext_modules=cythonize(extensions),
zip_safe=False,
install_requires=[
'click',
# 'rasterio',
'pyamg',
'scipy',
'scikit-image'
],
extras_require={
'test': ['pytest'],
'development': [
'cython>=0.23.0',
'benchmark'
]
},
entry_points="""
[console_scripts]
geoblend=geoblend.scripts.cli:geoblend
"""
)
|
kapadia/geoblend
|
setup.py
|
Python
|
mit
| 2,841
|
# vim: ts=4 sw=4 et ai:
"""This module implements all contexts for state handling during uploads and
downloads, the main interface to which being the TftpContext base class.
The concept is simple. Each context object represents a single upload or
download, and the state object in the context object represents the current
state of that transfer. The state object has a handle() method that expects
the next packet in the transfer, and returns a state object until the transfer
is complete, at which point it returns None. That is, unless there is a fatal
error, in which case a TftpException is returned instead."""
import logging
import os
import socket
import sys
import time
from .TftpPacketFactory import TftpPacketFactory
from .TftpPacketTypes import *
from .TftpShared import *
from .TftpStates import *
log = logging.getLogger("tftpy.TftpContext")
###############################################################################
# Utility classes
###############################################################################
class TftpMetrics:
"""A class representing metrics of the transfer."""
def __init__(self):
# Bytes transferred
self.bytes = 0
# Bytes re-sent
self.resent_bytes = 0
# Duplicate packets received
self.dups = {}
self.dupcount = 0
# Times
self.start_time = 0
self.end_time = 0
self.duration = 0
# Rates
self.bps = 0
self.kbps = 0
# Generic errors
self.errors = 0
def compute(self):
# Compute transfer time
self.duration = self.end_time - self.start_time
if self.duration == 0:
self.duration = 1
log.debug("TftpMetrics.compute: duration is %s", self.duration)
self.bps = (self.bytes * 8.0) / self.duration
self.kbps = self.bps / 1024.0
log.debug("TftpMetrics.compute: kbps is %s", self.kbps)
for key in self.dups:
self.dupcount += self.dups[key]
def add_dup(self, pkt):
"""This method adds a dup for a packet to the metrics."""
log.debug("Recording a dup of %s", pkt)
s = str(pkt)
if s in self.dups:
self.dups[s] += 1
else:
self.dups[s] = 1
tftpassert(self.dups[s] < MAX_DUPS, "Max duplicates reached")
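# Example (illustrative sketch): how a transfer context typically drives
# TftpMetrics - set the timestamps, accumulate bytes, then compute().
#
#     metrics = TftpMetrics()
#     metrics.start_time = time.time()
#     metrics.bytes += 512          # per DAT payload sent or received
#     metrics.end_time = time.time()
#     metrics.compute()             # fills in duration, bps and kbps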
###############################################################################
# Context classes
###############################################################################
class TftpContext:
"""The base class of the contexts."""
def __init__(self, host, port, timeout, retries=DEF_TIMEOUT_RETRIES, localip=""):
"""Constructor for the base context, setting shared instance
variables."""
self.file_to_transfer = None
self.fileobj = None
self.options = None
self.packethook = None
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
if localip != "":
self.sock.bind((localip, 0))
self.sock.settimeout(timeout)
self.timeout = timeout
self.retries = retries
self.state = None
self.next_block = 0
self.factory = TftpPacketFactory()
# Note, setting the host will also set self.address, as it's a property.
self.host = host
self.port = port
# The port associated with the TID
self.tidport = None
# Metrics
self.metrics = TftpMetrics()
        # Flag when the transfer is pending completion.
self.pending_complete = False
# Time when this context last received any traffic.
# FIXME: does this belong in metrics?
self.last_update = 0
# The last packet we sent, if applicable, to make resending easy.
self.last_pkt = None
# Count the number of retry attempts.
self.retry_count = 0
def getBlocksize(self):
"""Fetch the current blocksize for this session."""
return int(self.options.get("blksize", 512))
def __del__(self):
"""Simple destructor to try to call housekeeping in the end method if
not called explicitly. Leaking file descriptors is not a good
thing."""
self.end()
def checkTimeout(self, now):
"""Compare current time with last_update time, and raise an exception
if we're over the timeout time."""
log.debug("checking for timeout on session %s", self)
if now - self.last_update > self.timeout:
raise TftpTimeout("Timeout waiting for traffic")
def start(self):
raise NotImplementedError("Abstract method")
def end(self, close_fileobj=True):
"""Perform session cleanup, since the end method should always be
called explicitly by the calling code, this works better than the
destructor.
Set close_fileobj to False so fileobj can be returned open."""
log.debug("in TftpContext.end - closing socket")
self.sock.close()
if close_fileobj and self.fileobj is not None and not self.fileobj.closed:
log.debug("self.fileobj is open - closing")
self.fileobj.close()
def gethost(self):
"""
Simple getter method for use in a property.
"""
return self.__host
def sethost(self, host):
"""
Setter method that also sets the address property as a result
of the host that is set.
"""
self.__host = host
self.address = socket.gethostbyname(host)
host = property(gethost, sethost)
def setNextBlock(self, block):
if block >= 2 ** 16:
log.debug("Block number rollover to 0 again")
block = 0
self.__eblock = block
def getNextBlock(self):
return self.__eblock
next_block = property(getNextBlock, setNextBlock)
def cycle(self):
"""
Here we wait for a response from the server after sending it
something, and dispatch appropriate action to that response.
"""
try:
(buffer, (raddress, rport)) = self.sock.recvfrom(MAX_BLKSIZE)
except socket.timeout:
log.warning("Timeout waiting for traffic, retrying...")
raise TftpTimeout("Timed-out waiting for traffic")
# Ok, we've received a packet. Log it.
log.debug("Received %d bytes from %s:%s", len(buffer), raddress, rport)
# And update our last updated time.
self.last_update = time.time()
# Decode it.
recvpkt = self.factory.parse(buffer)
# Check for known "connection".
if raddress != self.address:
log.warning(
"Received traffic from %s, expected host %s. Discarding"
% (raddress, self.host)
)
if self.tidport and self.tidport != rport:
log.warning(
"Received traffic from %s:%s but we're "
"connected to %s:%s. Discarding."
% (raddress, rport, self.host, self.tidport)
)
# If there is a packethook defined, call it. We unconditionally
# pass all packets, it's up to the client to screen out different
# kinds of packets. This way, the client is privy to things like
# negotiated options.
if self.packethook:
self.packethook(recvpkt)
# And handle it, possibly changing state.
self.state = self.state.handle(recvpkt, raddress, rport)
# If we didn't throw any exceptions here, reset the retry_count to
# zero.
self.retry_count = 0
class TftpContextServer(TftpContext):
"""The context for the server."""
def __init__(
self,
host,
port,
timeout,
root,
dyn_file_func=None,
upload_open=None,
retries=DEF_TIMEOUT_RETRIES,
):
TftpContext.__init__(self, host, port, timeout, retries)
# At this point we have no idea if this is a download or an upload. We
# need to let the start state determine that.
self.state = TftpStateServerStart(self)
self.root = root
self.dyn_file_func = dyn_file_func
self.upload_open = upload_open
def __str__(self):
return f"{self.host}:{self.port} {self.state}"
def start(self, buffer):
"""
Start the state cycle. Note that the server context receives an
initial packet in its start method. Also note that the server does not
loop on cycle(), as it expects the TftpServer object to manage
that.
"""
log.debug("In TftpContextServer.start")
self.metrics.start_time = time.time()
log.debug("Set metrics.start_time to %s", self.metrics.start_time)
# And update our last updated time.
self.last_update = time.time()
pkt = self.factory.parse(buffer)
log.debug("TftpContextServer.start() - factory returned a %s", pkt)
# Call handle once with the initial packet. This should put us into
# the download or the upload state.
self.state = self.state.handle(pkt, self.host, self.port)
def end(self):
"""Finish up the context."""
TftpContext.end(self)
self.metrics.end_time = time.time()
log.debug("Set metrics.end_time to %s", self.metrics.end_time)
self.metrics.compute()
class TftpContextClientUpload(TftpContext):
"""The upload context for the client during an upload.
Note: If input is a hyphen, then we will use stdin."""
def __init__(
self,
host,
port,
filename,
input,
options,
packethook,
timeout,
retries=DEF_TIMEOUT_RETRIES,
localip="",
):
TftpContext.__init__(self, host, port, timeout, retries, localip)
self.file_to_transfer = filename
self.options = options
self.packethook = packethook
# If the input object has a read() function,
# assume it is file-like.
if hasattr(input, "read"):
self.fileobj = input
elif input == "-":
self.fileobj = sys.stdin.buffer
else:
self.fileobj = open(input, "rb")
log.debug("TftpContextClientUpload.__init__()")
log.debug(
"file_to_transfer = %s, options = %s"
% (self.file_to_transfer, self.options)
)
def __str__(self):
return f"{self.host}:{self.port} {self.state}"
def start(self):
log.info("Sending tftp upload request to %s" % self.host)
log.info(" filename -> %s" % self.file_to_transfer)
log.info(" options -> %s" % self.options)
self.metrics.start_time = time.time()
log.debug("Set metrics.start_time to %s" % self.metrics.start_time)
# FIXME: put this in a sendWRQ method?
pkt = TftpPacketWRQ()
pkt.filename = self.file_to_transfer
pkt.mode = "octet" # FIXME - shouldn't hardcode this
pkt.options = self.options
self.sock.sendto(pkt.encode().buffer, (self.host, self.port))
self.next_block = 1
self.last_pkt = pkt
# FIXME: should we centralize sendto operations so we can refactor all
# saving of the packet to the last_pkt field?
self.state = TftpStateSentWRQ(self)
while self.state:
try:
log.debug("State is %s" % self.state)
self.cycle()
except TftpTimeout as err:
log.error(str(err))
self.retry_count += 1
if self.retry_count >= self.retries:
log.debug("hit max retries, giving up")
raise
else:
log.warning("resending last packet")
self.state.resendLast()
def end(self):
"""Finish up the context."""
TftpContext.end(self)
self.metrics.end_time = time.time()
log.debug("Set metrics.end_time to %s" % self.metrics.end_time)
self.metrics.compute()
class TftpContextClientDownload(TftpContext):
"""The download context for the client during a download.
Note: If output is a hyphen, then the output will be sent to stdout."""
def __init__(
self,
host,
port,
filename,
output,
options,
packethook,
timeout,
retries=DEF_TIMEOUT_RETRIES,
localip="",
):
TftpContext.__init__(self, host, port, timeout, retries, localip)
# FIXME: should we refactor setting of these params?
self.file_to_transfer = filename
self.options = options
self.packethook = packethook
self.filelike_fileobj = False
# If the output object has a write() function,
# assume it is file-like.
if hasattr(output, "write"):
self.fileobj = output
self.filelike_fileobj = True
# If the output filename is -, then use stdout
elif output == "-":
self.fileobj = sys.stdout
self.filelike_fileobj = True
else:
self.fileobj = open(output, "wb")
log.debug("TftpContextClientDownload.__init__()")
log.debug(
"file_to_transfer = %s, options = %s"
% (self.file_to_transfer, self.options)
)
def __str__(self):
return f"{self.host}:{self.port} {self.state}"
def start(self):
"""Initiate the download."""
log.info("Sending tftp download request to %s" % self.host)
log.info(" filename -> %s" % self.file_to_transfer)
log.info(" options -> %s" % self.options)
self.metrics.start_time = time.time()
log.debug("Set metrics.start_time to %s" % self.metrics.start_time)
# FIXME: put this in a sendRRQ method?
pkt = TftpPacketRRQ()
pkt.filename = self.file_to_transfer
pkt.mode = "octet" # FIXME - shouldn't hardcode this
pkt.options = self.options
self.sock.sendto(pkt.encode().buffer, (self.host, self.port))
self.next_block = 1
self.last_pkt = pkt
self.state = TftpStateSentRRQ(self)
while self.state:
try:
log.debug("State is %s" % self.state)
self.cycle()
except TftpTimeout as err:
log.error(str(err))
self.retry_count += 1
if self.retry_count >= self.retries:
log.debug("hit max retries, giving up")
raise
else:
log.warning("resending last packet")
self.state.resendLast()
except TftpFileNotFoundError as err:
# If we received file not found, then we should not save the open
# output file or we'll be left with a size zero file. Delete it,
# if it exists.
log.error("Received File not found error")
if self.fileobj is not None and not self.filelike_fileobj:
if os.path.exists(self.fileobj.name):
log.debug("unlinking output file of %s", self.fileobj.name)
os.unlink(self.fileobj.name)
raise
def end(self):
"""Finish up the context."""
TftpContext.end(self, not self.filelike_fileobj)
self.metrics.end_time = time.time()
log.debug("Set metrics.end_time to %s" % self.metrics.end_time)
self.metrics.compute()
|
msoulier/tftpy
|
tftpy/TftpContexts.py
|
Python
|
mit
| 15,584
|
import base64
import fnmatch
import glob
import json
import os
import re
import shutil
import stat
import subprocess
import urllib.parse
import warnings
from datetime import datetime, timedelta
from distutils.util import strtobool
from distutils.version import LooseVersion
from typing import Tuple, Any, Union, List, Dict, Optional
from zipfile import ZipFile, ZIP_DEFLATED
import git
import google.auth
import sys
import yaml
from google.cloud import storage
import Tests.Marketplace.marketplace_statistics as mp_statistics
from Tests.Marketplace.marketplace_constants import PackFolders, Metadata, GCPConfig, BucketUploadFlow, PACKS_FOLDER, \
PackTags, PackIgnored, Changelog
from Utils.release_notes_generator import aggregate_release_notes_for_marketplace
from Tests.scripts.utils import logging_wrapper as logging
class Pack(object):
""" Class that manipulates and manages the upload of pack's artifact and metadata to cloud storage.
Args:
pack_name (str): Pack root folder name.
pack_path (str): Full path to pack folder.
Attributes:
PACK_INITIAL_VERSION (str): pack initial version that will be used as default.
CHANGELOG_JSON (str): changelog json full name, may be changed in the future.
README (str): pack's readme file name.
METADATA (str): pack's metadata file name, the one that will be deployed to cloud storage.
USER_METADATA (str): user metadata file name, the one that is located in the content repo.
EXCLUDE_DIRECTORIES (list): list of directories to exclude before uploading pack zip to storage.
AUTHOR_IMAGE_NAME (str): author image file name.
RELEASE_NOTES (str): release notes folder name.
"""
PACK_INITIAL_VERSION = "1.0.0"
CHANGELOG_JSON = "changelog.json"
README = "README.md"
USER_METADATA = "pack_metadata.json"
METADATA = "metadata.json"
AUTHOR_IMAGE_NAME = "Author_image.png"
EXCLUDE_DIRECTORIES = [PackFolders.TEST_PLAYBOOKS.value]
RELEASE_NOTES = "ReleaseNotes"
def __init__(self, pack_name, pack_path):
self._pack_name = pack_name
self._pack_path = pack_path
self._status = None
self._public_storage_path = ""
self._remove_files_list = [] # tracking temporary files, in order to delete in later step
self._server_min_version = "99.99.99" # initialized min version
self._latest_version = None # pack latest version found in changelog
self._support_type = None # initialized in load_user_metadata function
self._current_version = None # initialized in load_user_metadata function
self._hidden = False # initialized in load_user_metadata function
self._description = None # initialized in load_user_metadata function
self._display_name = None # initialized in load_user_metadata function
self._user_metadata = None # initialized in load_user_metadata function
self.eula_link = None # initialized in load_user_metadata function
self._is_feed = False # a flag that specifies if pack is a feed pack
self._downloads_count = 0 # number of pack downloads
self._bucket_url = None # URL of where the pack was uploaded.
self._aggregated = False # whether the pack's rn was aggregated or not.
self._aggregation_str = "" # the aggregation string msg when the pack versions are aggregated
self._create_date = None # initialized in enhance_pack_attributes function
self._update_date = None # initialized in enhance_pack_attributes function
self._uploaded_author_image = False # whether the pack author image was uploaded or not
self._uploaded_integration_images = [] # the list of all integration images that were uploaded for the pack
self._support_details = None # initialized in enhance_pack_attributes function
self._author = None # initialized in enhance_pack_attributes function
self._certification = None # initialized in enhance_pack_attributes function
self._legacy = None # initialized in enhance_pack_attributes function
self._author_image = None # initialized in upload_author_image function
self._displayed_integration_images = None # initialized in upload_integration_images function
self._price = 0 # initialized in enhance_pack_attributes function
self._is_private_pack = False # initialized in enhance_pack_attributes function
self._is_premium = False # initialized in enhance_pack_attributes function
self._vendor_id = None # initialized in enhance_pack_attributes function
self._partner_id = None # initialized in enhance_pack_attributes function
self._partner_name = None # initialized in enhance_pack_attributes function
self._content_commit_hash = None # initialized in enhance_pack_attributes function
self._preview_only = None # initialized in enhance_pack_attributes function
self._tags = None # initialized in enhance_pack_attributes function
self._categories = None # initialized in enhance_pack_attributes function
self._content_items = None # initialized in collect_content_items function
self._search_rank = None # initialized in enhance_pack_attributes function
self._related_integration_images = None # initialized in enhance_pack_attributes function
self._use_cases = None # initialized in enhance_pack_attributes function
self._keywords = None # initialized in enhance_pack_attributes function
self._dependencies = None # initialized in enhance_pack_attributes function
self._pack_statistics_handler = None # initialized in enhance_pack_attributes function
self._contains_transformer = False # initialized in collect_content_items function
self._contains_filter = False # initialized in collect_content_items function
self._is_missing_dependencies = False # a flag that specifies if pack is missing dependencies
@property
def name(self):
""" str: pack root folder name.
"""
return self._pack_name
@property
def path(self):
""" str: pack folder full path.
"""
return self._pack_path
@property
def latest_version(self):
""" str: pack latest version from sorted keys of changelog.json file.
"""
if not self._latest_version:
self._latest_version = self._get_latest_version()
return self._latest_version
@latest_version.setter
def latest_version(self, latest_version):
self._latest_version = latest_version
@property
def status(self):
""" str: current status of the packs.
"""
return self._status
@property
def is_feed(self):
"""
bool: whether the pack is a feed pack
"""
return self._is_feed
@is_feed.setter
def is_feed(self, is_feed):
""" setter of is_feed
"""
self._is_feed = is_feed
@status.setter # type: ignore[attr-defined,no-redef]
def status(self, status_value):
""" setter of pack current status.
"""
self._status = status_value
@property
def public_storage_path(self):
""" str: public gcs path of uploaded pack.
"""
return self._public_storage_path
@public_storage_path.setter
def public_storage_path(self, path_value):
""" setter of public gcs path of uploaded pack.
"""
self._public_storage_path = path_value
@property
def support_type(self):
""" str: support type of the pack.
"""
return self._support_type
@support_type.setter
def support_type(self, support_value):
""" setter of support type of the pack.
"""
self._support_type = support_value
@property
def current_version(self):
""" str: current version of the pack (different from latest_version).
"""
return self._current_version
@current_version.setter
def current_version(self, current_version_value):
""" setter of current version of the pack.
"""
self._current_version = current_version_value
@property
def hidden(self):
""" bool: internal content field for preventing pack from being displayed.
"""
return self._hidden
@hidden.setter
def hidden(self, hidden_value):
""" setter of hidden property of the pack.
"""
self._hidden = hidden_value
@property
def description(self):
""" str: Description of the pack (found in pack_metadata.json).
"""
return self._description
@description.setter
def description(self, description_value):
""" setter of description property of the pack.
"""
self._description = description_value
@property
def display_name(self):
""" str: Display name of the pack (found in pack_metadata.json).
"""
return self._display_name
@property
def user_metadata(self):
""" dict: the pack_metadata.
"""
return self._user_metadata
@display_name.setter # type: ignore[attr-defined,no-redef]
def display_name(self, display_name_value):
""" setter of display name property of the pack.
"""
self._display_name = display_name_value
@property
def server_min_version(self):
""" str: server min version according to collected items.
"""
if not self._server_min_version or self._server_min_version == "99.99.99":
return Metadata.SERVER_DEFAULT_MIN_VERSION
else:
return self._server_min_version
@property
def downloads_count(self):
""" str: packs downloads count.
"""
return self._downloads_count
@downloads_count.setter
def downloads_count(self, download_count_value):
""" setter of downloads count property of the pack.
"""
self._downloads_count = download_count_value
@property
def bucket_url(self):
""" str: pack bucket_url.
"""
return self._bucket_url
@bucket_url.setter
def bucket_url(self, bucket_url):
""" str: pack bucket_url.
"""
self._bucket_url = bucket_url
@property
def aggregated(self):
""" str: pack aggregated release notes or not.
"""
return self._aggregated
@property
def aggregation_str(self):
""" str: pack aggregated release notes or not.
"""
return self._aggregation_str
@property
def create_date(self):
""" str: pack create date.
"""
return self._create_date
@create_date.setter
def create_date(self, value):
self._create_date = value
@property
def update_date(self):
""" str: pack update date.
"""
return self._update_date
@update_date.setter
def update_date(self, value):
self._update_date = value
@property
def uploaded_author_image(self):
""" bool: whether the pack author image was uploaded or not.
"""
return self._uploaded_author_image
@uploaded_author_image.setter
def uploaded_author_image(self, uploaded_author_image):
""" bool: whether the pack author image was uploaded or not.
"""
self._uploaded_author_image = uploaded_author_image
@property
def uploaded_integration_images(self):
""" str: the list of uploaded integration images
"""
return self._uploaded_integration_images
@property
def is_missing_dependencies(self):
return self._is_missing_dependencies
def _get_latest_version(self):
""" Return latest semantic version of the pack.
In case that changelog.json file was not found, default value of 1.0.0 will be returned.
Otherwise, keys of semantic pack versions will be collected, sorted in descending order, and the latest version returned.
For additional information regarding changelog.json format go to issue #19786
Returns:
str: Pack latest version.
"""
changelog_path = os.path.join(self._pack_path, Pack.CHANGELOG_JSON)
if not os.path.exists(changelog_path):
return self.PACK_INITIAL_VERSION
with open(changelog_path, "r") as changelog_file:
changelog = json.load(changelog_file)
pack_versions = [LooseVersion(v) for v in changelog.keys()]
pack_versions.sort(reverse=True)
return pack_versions[0].vstring
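# Illustrative sketch of the version selection above, assuming changelog keys are
# semantic version strings (values are examples only):
#
#     from distutils.version import LooseVersion
#     changelog = {"1.0.0": {}, "1.2.0": {}, "1.10.0": {}}
#     latest = sorted((LooseVersion(v) for v in changelog), reverse=True)[0].vstring
#     # latest == "1.10.0" -- numeric, not lexicographic, ordering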
@staticmethod
def organize_integration_images(pack_integration_images: list, pack_dependencies_integration_images_dict: dict,
pack_dependencies_by_download_count: list):
""" By Issue #32038
1. Sort pack integration images by alphabetical order
2. Sort pack dependencies by download count
Pack integration images are shown before pack dependencies integration images
Args:
pack_integration_images (list): list of pack integration images
pack_dependencies_integration_images_dict: a mapping of pack dependency name to its integration images
pack_dependencies_by_download_count: a list of pack dependencies sorted by download count
Returns:
list: list of sorted integration images
"""
def sort_by_name(integration_image: dict):
return integration_image.get('name', '')
# sort packs integration images
pack_integration_images = sorted(pack_integration_images, key=sort_by_name)
# sort pack dependencies integration images
all_dep_int_imgs = pack_integration_images
for dep_pack_name in pack_dependencies_by_download_count:
if dep_pack_name in pack_dependencies_integration_images_dict:
logging.info(f'Adding {dep_pack_name} to deps int imgs')
dep_int_imgs = sorted(pack_dependencies_integration_images_dict[dep_pack_name], key=sort_by_name)
for dep_int_img in dep_int_imgs:
if dep_int_img not in all_dep_int_imgs: # avoid duplicates
all_dep_int_imgs.append(dep_int_img)
return all_dep_int_imgs
@staticmethod
def _get_all_pack_images(pack_integration_images, display_dependencies_images, dependencies_data,
pack_dependencies_by_download_count):
""" Returns data of uploaded pack integration images and it's path in gcs. Pack dependencies integration images
are added to that result as well.
Args:
pack_integration_images (list): list of integration images uploaded to gcs and their paths in gcs.
display_dependencies_images (list): list of pack names of additional dependencies images to display.
dependencies_data (dict): all level dependencies data.
pack_dependencies_by_download_count (list): list of pack names that are dependencies of the given pack
sorted by download count.
Returns:
list: collection of integration display name and it's path in gcs.
"""
dependencies_integration_images_dict: dict = {}
additional_dependencies_data = {k: v for k, v in dependencies_data.items() if k in display_dependencies_images}
for dependency_data in additional_dependencies_data.values():
for dep_int_img in dependency_data.get('integrations', []):
dep_int_img_gcs_path = dep_int_img.get('imagePath', '') # image public url
dep_int_img['name'] = Pack.remove_contrib_suffix_from_name(dep_int_img.get('name', ''))
dep_pack_name = os.path.basename(os.path.dirname(dep_int_img_gcs_path))
if dep_pack_name not in display_dependencies_images:
continue # skip if integration image is not part of displayed images of the given pack
if dep_int_img not in pack_integration_images: # avoid duplicates in list
if dep_pack_name in dependencies_integration_images_dict:
dependencies_integration_images_dict[dep_pack_name].append(dep_int_img)
else:
dependencies_integration_images_dict[dep_pack_name] = [dep_int_img]
return Pack.organize_integration_images(
pack_integration_images, dependencies_integration_images_dict, pack_dependencies_by_download_count
)
def is_feed_pack(self, yaml_content, yaml_type):
"""
Checks if an integration is a feed integration. If so, updates Pack._is_feed
Args:
yaml_content: The yaml content extracted by yaml.safe_load().
yaml_type: The type of object to check. Should be 'Playbook' or 'Integration'.
Returns:
Doesn't return
"""
if yaml_type == 'Integration':
if yaml_content.get('script', {}).get('feed', False) is True:
self._is_feed = True
if yaml_type == 'Playbook':
if yaml_content.get('name').startswith('TIM '):
self._is_feed = True
@staticmethod
def _clean_release_notes(release_notes_lines):
return re.sub(r'<\!--.*?-->', '', release_notes_lines, flags=re.DOTALL)
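# Illustrative example of the comment stripping above (input is an example only):
#
#     Pack._clean_release_notes("#### Integrations\n<!-- internal note -->\n- Fixed a bug.")
#     # -> "#### Integrations\n\n- Fixed a bug."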
@staticmethod
def _parse_pack_dependencies(first_level_dependencies, all_level_pack_dependencies_data):
""" Parses user defined dependencies and returns dictionary with relevant data about each dependency pack.
Args:
first_level_dependencies (dict): first level dependencies that were retrieved
from user pack_metadata.json file.
all_level_pack_dependencies_data (dict): all level pack dependencies data.
Returns:
dict: parsed dictionary with pack dependency data.
"""
parsed_result = {}
dependencies_data = {k: v for (k, v) in all_level_pack_dependencies_data.items()
if k in first_level_dependencies.keys() or k == GCPConfig.BASE_PACK}
for dependency_id, dependency_data in dependencies_data.items():
parsed_result[dependency_id] = {
"mandatory": first_level_dependencies.get(dependency_id, {}).get('mandatory', True),
"minVersion": dependency_data.get(Metadata.CURRENT_VERSION, Pack.PACK_INITIAL_VERSION),
"author": dependency_data.get('author', ''),
"name": dependency_data.get('name') if dependency_data.get('name') else dependency_id,
"certification": dependency_data.get('certification', 'certified')
}
return parsed_result
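# Illustrative shape of the parsed dependency mapping returned above; the pack id,
# author and versions below are examples only:
#
#     {
#         "Base": {"mandatory": True, "minVersion": "1.0.0", "author": "Cortex XSOAR",
#                  "name": "Base", "certification": "certified"},
#     }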
@staticmethod
def _create_support_section(support_type, support_url=None, support_email=None):
""" Creates support dictionary that is part of metadata.
In case of support type xsoar, adds default support url. If support is xsoar and support url is defined and
doesn't match xsoar default url, warning is raised.
Args:
support_type (str): support type of pack.
support_url (str): support full url.
support_email (str): support email address.
Returns:
dict: supported data dictionary.
"""
support_details = {}
if support_url: # set support url from user input
support_details['url'] = support_url
elif support_type == Metadata.XSOAR_SUPPORT: # in case support type is xsoar, set default xsoar support url
support_details['url'] = Metadata.XSOAR_SUPPORT_URL
# add support email if defined
if support_email:
support_details['email'] = support_email
return support_details
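# Illustrative call and result for the support section helper (values are examples only):
#
#     Pack._create_support_section("partner", support_url="https://example.com/support",
#                                  support_email="support@example.com")
#     # -> {"url": "https://example.com/support", "email": "support@example.com"}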
@staticmethod
def _get_author(support_type, author=None):
""" Returns pack author. In case support type is xsoar, more additional validation are applied.
Args:
support_type (str): support type of pack.
author (str): author of the pack.
Returns:
str: returns author from the input.
"""
if support_type == Metadata.XSOAR_SUPPORT and not author:
return Metadata.XSOAR_AUTHOR # returned xsoar default author
elif support_type == Metadata.XSOAR_SUPPORT and author != Metadata.XSOAR_AUTHOR:
logging.warning(f"{author} author doest not match {Metadata.XSOAR_AUTHOR} default value")
return author
else:
return author
@staticmethod
def _get_certification(support_type, certification=None):
""" Returns pack certification.
In case support type is xsoar or partner, CERTIFIED is returned.
In case support is not xsoar or partner but pack_metadata has certification field, certification value will be
taken from pack_metadata defined value.
Otherwise empty certification value (empty string) will be returned
Args:
support_type (str): support type of pack.
certification (str): certification value from pack_metadata, if exists.
Returns:
str: certification value
"""
if support_type in [Metadata.XSOAR_SUPPORT, Metadata.PARTNER_SUPPORT]:
return Metadata.CERTIFIED
elif certification:
return certification
else:
return ""
def _get_tags_from_landing_page(self, landing_page_sections: dict) -> set:
"""
Build the pack's tag list according to the user metadata and the landingPage sections file.
Args:
landing_page_sections (dict): landingPage sections and the packs in each one of them.
Returns:
set: Pack's tags.
"""
tags = set()
sections = landing_page_sections.get('sections', []) if landing_page_sections else []
for section in sections:
if self._pack_name in landing_page_sections.get(section, []):
tags.add(section)
return tags
def _parse_pack_metadata(self, build_number, commit_hash):
""" Parses pack metadata according to issue #19786 and #20091. Part of field may change over the time.
Args:
build_number (str): circleCI build number.
commit_hash (str): current commit hash.
Returns:
dict: parsed pack metadata.
"""
pack_metadata = {
Metadata.NAME: self._display_name or self._pack_name,
Metadata.ID: self._pack_name,
Metadata.DESCRIPTION: self._description or self._pack_name,
Metadata.CREATED: self._create_date,
Metadata.UPDATED: self._update_date,
Metadata.LEGACY: self._legacy,
Metadata.SUPPORT: self._support_type,
Metadata.SUPPORT_DETAILS: self._support_details,
Metadata.EULA_LINK: self.eula_link,
Metadata.AUTHOR: self._author,
Metadata.AUTHOR_IMAGE: self._author_image,
Metadata.CERTIFICATION: self._certification,
Metadata.PRICE: self._price,
Metadata.SERVER_MIN_VERSION: self.user_metadata.get(Metadata.SERVER_MIN_VERSION) or self.server_min_version,
Metadata.CURRENT_VERSION: self.user_metadata.get(Metadata.CURRENT_VERSION, ''),
Metadata.VERSION_INFO: build_number,
Metadata.COMMIT: commit_hash,
Metadata.DOWNLOADS: self._downloads_count,
Metadata.TAGS: list(self._tags or []),
Metadata.CATEGORIES: self._categories,
Metadata.CONTENT_ITEMS: self._content_items,
Metadata.SEARCH_RANK: self._search_rank,
Metadata.INTEGRATIONS: self._related_integration_images,
Metadata.USE_CASES: self._use_cases,
Metadata.KEY_WORDS: self._keywords,
Metadata.DEPENDENCIES: self._dependencies
}
if self._is_private_pack:
pack_metadata.update({
Metadata.PREMIUM: self._is_premium,
Metadata.VENDOR_ID: self._vendor_id,
Metadata.PARTNER_ID: self._partner_id,
Metadata.PARTNER_NAME: self._partner_name,
Metadata.CONTENT_COMMIT_HASH: self._content_commit_hash,
Metadata.PREVIEW_ONLY: self._preview_only
})
return pack_metadata
def _load_pack_dependencies(self, index_folder_path, pack_names):
""" Loads dependencies metadata and returns mapping of pack id and it's loaded data.
Args:
index_folder_path (str): full path to download index folder.
pack_names (set): List of all packs.
Returns:
dict: pack id as key and loaded metadata of packs as value.
bool: True if the pack is missing dependencies, False otherwise.
"""
dependencies_data_result = {}
first_level_dependencies = self.user_metadata.get(Metadata.DEPENDENCIES, {})
all_level_displayed_dependencies = self.user_metadata.get(Metadata.DISPLAYED_IMAGES, [])
dependencies_ids = {d for d in first_level_dependencies.keys()}
dependencies_ids.update(all_level_displayed_dependencies)
if self._pack_name != GCPConfig.BASE_PACK: # check that current pack isn't Base Pack in order to prevent loop
dependencies_ids.add(GCPConfig.BASE_PACK) # Base pack is always added as pack dependency
for dependency_pack_id in dependencies_ids:
dependency_metadata_path = os.path.join(index_folder_path, dependency_pack_id, Pack.METADATA)
if os.path.exists(dependency_metadata_path):
with open(dependency_metadata_path, 'r') as metadata_file:
dependency_metadata = json.load(metadata_file)
dependencies_data_result[dependency_pack_id] = dependency_metadata
elif dependency_pack_id in pack_names:
# If the pack is dependent on a new pack (which is not yet in the index.json)
# we will note that it is missing dependencies.
# And finally after updating all the packages in index.json.
# We will go over the pack again to add what was missing
self._is_missing_dependencies = True
logging.warning(f"{self._pack_name} pack dependency with id {dependency_pack_id} "
f"was not found in index, marking it as missing dependencies - to be resolved in next"
f" iteration over packs")
else:
logging.warning(f"{self._pack_name} pack dependency with id {dependency_pack_id} was not found")
return dependencies_data_result, self._is_missing_dependencies
@staticmethod
def _get_updated_changelog_entry(changelog: dict, version: str, release_notes: str = None,
version_display_name: str = None, build_number_with_prefix: str = None,
released_time: str = None):
"""
Args:
changelog (dict): The changelog from the production bucket.
version (str): The version that is the key in the changelog of the entry wished to be updated.
release_notes (str): The release notes lines to update the entry with.
version_display_name (str): The version display name to update the entry with.
build_number_with_prefix (str): the build number to modify the entry to, including the prefix R (if present).
released_time: The released time to update the entry with.
"""
changelog_entry = changelog.get(version)
if not changelog_entry:
raise Exception('The given version is not a key in the changelog')
version_display_name = \
version_display_name if version_display_name else changelog_entry[Changelog.DISPLAY_NAME].split('-')[0]
build_number_with_prefix = \
build_number_with_prefix if build_number_with_prefix else \
changelog_entry[Changelog.DISPLAY_NAME].split('-')[1]
changelog_entry[Changelog.RELEASE_NOTES] = release_notes if release_notes else changelog_entry[
Changelog.RELEASE_NOTES]
changelog_entry[Changelog.DISPLAY_NAME] = f'{version_display_name} - {build_number_with_prefix}'
changelog_entry[Changelog.RELEASED] = released_time if released_time else changelog_entry[Changelog.RELEASED]
return changelog_entry
def _create_changelog_entry(self, release_notes, version_display_name, build_number, pack_was_modified=False,
new_version=True, initial_release=False):
""" Creates dictionary entry for changelog.
Args:
release_notes (str): release notes md.
version_display_name (str): display name version.
build_number (str): current build number.
pack_was_modified (bool): whether the pack was modified.
new_version (bool): whether the entry is new or not. If not new, R letter will be appended to build number.
initial_release (bool): whether the entry is an initial release or not.
Returns:
dict: release notes entry of changelog
"""
if new_version:
return {Changelog.RELEASE_NOTES: release_notes,
Changelog.DISPLAY_NAME: f'{version_display_name} - {build_number}',
Changelog.RELEASED: datetime.utcnow().strftime(Metadata.DATE_FORMAT)}
elif initial_release:
return {Changelog.RELEASE_NOTES: release_notes,
Changelog.DISPLAY_NAME: f'{version_display_name} - {build_number}',
Changelog.RELEASED: self._create_date}
elif pack_was_modified:
return {Changelog.RELEASE_NOTES: release_notes,
Changelog.DISPLAY_NAME: f'{version_display_name} - R{build_number}',
Changelog.RELEASED: datetime.utcnow().strftime(Metadata.DATE_FORMAT)}
return {}
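# Illustrative changelog entry produced above for a brand new version, assuming the
# Changelog constants map to the releaseNotes/displayName/released keys (values are
# examples only):
#
#     {
#         "releaseNotes": "#### Integrations\n##### My Integration\n- Fixed an issue.",
#         "displayName": "1.2.0 - 123456",
#         "released": "2021-01-01T00:00:00Z"
#     }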
def remove_unwanted_files(self, delete_test_playbooks=True):
""" Iterates over pack folder and removes hidden files and unwanted folders.
Args:
delete_test_playbooks (bool): whether to delete test playbooks folder.
Returns:
bool: whether the operation succeeded.
"""
task_status = True
try:
for directory in Pack.EXCLUDE_DIRECTORIES:
if delete_test_playbooks and os.path.isdir(f'{self._pack_path}/{directory}'):
shutil.rmtree(f'{self._pack_path}/{directory}')
logging.info(f"Deleted {directory} directory from {self._pack_name} pack")
for root, dirs, files in os.walk(self._pack_path, topdown=True):
for pack_file in files:
full_file_path = os.path.join(root, pack_file)
# removing unwanted files
if pack_file.startswith('.') \
or pack_file in [Pack.AUTHOR_IMAGE_NAME, Pack.USER_METADATA] \
or pack_file in self._remove_files_list:
os.remove(full_file_path)
logging.info(f"Deleted pack {pack_file} file for {self._pack_name} pack")
continue
except Exception:
task_status = False
logging.exception(f"Failed to delete ignored files for pack {self._pack_name}")
finally:
return task_status
def sign_pack(self, signature_string=None):
""" Signs pack folder and creates signature file.
Args:
signature_string (str): Base64 encoded string used to sign the pack.
Returns:
bool: whether the operation succeeded.
"""
task_status = False
try:
if signature_string:
with open("keyfile", "wb") as keyfile:
keyfile.write(signature_string.encode())
arg = f'./signDirectory {self._pack_path} keyfile base64'
signing_process = subprocess.Popen(arg, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
output, err = signing_process.communicate()
if err:
logging.error(f"Failed to sign pack for {self._pack_name} - {str(err)}")
return
logging.info(f"Signed {self._pack_name} pack successfully")
else:
logging.info(f"No signature provided. Skipped signing {self._pack_name} pack")
task_status = True
except Exception:
logging.exception(f"Failed to sign pack for {self._pack_name}")
finally:
return task_status
@staticmethod
def encrypt_pack(zip_pack_path, pack_name, encryption_key, extract_destination_path,
private_artifacts_dir, secondary_encryption_key):
""" decrypt the pack in order to see that the pack was encrypted in the first place.
Args:
zip_pack_path (str): The path to the encrypted zip pack.
pack_name (str): The name of the pack that should be encrypted.
encryption_key (str): The key which we encrypt the pack with.
extract_destination_path (str): The path in which the pack resides.
private_artifacts_dir (str): The chosen name for the private artifacts directory.
secondary_encryption_key (str): A second key which we encrypt the pack with.
"""
try:
current_working_dir = os.getcwd()
shutil.copy('./encryptor', os.path.join(extract_destination_path, 'encryptor'))
os.chmod(os.path.join(extract_destination_path, 'encryptor'), stat.S_IXOTH)
os.chdir(extract_destination_path)
subprocess.call('chmod +x ./encryptor', shell=True)
output_file = zip_pack_path.replace("_not_encrypted.zip", ".zip")
full_command = f'./encryptor ./{pack_name}_not_encrypted.zip {output_file} "{encryption_key}"'
subprocess.call(full_command, shell=True)
secondary_encryption_key_output_file = zip_pack_path.replace("_not_encrypted.zip", ".enc2.zip")
full_command_with_secondary_encryption = f'./encryptor ./{pack_name}_not_encrypted.zip ' \
f'{secondary_encryption_key_output_file}' \
f' "{secondary_encryption_key}"'
subprocess.call(full_command_with_secondary_encryption, shell=True)
new_artefacts = os.path.join(current_working_dir, private_artifacts_dir)
if os.path.exists(new_artefacts):
shutil.rmtree(new_artefacts)
os.mkdir(path=new_artefacts)
shutil.copy(zip_pack_path, os.path.join(new_artefacts, f'{pack_name}_not_encrypted.zip'))
shutil.copy(output_file, os.path.join(new_artefacts, f'{pack_name}.zip'))
shutil.copy(secondary_encryption_key_output_file, os.path.join(new_artefacts, f'{pack_name}.enc2.zip'))
os.chdir(current_working_dir)
except (subprocess.CalledProcessError, shutil.Error) as error:
print(f"Error while trying to encrypt pack. {error}")
def decrypt_pack(self, encrypted_zip_pack_path, decryption_key):
""" decrypt the pack in order to see that the pack was encrypted in the first place.
Args:
encrypted_zip_pack_path (str): The path for the encrypted zip pack.
decryption_key (str): The key which we can decrypt the pack with.
Returns:
bool: whether the decryption succeeded.
"""
try:
current_working_dir = os.getcwd()
extract_destination_path = f'{current_working_dir}/decrypt_pack_dir'
os.mkdir(extract_destination_path)
shutil.copy('./decryptor', os.path.join(extract_destination_path, 'decryptor'))
secondary_encrypted_pack_path = os.path.join(extract_destination_path, 'encrypted_zip_pack.zip')
shutil.copy(encrypted_zip_pack_path, secondary_encrypted_pack_path)
os.chmod(os.path.join(extract_destination_path, 'decryptor'), stat.S_IXOTH)
output_decrypt_file_path = f"{extract_destination_path}/decrypt_pack.zip"
os.chdir(extract_destination_path)
subprocess.call('chmod +x ./decryptor', shell=True)
full_command = f'./decryptor {secondary_encrypted_pack_path} {output_decrypt_file_path} "{decryption_key}"'
process = subprocess.Popen(full_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
stdout, stderr = process.communicate()
shutil.rmtree(extract_destination_path)
os.chdir(current_working_dir)
if stdout:
logging.info(str(stdout))
if stderr:
logging.error(f"Error: Premium pack {self._pack_name} should be encrypted, but isn't.")
return False
return True
except subprocess.CalledProcessError as error:
logging.exception(f"Error while trying to decrypt pack. {error}")
return False
def is_pack_encrypted(self, encrypted_zip_pack_path, decryption_key):
""" Checks if the pack is encrypted by trying to decrypt it.
Args:
encrypted_zip_pack_path (str): The path for the encrypted zip pack.
decryption_key (str): The key which we can decrypt the pack with.
Returns:
bool: whether the pack is encrypted.
"""
return self.decrypt_pack(encrypted_zip_pack_path, decryption_key)
def zip_pack(self, extract_destination_path="", pack_name="", encryption_key="",
private_artifacts_dir='private_artifacts', secondary_encryption_key=""):
""" Zips pack folder.
Returns:
bool: whether the operation succeeded.
str: full path to created pack zip.
"""
zip_pack_path = f"{self._pack_path}.zip" if not encryption_key else f"{self._pack_path}_not_encrypted.zip"
task_status = False
try:
with ZipFile(zip_pack_path, 'w', ZIP_DEFLATED) as pack_zip:
for root, dirs, files in os.walk(self._pack_path, topdown=True):
for f in files:
full_file_path = os.path.join(root, f)
relative_file_path = os.path.relpath(full_file_path, self._pack_path)
pack_zip.write(filename=full_file_path, arcname=relative_file_path)
if encryption_key:
self.encrypt_pack(zip_pack_path, pack_name, encryption_key, extract_destination_path,
private_artifacts_dir, secondary_encryption_key)
task_status = True
logging.success(f"Finished zipping {self._pack_name} pack.")
except Exception:
logging.exception(f"Failed in zipping {self._pack_name} folder")
finally:
# If the pack needs to be encrypted, it is initially at a different location than this final path
final_path_to_zipped_pack = f"{self._pack_path}.zip"
return task_status, final_path_to_zipped_pack
def detect_modified(self, content_repo, index_folder_path, current_commit_hash, previous_commit_hash):
""" Detects pack modified files.
The diff is done between current commit and previous commit that was saved in metadata that was downloaded from
index. In case that no commit was found in index (initial run), the default value will be set to previous commit
from origin/master.
Args:
content_repo (git.repo.base.Repo): content repo object.
index_folder_path (str): full path to downloaded index folder.
current_commit_hash (str): last commit hash of head.
previous_commit_hash (str): the previous commit to diff with.
Returns:
bool: whether the operation succeeded.
list: list of RN files that were modified.
bool: whether pack was modified and override will be required.
"""
task_status = False
modified_rn_files_paths = []
pack_was_modified = False
try:
pack_index_metadata_path = os.path.join(index_folder_path, self._pack_name, Pack.METADATA)
if not os.path.exists(pack_index_metadata_path):
logging.info(f"{self._pack_name} pack was not found in index, skipping detection of modified pack.")
task_status = True
return
with open(pack_index_metadata_path, 'r') as metadata_file:
downloaded_metadata = json.load(metadata_file)
previous_commit_hash = downloaded_metadata.get(Metadata.COMMIT, previous_commit_hash)
# set 2 commits by hash value in order to check the modified files of the diff
current_commit = content_repo.commit(current_commit_hash)
previous_commit = content_repo.commit(previous_commit_hash)
for modified_file in current_commit.diff(previous_commit):
if modified_file.a_path.startswith(PACKS_FOLDER):
modified_file_path_parts = os.path.normpath(modified_file.a_path).split(os.sep)
if modified_file_path_parts[1] and modified_file_path_parts[1] == self._pack_name:
if not is_ignored_pack_file(modified_file_path_parts):
logging.info(f"Detected modified files in {self._pack_name} pack")
task_status, pack_was_modified = True, True
modified_rn_files_paths.append(modified_file.a_path)
else:
logging.debug(f'{modified_file.a_path} is an ignored file')
task_status = True
if pack_was_modified:
# Make sure the modification is not only of release notes files, if so count that as not modified
pack_was_modified = not all(self.RELEASE_NOTES in path for path in modified_rn_files_paths)
# Filter modifications in release notes config JSON file - they will be handled later on.
modified_rn_files_paths = [path_ for path_ in modified_rn_files_paths if path_.endswith('.md')]
return
except Exception:
logging.exception(f"Failed in detecting modified files of {self._pack_name} pack")
finally:
return task_status, modified_rn_files_paths, pack_was_modified
def upload_to_storage(self, zip_pack_path, latest_version, storage_bucket, override_pack, storage_base_path,
private_content=False, pack_artifacts_path=None):
""" Manages the upload of pack zip artifact to correct path in cloud storage.
The zip pack will be uploaded to the following path: /content/packs/pack_name/pack_latest_version.
In case the zip pack artifact already exists at the constructed path, the upload will be skipped.
If the flag override_pack is set to True, the pack will be forced for upload.
Args:
zip_pack_path (str): full path to pack zip artifact.
latest_version (str): pack latest version.
storage_bucket (google.cloud.storage.bucket.Bucket): google cloud storage bucket.
override_pack (bool): whether to override existing pack.
private_content (bool): Is being used in a private content build.
pack_artifacts_path (str): Path to where we are saving pack artifacts.
Returns:
bool: whether the operation succeeded.
bool: True in case the pack already exists at the targeted path and the upload was skipped, otherwise False.
"""
task_status = True
try:
version_pack_path = os.path.join(storage_base_path, self._pack_name, latest_version)
existing_files = [f.name for f in storage_bucket.list_blobs(prefix=version_pack_path)]
if existing_files and not override_pack:
logging.warning(f"The following packs already exist at storage: {', '.join(existing_files)}")
logging.warning(f"Skipping step of uploading {self._pack_name}.zip to storage.")
return task_status, True, None
pack_full_path = os.path.join(version_pack_path, f"{self._pack_name}.zip")
blob = storage_bucket.blob(pack_full_path)
blob.cache_control = "no-cache,max-age=0" # disabling caching for pack blob
with open(zip_pack_path, "rb") as pack_zip:
blob.upload_from_file(pack_zip)
if private_content:
secondary_encryption_key_pack_name = f"{self._pack_name}.enc2.zip"
secondary_encryption_key_bucket_path = os.path.join(version_pack_path,
secondary_encryption_key_pack_name)
# In some cases the path given is actually a zip.
if pack_artifacts_path.endswith('content_packs.zip'):
_pack_artifacts_path = pack_artifacts_path.replace('/content_packs.zip', '')
else:
_pack_artifacts_path = pack_artifacts_path
secondary_encryption_key_artifacts_path = zip_pack_path.replace(f'{self._pack_name}',
f'{self._pack_name}.enc2')
blob = storage_bucket.blob(secondary_encryption_key_bucket_path)
blob.cache_control = "no-cache,max-age=0" # disabling caching for pack blob
with open(secondary_encryption_key_artifacts_path, "rb") as pack_zip:
blob.upload_from_file(pack_zip)
print(
f"Copying {secondary_encryption_key_artifacts_path} to {_pack_artifacts_path}/"
f"packs/{self._pack_name}.zip")
shutil.copy(secondary_encryption_key_artifacts_path,
f'{_pack_artifacts_path}/packs/{self._pack_name}.zip')
self.public_storage_path = blob.public_url
logging.success(f"Uploaded {self._pack_name} pack to {pack_full_path} path.")
return task_status, False, pack_full_path
except Exception:
task_status = False
logging.exception(f"Failed in uploading {self._pack_name} pack to gcs.")
return task_status, True, None
def copy_and_upload_to_storage(self, production_bucket, build_bucket, successful_packs_dict, storage_base_path,
build_bucket_base_path):
""" Manages the copy of pack zip artifact from the build bucket to the production bucket.
The zip pack will be copied to following path: /content/packs/pack_name/pack_latest_version if
the pack exists in the successful_packs_dict from Prepare content step in Create Instances job.
Args:
production_bucket (google.cloud.storage.bucket.Bucket): google cloud production bucket.
build_bucket (google.cloud.storage.bucket.Bucket): google cloud build bucket.
successful_packs_dict (dict): the dict of all packs were uploaded in prepare content step
storage_base_path (str): The target destination of the upload in the target bucket.
build_bucket_base_path (str): The path of the build bucket in gcp.
Returns:
bool: Status - whether the operation succeeded.
bool: Skipped pack - True in case the pack already exists at the targeted path and the copy process was
skipped, otherwise False.
"""
pack_not_uploaded_in_prepare_content = self._pack_name not in successful_packs_dict
if pack_not_uploaded_in_prepare_content:
logging.warning("The following packs already exist at storage.")
logging.warning(f"Skipping step of uploading {self._pack_name}.zip to storage.")
return True, True
latest_version = successful_packs_dict[self._pack_name][BucketUploadFlow.LATEST_VERSION]
self._latest_version = latest_version
build_version_pack_path = os.path.join(build_bucket_base_path, self._pack_name, latest_version)
# Verifying that the latest version of the pack has been uploaded to the build bucket
existing_bucket_version_files = [f.name for f in build_bucket.list_blobs(prefix=build_version_pack_path)]
if not existing_bucket_version_files:
logging.error(f"{self._pack_name} latest version ({latest_version}) was not found on build bucket at "
f"path {build_version_pack_path}.")
return False, False
# We upload the pack zip object taken from the build bucket into the production bucket
prod_version_pack_path = os.path.join(storage_base_path, self._pack_name, latest_version)
prod_pack_zip_path = os.path.join(prod_version_pack_path, f'{self._pack_name}.zip')
build_pack_zip_path = os.path.join(build_version_pack_path, f'{self._pack_name}.zip')
build_pack_zip_blob = build_bucket.blob(build_pack_zip_path)
try:
copied_blob = build_bucket.copy_blob(
blob=build_pack_zip_blob, destination_bucket=production_bucket, new_name=prod_pack_zip_path
)
copied_blob.cache_control = "no-cache,max-age=0" # disabling caching for pack blob
self.public_storage_path = copied_blob.public_url
task_status = copied_blob.exists()
except Exception as e:
pack_suffix = os.path.join(self._pack_name, latest_version, f'{self._pack_name}.zip')
logging.exception(f"Failed copying {pack_suffix}. Additional Info: {str(e)}")
return False, False
if not task_status:
logging.error(f"Failed in uploading {self._pack_name} pack to production gcs.")
else:
# Determine if pack versions were aggregated during upload
pack_uploaded_in_prepare_content = not pack_not_uploaded_in_prepare_content
if pack_uploaded_in_prepare_content:
agg_str = successful_packs_dict[self._pack_name].get('aggregated')
if agg_str:
self._aggregated = True
self._aggregation_str = agg_str
logging.success(f"Uploaded {self._pack_name} pack to {prod_pack_zip_path} path.")
return task_status, False
def get_changelog_latest_rn(self, changelog_index_path: str) -> Tuple[dict, LooseVersion, str]:
"""
Returns the changelog file contents and the last version of rn in the changelog file
Args:
changelog_index_path (str): the changelog.json file path in the index
Returns: the changelog file contents, the last version, and contents of rn in the changelog file
"""
logging.info(f"Found Changelog for: {self._pack_name}")
if os.path.exists(changelog_index_path):
try:
with open(changelog_index_path, "r") as changelog_file:
changelog = json.load(changelog_file)
except json.JSONDecodeError:
changelog = {}
else:
changelog = {}
# get the latest rn version in the changelog.json file
changelog_rn_versions = [LooseVersion(ver) for ver in changelog]
# no need to check if changelog_rn_versions isn't empty because changelog file exists
changelog_latest_rn_version = max(changelog_rn_versions)
changelog_latest_rn = changelog[changelog_latest_rn_version.vstring]["releaseNotes"]
return changelog, changelog_latest_rn_version, changelog_latest_rn
def get_modified_release_notes_lines(self, release_notes_dir: str, new_release_notes_versions: list,
changelog: dict, modified_rn_files: list):
"""
In the case where an rn file was changed, this function returns the new content
of the release note in the format suitable for the changelog file.
In general, if two rn files are created between two consecutive upload runs (i.e. pack was changed twice),
the rn files are being aggregated and the latter version is the one that is being used as a key in the changelog
file, and the aggregated rns as the value.
Hence, in the case of changing an rn as such, this function re-aggregates all of the rns under the
corresponding version key, and returns the aggregated data, in the right format, as value under that key.
Args:
release_notes_dir (str): the path to the release notes dir
new_release_notes_versions (list): a list of the new versions of release notes in the pack since the
last upload. This means they were already handled on this upload run (and aggregated if needed).
changelog (dict): the changelog from the production bucket.
modified_rn_files (list): a list of the rn files that were modified according to the last commit in
'filename.md' format.
Returns:
A dict of modified version and their release notes contents, for modified
in the current index file
"""
modified_versions_dict = {}
for rn_filename in modified_rn_files:
version = underscore_file_name_to_dotted_version(rn_filename)
# Should only apply on modified files that are not the last rn file
if version in new_release_notes_versions:
continue
# The case where the version is a key in the changelog file,
# and the value is not an aggregated release note
if is_the_only_rn_in_block(release_notes_dir, version, changelog):
logging.info("The version is a key in the changelog file and by itself in the changelog block")
with open(os.path.join(release_notes_dir, rn_filename), 'r') as rn_file:
rn_lines = rn_file.read()
modified_versions_dict[version] = self._clean_release_notes(rn_lines).strip()
# The case where the version is not a key in the changelog file or it is a key of aggregated content
else:
logging.debug(f'The "{version}" version is not a key in the changelog file or it is a key of'
f' aggregated content')
same_block_versions_dict, higher_nearest_version = self.get_same_block_versions(
release_notes_dir, version, changelog)
modified_versions_dict[higher_nearest_version] = aggregate_release_notes_for_marketplace(
same_block_versions_dict)
return modified_versions_dict
def get_same_block_versions(self, release_notes_dir: str, version: str, changelog: dict):
"""
Get a dict of the version as key and rn data as value of all of the versions that are in the same
block in the changelog file as the given version (these are the versions that were aggregates together
during a single upload priorly).
Args:
release_notes_dir (str): the path to the release notes dir
version (str): the wanted version
changelog (dict): the changelog from the production bucket.
Returns:
A dict of version, rn data for all corresponding versions, and the highest version among those keys as str
"""
lowest_version = [LooseVersion(Pack.PACK_INITIAL_VERSION)]
lower_versions: list = []
higher_versions: list = []
same_block_versions_dict: dict = dict()
for item in changelog.keys(): # divide the versions into lists of lower and higher than given version
(lower_versions if LooseVersion(item) < version else higher_versions).append(LooseVersion(item))
higher_nearest_version = min(higher_versions)
lower_versions = lower_versions + lowest_version # if the version is 1.0.0, ensure lower_versions is not empty
lower_nearest_version = max(lower_versions)
for rn_filename in filter_dir_files_by_extension(release_notes_dir, '.md'):
current_version = underscore_file_name_to_dotted_version(rn_filename)
# Catch all versions that are in the same block
if lower_nearest_version < LooseVersion(current_version) <= higher_nearest_version:
with open(os.path.join(release_notes_dir, rn_filename), 'r') as rn_file:
rn_lines = rn_file.read()
same_block_versions_dict[current_version] = self._clean_release_notes(rn_lines).strip()
return same_block_versions_dict, higher_nearest_version.vstring
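# Worked example of the block selection above (versions are illustrative): if the
# changelog keys are 1.0.0, 1.2.0 (an aggregation of 1.1.0-1.2.0) and 1.3.0, then for
# version "1.1.0" the nearest lower key is 1.0.0 and the nearest higher key is 1.2.0,
# so every RN file with 1.0.0 < v <= 1.2.0 is returned under the "1.2.0" block.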
def get_release_notes_lines(self, release_notes_dir: str, changelog_latest_rn_version: LooseVersion,
changelog_latest_rn: str) -> Tuple[str, str, list]:
"""
Prepares the release notes contents for the new release notes entry
Args:
release_notes_dir (str): the path to the release notes dir
changelog_latest_rn_version (LooseVersion): the last version of release notes in the changelog.json file
changelog_latest_rn (str): the last release notes in the changelog.json file
Returns: The release notes contents, the latest release notes version (in the release notes directory),
and a list of the new rn versions that this is the first time they have been uploaded.
"""
found_versions: list = list()
pack_versions_dict: dict = dict()
for filename in sorted(filter_dir_files_by_extension(release_notes_dir, '.md')):
version = underscore_file_name_to_dotted_version(filename)
# Aggregate all rn files that are bigger than what we have in the changelog file
if LooseVersion(version) > changelog_latest_rn_version:
with open(os.path.join(release_notes_dir, filename), 'r') as rn_file:
rn_lines = rn_file.read()
pack_versions_dict[version] = self._clean_release_notes(rn_lines).strip()
found_versions.append(LooseVersion(version))
latest_release_notes_version = max(found_versions)
latest_release_notes_version_str = latest_release_notes_version.vstring
logging.info(f"Latest ReleaseNotes version is: {latest_release_notes_version_str}")
if len(pack_versions_dict) > 1:
# In case that there is more than 1 new release notes file, wrap all release notes together for one
# changelog entry
aggregation_str = f"[{', '.join(lv.vstring for lv in found_versions if lv > changelog_latest_rn_version)}]"\
f" => {latest_release_notes_version_str}"
logging.info(f"Aggregating ReleaseNotes versions: {aggregation_str}")
release_notes_lines = aggregate_release_notes_for_marketplace(pack_versions_dict)
self._aggregated = True
self._aggregation_str = aggregation_str
elif len(pack_versions_dict) == 1:
# In case where there is only one new release notes file
release_notes_lines = pack_versions_dict[latest_release_notes_version_str]
else:
# In case where the pack is up to date, i.e. latest changelog is latest rn file
# We should take the release notes from the index as it might have been aggregated
logging.info(f'No new RN file was detected for pack {self._pack_name}, taking latest RN from the index')
release_notes_lines = changelog_latest_rn
new_release_notes_versions = list(pack_versions_dict.keys())
return release_notes_lines, latest_release_notes_version_str, new_release_notes_versions
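# Worked example of the aggregation above (versions are illustrative): with new RN
# files 1_0_1.md and 1_0_2.md that are both newer than the changelog's latest entry,
# both notes are merged into a single entry keyed by 1.0.2 and the aggregation string
# reads "[1.0.1, 1.0.2] => 1.0.2".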
def assert_upload_bucket_version_matches_release_notes_version(self,
changelog: dict,
latest_release_notes: str) -> None:
"""
Sometimes, when the current bucket has not been merged from master, there could be another version in the
upload bucket that does not exist in the current branch.
This case can cause unpredicted behavior and we want to fail the build.
This method validates that this is not the case in the current build, and if it does - fails it with an
assertion error.
Args:
changelog: The changelog from the production bucket.
latest_release_notes: The latest release notes version string in the current branch
"""
changelog_latest_release_notes = max(changelog, key=lambda k: LooseVersion(k)) # pylint: disable=W0108
assert LooseVersion(latest_release_notes) >= LooseVersion(changelog_latest_release_notes), \
f'{self._pack_name}: Version mismatch detected between upload bucket and current branch\n' \
f'Upload bucket version: {changelog_latest_release_notes}\n' \
f'current branch version: {latest_release_notes}\n' \
'Please Merge from master and rebuild'
def get_rn_files_names(self, modified_rn_files_paths):
"""
Args:
modified_rn_files_paths: a list containing all modified files in the current pack, generated
by comparing the old and the new commit hash.
Returns:
The names of the modified release notes files out of the given list only,
as in the names of the files that are under ReleaseNotes directory in the format of 'filename.md'.
"""
modified_rn_files = []
for file_path in modified_rn_files_paths:
modified_file_path_parts = os.path.normpath(file_path).split(os.sep)
if self.RELEASE_NOTES in modified_file_path_parts:
modified_rn_files.append(modified_file_path_parts[-1])
return modified_rn_files
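# Illustrative call with example paths:
#
#     pack.get_rn_files_names(["Packs/MyPack/ReleaseNotes/1_0_1.md",
#                              "Packs/MyPack/pack_metadata.json"])
#     # -> ["1_0_1.md"]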
def prepare_release_notes(self, index_folder_path, build_number, pack_was_modified=False,
modified_rn_files_paths=None):
"""
Handles the creation and update of the changelog.json files.
Args:
index_folder_path (str): Path to the unzipped index json.
build_number (str): circleCI build number.
pack_was_modified (bool): whether the pack modified or not.
modified_rn_files_paths (list): list of paths of the pack's modified file
Returns:
bool: whether the operation succeeded.
bool: whether running build has not updated pack release notes.
"""
task_status = False
not_updated_build = False
release_notes_dir = os.path.join(self._pack_path, Pack.RELEASE_NOTES)
modified_rn_files_paths = modified_rn_files_paths if modified_rn_files_paths else []
try:
# load changelog from downloaded index
logging.info(f"Loading changelog for {self._pack_name} pack")
changelog_index_path = os.path.join(index_folder_path, self._pack_name, Pack.CHANGELOG_JSON)
if os.path.exists(changelog_index_path):
changelog, changelog_latest_rn_version, changelog_latest_rn = \
self.get_changelog_latest_rn(changelog_index_path)
if os.path.exists(release_notes_dir):
# Handling latest release notes files
release_notes_lines, latest_release_notes, new_release_notes_versions = \
self.get_release_notes_lines(
release_notes_dir, changelog_latest_rn_version, changelog_latest_rn)
self.assert_upload_bucket_version_matches_release_notes_version(changelog, latest_release_notes)
# Handling modified old release notes files, if there are any
rn_files_names = self.get_rn_files_names(modified_rn_files_paths)
modified_release_notes_lines_dict = self.get_modified_release_notes_lines(
release_notes_dir, new_release_notes_versions, changelog, rn_files_names)
if self._current_version != latest_release_notes:
logging.error(f"Version mismatch detected between current version: {self._current_version} "
f"and latest release notes version: {latest_release_notes}")
task_status = False
return task_status, not_updated_build
else:
if latest_release_notes in changelog:
logging.info(f"Found existing release notes for version: {latest_release_notes}")
version_changelog = self._create_changelog_entry(release_notes=release_notes_lines,
version_display_name=latest_release_notes,
build_number=build_number,
pack_was_modified=pack_was_modified,
new_version=False)
else:
logging.info(f"Created new release notes for version: {latest_release_notes}")
version_changelog = self._create_changelog_entry(release_notes=release_notes_lines,
version_display_name=latest_release_notes,
build_number=build_number,
new_version=True)
if version_changelog:
changelog[latest_release_notes] = version_changelog
if modified_release_notes_lines_dict:
logging.info("updating changelog entries for modified rn")
for version, modified_release_notes_lines in modified_release_notes_lines_dict.items():
updated_entry = self._get_updated_changelog_entry(
changelog, version, release_notes=modified_release_notes_lines)
changelog[version] = updated_entry
else: # will enter only on initial version and release notes folder still was not created
if len(changelog.keys()) > 1 or Pack.PACK_INITIAL_VERSION not in changelog:
logging.warning(
f"{self._pack_name} pack mismatch between {Pack.CHANGELOG_JSON} and {Pack.RELEASE_NOTES}")
task_status, not_updated_build = True, True
return task_status, not_updated_build
changelog[Pack.PACK_INITIAL_VERSION] = self._create_changelog_entry(
release_notes=self.description,
version_display_name=Pack.PACK_INITIAL_VERSION,
build_number=build_number,
initial_release=True,
new_version=False)
logging.info(f"Found existing release notes for version: {Pack.PACK_INITIAL_VERSION} "
f"in the {self._pack_name} pack.")
elif self._current_version == Pack.PACK_INITIAL_VERSION:
version_changelog = self._create_changelog_entry(
release_notes=self.description,
version_display_name=Pack.PACK_INITIAL_VERSION,
build_number=build_number,
new_version=True,
initial_release=True
)
changelog = {
Pack.PACK_INITIAL_VERSION: version_changelog
}
elif self._hidden:
logging.warning(f"Pack {self._pack_name} is deprecated. Skipping release notes handling.")
task_status = True
not_updated_build = True
return task_status, not_updated_build
else:
logging.error(f"No release notes found for: {self._pack_name}")
task_status = False
return task_status, not_updated_build
# Update change log entries with BC flag.
self.add_bc_entries_if_needed(release_notes_dir, changelog)
# write back changelog with changes to pack folder
with open(os.path.join(self._pack_path, Pack.CHANGELOG_JSON), "w") as pack_changelog:
json.dump(changelog, pack_changelog, indent=4)
task_status = True
logging.success(f"Finished creating {Pack.CHANGELOG_JSON} for {self._pack_name}")
except Exception as e:
logging.error(f"Failed creating {Pack.CHANGELOG_JSON} file for {self._pack_name}.\n "
f"Additional info: {e}")
finally:
return task_status, not_updated_build
def create_local_changelog(self, build_index_folder_path):
""" Copies the pack index changelog.json file to the pack path
Args:
build_index_folder_path: The path to the build index folder
Returns:
bool: whether the operation succeeded.
"""
task_status = True
build_changelog_index_path = os.path.join(build_index_folder_path, self._pack_name, Pack.CHANGELOG_JSON)
pack_changelog_path = os.path.join(self._pack_path, Pack.CHANGELOG_JSON)
if os.path.exists(build_changelog_index_path):
try:
shutil.copyfile(src=build_changelog_index_path, dst=pack_changelog_path)
logging.success(f"Successfully copied pack index changelog.json file from {build_changelog_index_path}"
f" to {pack_changelog_path}.")
except shutil.Error as e:
task_status = False
logging.error(f"Failed copying changelog.json file from {build_changelog_index_path} to "
f"{pack_changelog_path}. Additional info: {str(e)}")
return task_status
else:
task_status = False
logging.error(
f"{self._pack_name} index changelog file is missing in build bucket path: {build_changelog_index_path}")
return task_status and self.is_changelog_exists()
def collect_content_items(self):
""" Iterates over content items folders inside pack and collects content items data.
Returns:
dict: Parsed content items.
"""
task_status = False
content_items_result: dict = {}
try:
# the format is defined in issue #19786, may change in the future
content_item_name_mapping = {
PackFolders.SCRIPTS.value: "automation",
PackFolders.PLAYBOOKS.value: "playbook",
PackFolders.INTEGRATIONS.value: "integration",
PackFolders.INCIDENT_FIELDS.value: "incidentfield",
PackFolders.INCIDENT_TYPES.value: "incidenttype",
PackFolders.DASHBOARDS.value: "dashboard",
PackFolders.INDICATOR_FIELDS.value: "indicatorfield",
PackFolders.REPORTS.value: "report",
PackFolders.INDICATOR_TYPES.value: "reputation",
PackFolders.LAYOUTS.value: "layoutscontainer",
PackFolders.CLASSIFIERS.value: "classifier",
PackFolders.WIDGETS.value: "widget",
PackFolders.GENERIC_DEFINITIONS.value: "genericdefinition",
PackFolders.GENERIC_FIELDS.value: "genericfield",
PackFolders.GENERIC_MODULES.value: "genericmodule",
PackFolders.GENERIC_TYPES.value: "generictype",
PackFolders.LISTS.value: "list",
PackFolders.PREPROCESS_RULES.value: "preprocessrule",
PackFolders.JOBS.value: "job",
}
for root, pack_dirs, pack_files_names in os.walk(self._pack_path, topdown=False):
current_directory = root.split(os.path.sep)[-1]
parent_directory = root.split(os.path.sep)[-2]
if parent_directory in [PackFolders.GENERIC_TYPES.value, PackFolders.GENERIC_FIELDS.value]:
current_directory = parent_directory
elif current_directory in [PackFolders.GENERIC_TYPES.value, PackFolders.GENERIC_FIELDS.value]:
continue
folder_collected_items = []
for pack_file_name in pack_files_names:
if not pack_file_name.endswith(('.json', '.yml')):
continue
pack_file_path = os.path.join(root, pack_file_name)
                    # reputation files in the old format are not supported from server version 6.0.0
if current_directory == PackFolders.INDICATOR_TYPES.value \
and not fnmatch.fnmatch(pack_file_name, 'reputation-*.json'):
os.remove(pack_file_path)
logging.info(f"Deleted pack {pack_file_name} reputation file for {self._pack_name} pack")
continue
with open(pack_file_path, 'r') as pack_file:
if current_directory in PackFolders.yml_supported_folders():
content_item = yaml.safe_load(pack_file)
elif current_directory in PackFolders.json_supported_folders():
content_item = json.load(pack_file)
else:
continue
                    # check if the content item has a toversion field
to_version = content_item.get('toversion') or content_item.get('toVersion')
if to_version and LooseVersion(to_version) < LooseVersion(Metadata.SERVER_DEFAULT_MIN_VERSION):
os.remove(pack_file_path)
logging.info(
f"{self._pack_name} pack content item {pack_file_name} has to version: {to_version}. "
f"{pack_file_name} file was deleted.")
continue
if current_directory not in PackFolders.pack_displayed_items():
continue # skip content items that are not displayed in contentItems
logging.debug(
f"Iterating over {pack_file_path} file and collecting items of {self._pack_name} pack")
                    # update the min server version according to the current content item
self._server_min_version = get_updated_server_version(self._server_min_version, content_item,
self._pack_name)
content_item_tags = content_item.get('tags', [])
if current_directory == PackFolders.SCRIPTS.value:
folder_collected_items.append({
'id': content_item.get('commonfields', {}).get('id', ''),
'name': content_item.get('name', ''),
'description': content_item.get('comment', ''),
'tags': content_item_tags,
})
if not self._contains_transformer and 'transformer' in content_item_tags:
self._contains_transformer = True
if not self._contains_filter and 'filter' in content_item_tags:
self._contains_filter = True
elif current_directory == PackFolders.PLAYBOOKS.value:
self.is_feed_pack(content_item, 'Playbook')
folder_collected_items.append({
'id': content_item.get('id', ''),
'name': content_item.get('name', ''),
'description': content_item.get('description', ''),
})
elif current_directory == PackFolders.INTEGRATIONS.value:
integration_commands = content_item.get('script', {}).get('commands', [])
self.is_feed_pack(content_item, 'Integration')
folder_collected_items.append({
'id': content_item.get('commonfields', {}).get('id', ''),
'name': content_item.get('display', ''),
'description': content_item.get('description', ''),
'category': content_item.get('category', ''),
'commands': [
{'name': c.get('name', ''), 'description': c.get('description', '')}
for c in integration_commands],
})
elif current_directory == PackFolders.INCIDENT_FIELDS.value:
folder_collected_items.append({
'id': content_item.get('id', ''),
'name': content_item.get('name', ''),
'type': content_item.get('type', ''),
'description': content_item.get('description', ''),
})
elif current_directory == PackFolders.INCIDENT_TYPES.value:
folder_collected_items.append({
'id': content_item.get('id', ''),
'name': content_item.get('name', ''),
'playbook': content_item.get('playbookId', ''),
'closureScript': content_item.get('closureScript', ''),
'hours': int(content_item.get('hours', 0)),
'days': int(content_item.get('days', 0)),
'weeks': int(content_item.get('weeks', 0)),
})
elif current_directory == PackFolders.DASHBOARDS.value:
folder_collected_items.append({
'id': content_item.get('id', ''),
'name': content_item.get('name', ''),
})
elif current_directory == PackFolders.INDICATOR_FIELDS.value:
folder_collected_items.append({
'id': content_item.get('id', ''),
'name': content_item.get('name', ''),
'type': content_item.get('type', ''),
'description': content_item.get('description', ''),
})
elif current_directory == PackFolders.REPORTS.value:
folder_collected_items.append({
'id': content_item.get('id', ''),
'name': content_item.get('name', ''),
'description': content_item.get('description', ''),
})
elif current_directory == PackFolders.INDICATOR_TYPES.value:
folder_collected_items.append({
'id': content_item.get('id', ''),
'details': content_item.get('details', ''),
'reputationScriptName': content_item.get('reputationScriptName', ''),
'enhancementScriptNames': content_item.get('enhancementScriptNames', []),
})
elif current_directory == PackFolders.LAYOUTS.value:
layout_metadata = {
'id': content_item.get('id', ''),
'name': content_item.get('name', ''),
}
layout_description = content_item.get('description')
if layout_description is not None:
layout_metadata['description'] = layout_description
folder_collected_items.append(layout_metadata)
elif current_directory == PackFolders.CLASSIFIERS.value:
folder_collected_items.append({
'id': content_item.get('id', ''),
'name': content_item.get('name') or content_item.get('id', ''),
'description': content_item.get('description', ''),
})
elif current_directory == PackFolders.WIDGETS.value:
folder_collected_items.append({
'id': content_item.get('id', ''),
'name': content_item.get('name', ''),
'dataType': content_item.get('dataType', ''),
'widgetType': content_item.get('widgetType', ''),
})
elif current_directory == PackFolders.LISTS.value:
folder_collected_items.append({
'id': content_item.get('id', ''),
'name': content_item.get('name', '')
})
elif current_directory == PackFolders.GENERIC_DEFINITIONS.value:
folder_collected_items.append({
'id': content_item.get('id', ''),
'name': content_item.get('name', ''),
'description': content_item.get('description', ''),
})
elif parent_directory == PackFolders.GENERIC_FIELDS.value:
folder_collected_items.append({
'id': content_item.get('id', ''),
'name': content_item.get('name', ''),
'description': content_item.get('description', ''),
'type': content_item.get('type', ''),
})
elif current_directory == PackFolders.GENERIC_MODULES.value:
folder_collected_items.append({
'id': content_item.get('id', ''),
'name': content_item.get('name', ''),
'description': content_item.get('description', ''),
})
elif parent_directory == PackFolders.GENERIC_TYPES.value:
folder_collected_items.append({
'id': content_item.get('id', ''),
'name': content_item.get('name', ''),
'description': content_item.get('description', ''),
})
elif current_directory == PackFolders.PREPROCESS_RULES.value:
folder_collected_items.append({
'id': content_item.get('id', ''),
'name': content_item.get('name', ''),
'description': content_item.get('description', ''),
})
elif current_directory == PackFolders.JOBS.value:
folder_collected_items.append({
'id': content_item.get('id', ''),
# note that `name` may technically be blank, but shouldn't pass validations
'name': content_item.get('name', ''),
'details': content_item.get('details', ''),
})
if current_directory in PackFolders.pack_displayed_items():
content_item_key = content_item_name_mapping[current_directory]
content_items_result[content_item_key] = \
content_items_result.get(content_item_key, []) + folder_collected_items
logging.success(f"Finished collecting content items for {self._pack_name} pack")
task_status = True
except Exception:
logging.exception(f"Failed collecting content items in {self._pack_name} pack")
finally:
self._content_items = content_items_result
return task_status
def load_user_metadata(self):
""" Loads user defined metadata and stores part of it's data in defined properties fields.
Returns:
bool: whether the operation succeeded.
"""
task_status = False
user_metadata = {}
try:
user_metadata_path = os.path.join(self._pack_path, Pack.USER_METADATA) # user metadata path before parsing
if not os.path.exists(user_metadata_path):
logging.error(f"{self._pack_name} pack is missing {Pack.USER_METADATA} file.")
return task_status
with open(user_metadata_path, "r") as user_metadata_file:
user_metadata = json.load(user_metadata_file) # loading user metadata
            # some old packs are initialized with an empty list
user_metadata = {} if isinstance(user_metadata, list) else user_metadata
# store important user metadata fields
self.support_type = user_metadata.get(Metadata.SUPPORT, Metadata.XSOAR_SUPPORT)
self.current_version = user_metadata.get(Metadata.CURRENT_VERSION, '')
self.hidden = user_metadata.get(Metadata.HIDDEN, False)
self.description = user_metadata.get(Metadata.DESCRIPTION, False)
self.display_name = user_metadata.get(Metadata.NAME, '') # type: ignore[misc]
self._user_metadata = user_metadata
self.eula_link = user_metadata.get(Metadata.EULA_LINK, Metadata.EULA_URL)
logging.info(f"Finished loading {self._pack_name} pack user metadata")
task_status = True
except Exception:
logging.exception(f"Failed in loading {self._pack_name} user metadata.")
finally:
return task_status
def _collect_pack_tags(self, user_metadata, landing_page_sections, trending_packs):
tags = set(input_to_list(input_data=user_metadata.get('tags')))
tags |= self._get_tags_from_landing_page(landing_page_sections)
tags |= {PackTags.TIM} if self._is_feed else set()
tags |= {PackTags.USE_CASE} if self._use_cases else set()
tags |= {PackTags.TRANSFORMER} if self._contains_transformer else set()
tags |= {PackTags.FILTER} if self._contains_filter else set()
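        # The "New" tag is only kept for the first 30 days after the pack's creation date.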
if self._create_date:
days_since_creation = (datetime.utcnow() - datetime.strptime(self._create_date, Metadata.DATE_FORMAT)).days
if days_since_creation <= 30:
tags |= {PackTags.NEW}
else:
tags -= {PackTags.NEW}
if trending_packs:
if self._pack_name in trending_packs:
tags |= {PackTags.TRENDING}
else:
tags -= {PackTags.TRENDING}
return tags
def _enhance_pack_attributes(self, index_folder_path, pack_was_modified,
dependencies_data, statistics_handler=None, format_dependencies_only=False):
""" Enhances the pack object with attributes for the metadata file
Args:
dependencies_data (dict): mapping of pack dependencies data, of all levels.
format_dependencies_only (bool): Indicates whether the metadata formation is just for formatting the
dependencies or not.
Returns:
dict: parsed pack metadata.
"""
landing_page_sections = mp_statistics.StatisticsHandler.get_landing_page_sections()
displayed_dependencies = self.user_metadata.get(Metadata.DISPLAYED_IMAGES, [])
trending_packs = None
pack_dependencies_by_download_count = displayed_dependencies
if not format_dependencies_only:
# ===== Pack Regular Attributes =====
self._support_type = self.user_metadata.get(Metadata.SUPPORT, Metadata.XSOAR_SUPPORT)
self._support_details = self._create_support_section(
support_type=self._support_type, support_url=self.user_metadata.get(Metadata.URL),
support_email=self.user_metadata.get(Metadata.EMAIL)
)
self._author = self._get_author(
support_type=self._support_type, author=self.user_metadata.get(Metadata.AUTHOR, ''))
self._certification = self._get_certification(
support_type=self._support_type, certification=self.user_metadata.get(Metadata.CERTIFICATION)
)
self._legacy = self.user_metadata.get(Metadata.LEGACY, True)
self._create_date = self._get_pack_creation_date(index_folder_path)
self._update_date = self._get_pack_update_date(index_folder_path, pack_was_modified)
self._use_cases = input_to_list(input_data=self.user_metadata.get(Metadata.USE_CASES), capitalize_input=True)
self._categories = input_to_list(input_data=self.user_metadata.get(Metadata.CATEGORIES), capitalize_input=True)
self._keywords = input_to_list(self.user_metadata.get(Metadata.KEY_WORDS))
self._dependencies = self._parse_pack_dependencies(
self.user_metadata.get(Metadata.DEPENDENCIES, {}), dependencies_data)
# ===== Pack Private Attributes =====
if not format_dependencies_only:
self._is_private_pack = Metadata.PARTNER_ID in self.user_metadata
self._is_premium = self._is_private_pack
self._preview_only = get_valid_bool(self.user_metadata.get(Metadata.PREVIEW_ONLY, False))
self._price = convert_price(pack_id=self._pack_name, price_value_input=self.user_metadata.get('price'))
if self._is_private_pack:
self._vendor_id = self.user_metadata.get(Metadata.VENDOR_ID, "")
self._partner_id = self.user_metadata.get(Metadata.PARTNER_ID, "")
self._partner_name = self.user_metadata.get(Metadata.PARTNER_NAME, "")
self._content_commit_hash = self.user_metadata.get(Metadata.CONTENT_COMMIT_HASH, "")
# Currently all content packs are legacy.
# Since premium packs cannot be legacy, we directly set this attribute to false.
self._legacy = False
# ===== Pack Statistics Attributes =====
if not self._is_private_pack and statistics_handler: # Public Content case
self._pack_statistics_handler = mp_statistics.PackStatisticsHandler(
self._pack_name, statistics_handler.packs_statistics_df, statistics_handler.packs_download_count_desc,
displayed_dependencies
)
self._downloads_count = self._pack_statistics_handler.download_count
trending_packs = statistics_handler.trending_packs
pack_dependencies_by_download_count = self._pack_statistics_handler.displayed_dependencies_sorted
self._tags = self._collect_pack_tags(self.user_metadata, landing_page_sections, trending_packs)
self._search_rank = mp_statistics.PackStatisticsHandler.calculate_search_rank(
tags=self._tags, certification=self._certification, content_items=self._content_items
)
self._related_integration_images = self._get_all_pack_images(
self._displayed_integration_images, displayed_dependencies, dependencies_data,
pack_dependencies_by_download_count
)
def format_metadata(self, index_folder_path, packs_dependencies_mapping, build_number, commit_hash,
pack_was_modified, statistics_handler, pack_names=None, format_dependencies_only=False):
""" Re-formats metadata according to marketplace metadata format defined in issue #19786 and writes back
the result.
Args:
index_folder_path (str): downloaded index folder directory path.
packs_dependencies_mapping (dict): all packs dependencies lookup mapping.
build_number (str): circleCI build number.
commit_hash (str): current commit hash.
pack_was_modified (bool): Indicates whether the pack was modified or not.
statistics_handler (StatisticsHandler): The marketplace statistics handler
pack_names (set): List of all packs.
format_dependencies_only (bool): Indicates whether the metadata formation is just for formatting the
dependencies or not.
Returns:
bool: True is returned in case metadata file was parsed successfully, otherwise False.
            bool: True is returned if the pack is missing dependencies, otherwise False.
"""
task_status = False
pack_names = pack_names if pack_names else []
is_missing_dependencies = False
try:
self.set_pack_dependencies(packs_dependencies_mapping)
if Metadata.DISPLAYED_IMAGES not in self.user_metadata and self._user_metadata:
self._user_metadata[Metadata.DISPLAYED_IMAGES] = packs_dependencies_mapping.get(
self._pack_name, {}).get(Metadata.DISPLAYED_IMAGES, [])
logging.info(f"Adding auto generated display images for {self._pack_name} pack")
dependencies_data, is_missing_dependencies = \
self._load_pack_dependencies(index_folder_path, pack_names)
self._enhance_pack_attributes(
index_folder_path, pack_was_modified, dependencies_data, statistics_handler,
format_dependencies_only
)
formatted_metadata = self._parse_pack_metadata(build_number, commit_hash)
metadata_path = os.path.join(self._pack_path, Pack.METADATA) # deployed metadata path after parsing
json_write(metadata_path, formatted_metadata) # writing back parsed metadata
logging.success(f"Finished formatting {self._pack_name} packs's {Pack.METADATA} {metadata_path} file.")
task_status = True
except Exception as e:
logging.exception(f"Failed in formatting {self._pack_name} pack metadata. Additional Info: {str(e)}")
finally:
return task_status, is_missing_dependencies
@staticmethod
def pack_created_in_time_delta(pack_name, time_delta: timedelta, index_folder_path: str) -> bool:
"""
        Checks whether the pack was created within the time delta specified by the 'time_delta' argument and returns
        the result as a boolean.
Args:
pack_name: the pack name.
time_delta: time_delta to check if pack was created before.
index_folder_path: downloaded index folder directory path.
Returns:
            True if the pack was created less than 'time_delta' ago, False otherwise.
"""
pack_creation_time_str = Pack._calculate_pack_creation_date(pack_name, index_folder_path)
return datetime.utcnow() - datetime.strptime(pack_creation_time_str, Metadata.DATE_FORMAT) < time_delta
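        # Illustrative usage (hypothetical pack name and index path, not part of the original file):
        #   Pack.pack_created_in_time_delta('SomePack', timedelta(days=30), '/tmp/index')
        #   returns True only if the pack's creation date in the downloaded index is within the last 30 days.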
def _get_pack_creation_date(self, index_folder_path):
return self._calculate_pack_creation_date(self._pack_name, index_folder_path)
@staticmethod
def _calculate_pack_creation_date(pack_name, index_folder_path):
""" Gets the pack created date.
Args:
            pack_name (str): the pack name.
            index_folder_path (str): downloaded index folder directory path.
Returns:
            str: pack creation date, formatted according to Metadata.DATE_FORMAT.
"""
created_time = datetime.utcnow().strftime(Metadata.DATE_FORMAT)
metadata = load_json(os.path.join(index_folder_path, pack_name, Pack.METADATA))
if metadata:
if metadata.get(Metadata.CREATED):
created_time = metadata.get(Metadata.CREATED, '')
else:
raise Exception(f'The metadata file of the {pack_name} pack does not contain "{Metadata.CREATED}" time')
return created_time
def _get_pack_update_date(self, index_folder_path, pack_was_modified):
""" Gets the pack update date.
Args:
index_folder_path (str): downloaded index folder directory path.
pack_was_modified (bool): whether the pack was modified or not.
Returns:
            str: pack latest update date.
"""
latest_changelog_released_date = datetime.utcnow().strftime(Metadata.DATE_FORMAT)
changelog = load_json(os.path.join(index_folder_path, self._pack_name, Pack.CHANGELOG_JSON))
if changelog and not pack_was_modified:
packs_latest_release_notes = max(LooseVersion(ver) for ver in changelog)
latest_changelog_version = changelog.get(packs_latest_release_notes.vstring, {})
latest_changelog_released_date = latest_changelog_version.get('released')
return latest_changelog_released_date
def set_pack_dependencies(self, packs_dependencies_mapping):
pack_dependencies = packs_dependencies_mapping.get(self._pack_name, {}).get(Metadata.DEPENDENCIES, {})
if Metadata.DEPENDENCIES not in self.user_metadata and self._user_metadata:
self._user_metadata[Metadata.DEPENDENCIES] = {}
# If it is a core pack, check that no new mandatory packs (that are not core packs) were added
# They can be overridden in the user metadata to be not mandatory so we need to check there as well
if self._pack_name in GCPConfig.CORE_PACKS_LIST:
mandatory_dependencies = [k for k, v in pack_dependencies.items()
if v.get(Metadata.MANDATORY, False) is True
and k not in GCPConfig.CORE_PACKS_LIST
and k not in self.user_metadata[Metadata.DEPENDENCIES].keys()]
if mandatory_dependencies:
raise Exception(f'New mandatory dependencies {mandatory_dependencies} were '
f'found in the core pack {self._pack_name}')
pack_dependencies.update(self.user_metadata[Metadata.DEPENDENCIES])
if self._user_metadata:
self._user_metadata[Metadata.DEPENDENCIES] = pack_dependencies
def prepare_for_index_upload(self):
""" Removes and leaves only necessary files in pack folder.
Returns:
bool: whether the operation succeeded.
"""
task_status = False
files_to_leave = [Pack.METADATA, Pack.CHANGELOG_JSON, Pack.README]
try:
for file_or_folder in os.listdir(self._pack_path):
files_or_folder_path = os.path.join(self._pack_path, file_or_folder)
if file_or_folder in files_to_leave:
continue
if os.path.isdir(files_or_folder_path):
shutil.rmtree(files_or_folder_path)
else:
os.remove(files_or_folder_path)
task_status = True
except Exception:
logging.exception(f"Failed in preparing index for upload in {self._pack_name} pack.")
finally:
return task_status
@staticmethod
def _get_spitted_yml_image_data(root, target_folder_files):
""" Retrieves pack integration image and integration display name and returns binding image data.
Args:
root (str): full path to the target folder to search integration image.
target_folder_files (list): list of files inside the targeted folder.
Returns:
dict: path to integration image and display name of the integration.
"""
image_data = {}
for pack_file in target_folder_files:
if pack_file.startswith('.'):
continue
if pack_file.endswith('_image.png'):
image_data['repo_image_path'] = os.path.join(root, pack_file)
elif pack_file.endswith('.yml'):
with open(os.path.join(root, pack_file), 'r') as integration_file:
integration_yml = yaml.safe_load(integration_file)
image_data['display_name'] = integration_yml.get('display', '')
return image_data
def _get_image_data_from_yml(self, pack_file_path):
""" Creates temporary image file and retrieves integration display name.
Args:
pack_file_path (str): full path to the target yml_path integration yml to search integration image.
Returns:
dict: path to temporary integration image, display name of the integrations and the basename of
the integration in content_pack.zip.
"""
image_data = {}
if pack_file_path.endswith('.yml'):
with open(pack_file_path, 'r') as integration_file:
integration_yml = yaml.safe_load(integration_file)
image_data['display_name'] = integration_yml.get('display', '')
# create temporary file of base64 decoded data
integration_name = integration_yml.get('name', '')
base64_image = integration_yml['image'].split(',')[1] if integration_yml.get('image') else None
if not base64_image:
logging.warning(f"{integration_name} integration image was not found in {self._pack_name} pack")
return {}
temp_image_name = f'{integration_name.replace(" ", "")}_image.png'
temp_image_path = os.path.join(self._pack_path, temp_image_name)
with open(temp_image_path, 'wb') as image_file:
image_file.write(base64.b64decode(base64_image))
self._remove_files_list.append(temp_image_name) # add temporary file to tracking list
image_data['image_path'] = temp_image_path
image_data['integration_path_basename'] = os.path.basename(pack_file_path)
logging.info(f"Created temporary integration {image_data['display_name']} image for {self._pack_name} pack")
return image_data
def _search_for_images(self, target_folder):
""" Searches for png files in targeted folder.
Args:
target_folder (str): full path to directory to search.
Returns:
list: list of dictionaries that include image path and display name of integration, example:
[{'image_path': image_path, 'display_name': integration_display_name},...]
"""
target_folder_path = os.path.join(self._pack_path, target_folder)
images_list = []
if os.path.exists(target_folder_path):
for pack_item in os.scandir(target_folder_path):
image_data = self._get_image_data_from_yml(pack_item.path)
if image_data and image_data not in images_list:
images_list.append(image_data)
return images_list
def check_if_exists_in_index(self, index_folder_path):
""" Checks if pack is sub-folder of downloaded index.
Args:
index_folder_path (str): index folder full path.
Returns:
bool: whether the operation succeeded.
bool: whether pack exists in index folder.
"""
task_status, exists_in_index = False, False
try:
if not os.path.exists(index_folder_path):
logging.error(f"{GCPConfig.INDEX_NAME} does not exists.")
return task_status, exists_in_index
exists_in_index = os.path.exists(os.path.join(index_folder_path, self._pack_name))
task_status = True
except Exception:
logging.exception(f"Failed searching {self._pack_name} pack in {GCPConfig.INDEX_NAME}")
finally:
return task_status, exists_in_index
@staticmethod
def remove_contrib_suffix_from_name(display_name: str) -> str:
""" Removes the contribution details suffix from the integration's display name
Args:
display_name (str): The integration display name.
Returns:
str: The display name without the contrib details suffix
"""
contribution_suffixes = ('(Partner Contribution)', '(Developer Contribution)', '(Community Contribution)')
for suffix in contribution_suffixes:
index = display_name.find(suffix)
if index != -1:
display_name = display_name[:index].rstrip(' ')
break
return display_name
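        # Illustrative usage (hypothetical display name, not part of the original file):
        #   remove_contrib_suffix_from_name('Acme Integration (Partner Contribution)') -> 'Acme Integration'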
@staticmethod
def need_to_upload_integration_image(image_data: dict, integration_dirs: list, unified_integrations: list):
""" Checks whether needs to upload the integration image or not.
We upload in one of the two cases:
1. The integration_path_basename is one of the integration dirs detected
2. The integration_path_basename is one of the added/modified unified integrations
Args:
image_data (dict): path to temporary integration image, display name of the integrations and the basename of
the integration in content_pack.zip.
integration_dirs (list): The list of integrations to search in for images
unified_integrations (list): The list of unified integrations to upload their image
Returns:
bool: True if we need to upload the image or not
"""
integration_path_basename = image_data['integration_path_basename']
return any([
re.findall(BucketUploadFlow.INTEGRATION_DIR_REGEX, integration_path_basename)[0] in integration_dirs,
integration_path_basename in unified_integrations
])
def upload_integration_images(self, storage_bucket, storage_base_path, diff_files_list=None, detect_changes=False):
""" Uploads pack integrations images to gcs.
The returned result of integration section are defined in issue #19786.
Args:
            storage_bucket (google.cloud.storage.bucket.Bucket): google storage bucket where image will be uploaded.
            storage_base_path (str): the path under the bucket to upload to.
diff_files_list (list): The list of all modified/added files found in the diff
detect_changes (bool): Whether to detect changes or upload all images in any case.
Returns:
            bool: whether the operation succeeded. The collected integration images data is stored on the
            pack object (self._displayed_integration_images) rather than returned.
"""
task_status = True
integration_images = []
integration_dirs = []
unified_integrations = []
try:
if detect_changes:
# detect added/modified integration images
for file in diff_files_list:
if self.is_integration_image(file.a_path):
# integration dir name will show up in the unified integration file path in content_packs.zip
integration_dirs.append(os.path.basename(os.path.dirname(file.a_path)))
elif self.is_unified_integration(file.a_path):
# if the file found in the diff is a unified integration we upload its image
unified_integrations.append(os.path.basename(file.a_path))
pack_local_images = self._search_for_images(target_folder=PackFolders.INTEGRATIONS.value)
if not pack_local_images:
                return True  # no images were found, nothing to upload
pack_storage_root_path = os.path.join(storage_base_path, self._pack_name)
for image_data in pack_local_images:
image_path = image_data.get('image_path')
if not image_path:
raise Exception(f"{self._pack_name} pack integration image was not found")
image_name = os.path.basename(image_path)
image_storage_path = os.path.join(pack_storage_root_path, image_name)
pack_image_blob = storage_bucket.blob(image_storage_path)
if not detect_changes or \
self.need_to_upload_integration_image(image_data, integration_dirs, unified_integrations):
# upload the image if needed
logging.info(f"Uploading image: {image_name} of integration: {image_data.get('display_name')} "
f"from pack: {self._pack_name}")
with open(image_path, "rb") as image_file:
pack_image_blob.upload_from_file(image_file)
self._uploaded_integration_images.append(image_name)
if GCPConfig.USE_GCS_RELATIVE_PATH:
image_gcs_path = urllib.parse.quote(
os.path.join(GCPConfig.IMAGES_BASE_PATH, self._pack_name, image_name))
else:
image_gcs_path = pack_image_blob.public_url
integration_name = image_data.get('display_name', '')
if self.support_type != Metadata.XSOAR_SUPPORT:
integration_name = self.remove_contrib_suffix_from_name(integration_name)
integration_images.append({
'name': integration_name,
'imagePath': image_gcs_path
})
if self._uploaded_integration_images:
logging.info(f"Uploaded {len(self._uploaded_integration_images)} images for {self._pack_name} pack.")
except Exception as e:
task_status = False
logging.exception(f"Failed to upload {self._pack_name} pack integration images. Additional Info: {str(e)}")
finally:
self._displayed_integration_images = integration_images
return task_status
def copy_integration_images(self, production_bucket, build_bucket, images_data, storage_base_path, build_bucket_base_path):
""" Copies all pack's integration images from the build bucket to the production bucket
Args:
production_bucket (google.cloud.storage.bucket.Bucket): The production bucket
build_bucket (google.cloud.storage.bucket.Bucket): The build bucket
images_data (dict): The images data structure from Prepare Content step
Returns:
bool: Whether the operation succeeded.
"""
task_status = True
num_copied_images = 0
err_msg = f"Failed copying {self._pack_name} pack integrations images."
pc_uploaded_integration_images = images_data.get(self._pack_name, {}).get(BucketUploadFlow.INTEGRATIONS, [])
for image_name in pc_uploaded_integration_images:
build_bucket_image_path = os.path.join(build_bucket_base_path, self._pack_name, image_name)
build_bucket_image_blob = build_bucket.blob(build_bucket_image_path)
if not build_bucket_image_blob.exists():
logging.error(f"Found changed/added integration image {image_name} in content repo but "
f"{build_bucket_image_path} does not exist in build bucket")
task_status = False
else:
logging.info(f"Copying {self._pack_name} pack integration image: {image_name}")
try:
copied_blob = build_bucket.copy_blob(
blob=build_bucket_image_blob, destination_bucket=production_bucket,
new_name=os.path.join(storage_base_path, self._pack_name, image_name)
)
if not copied_blob.exists():
logging.error(f"Copy {self._pack_name} integration image: {build_bucket_image_blob.name} "
f"blob to {copied_blob.name} blob failed.")
task_status = False
else:
num_copied_images += 1
except Exception as e:
logging.exception(f"{err_msg}. Additional Info: {str(e)}")
return False
if not task_status:
logging.error(err_msg)
else:
if num_copied_images == 0:
logging.info(f"No added/modified integration images were detected in {self._pack_name} pack.")
else:
logging.success(f"Copied {num_copied_images} images for {self._pack_name} pack.")
return task_status
def upload_author_image(self, storage_bucket, storage_base_path, diff_files_list=None, detect_changes=False):
""" Uploads pack author image to gcs.
Searches for `Author_image.png` and uploads author image to gcs. In case no such image was found,
        default Base pack image path is used and its GCP path is returned.
Args:
storage_bucket (google.cloud.storage.bucket.Bucket): gcs bucket where author image will be uploaded.
storage_base_path (str): the path under the bucket to upload to.
diff_files_list (list): The list of all modified/added files found in the diff
detect_changes (bool): Whether to detect changes or upload the author image in any case.
Returns:
bool: whether the operation succeeded.
str: public gcp path of author image.
"""
task_status = True
author_image_storage_path = ""
try:
author_image_path = os.path.join(self._pack_path, Pack.AUTHOR_IMAGE_NAME) # disable-secrets-detection
if os.path.exists(author_image_path):
image_to_upload_storage_path = os.path.join(storage_base_path, self._pack_name,
Pack.AUTHOR_IMAGE_NAME) # disable-secrets-detection
pack_author_image_blob = storage_bucket.blob(image_to_upload_storage_path)
if not detect_changes or any(self.is_author_image(file.a_path) for file in diff_files_list):
# upload the image if needed
with open(author_image_path, "rb") as author_image_file:
pack_author_image_blob.upload_from_file(author_image_file)
self._uploaded_author_image = True
logging.success(f"Uploaded successfully {self._pack_name} pack author image")
if GCPConfig.USE_GCS_RELATIVE_PATH:
author_image_storage_path = urllib.parse.quote(
os.path.join(GCPConfig.IMAGES_BASE_PATH, self._pack_name, Pack.AUTHOR_IMAGE_NAME))
else:
author_image_storage_path = pack_author_image_blob.public_url
elif self.support_type == Metadata.XSOAR_SUPPORT: # use default Base pack image for xsoar supported packs
author_image_storage_path = os.path.join(GCPConfig.IMAGES_BASE_PATH, GCPConfig.BASE_PACK,
Pack.AUTHOR_IMAGE_NAME) # disable-secrets-detection
if not GCPConfig.USE_GCS_RELATIVE_PATH:
# disable-secrets-detection-start
author_image_storage_path = os.path.join(GCPConfig.GCS_PUBLIC_URL, storage_bucket.name,
author_image_storage_path)
# disable-secrets-detection-end
logging.info((f"Skipping uploading of {self._pack_name} pack author image "
f"and use default {GCPConfig.BASE_PACK} pack image"))
else:
logging.info(f"Skipping uploading of {self._pack_name} pack author image. "
f"The pack is defined as {self.support_type} support type")
except Exception:
logging.exception(f"Failed uploading {self._pack_name} pack author image.")
task_status = False
author_image_storage_path = ""
finally:
self._author_image = author_image_storage_path
return task_status
def copy_author_image(self, production_bucket, build_bucket, images_data, storage_base_path, build_bucket_base_path):
""" Copies pack's author image from the build bucket to the production bucket
        Searches for `Author_image.png`. In case no such image was found, the default Base pack image path is used
        and its GCP path is returned.
Args:
production_bucket (google.cloud.storage.bucket.Bucket): The production bucket
build_bucket (google.cloud.storage.bucket.Bucket): The build bucket
images_data (dict): The images data structure from Prepare Content step
Returns:
bool: Whether the operation succeeded.
"""
if images_data.get(self._pack_name, {}).get(BucketUploadFlow.AUTHOR, False):
build_author_image_path = os.path.join(build_bucket_base_path, self._pack_name, Pack.AUTHOR_IMAGE_NAME)
build_author_image_blob = build_bucket.blob(build_author_image_path)
if build_author_image_blob.exists():
try:
copied_blob = build_bucket.copy_blob(
blob=build_author_image_blob, destination_bucket=production_bucket,
new_name=os.path.join(storage_base_path, self._pack_name,
Pack.AUTHOR_IMAGE_NAME))
if not copied_blob.exists():
logging.error(f"Failed copying {self._pack_name} pack author image.")
return False
else:
logging.success(f"Copied successfully {self._pack_name} pack author image.")
return True
except Exception as e:
logging.exception(f"Failed copying {Pack.AUTHOR_IMAGE_NAME} for {self._pack_name} pack. "
f"Additional Info: {str(e)}")
return False
else:
logging.error(f"Found changed/added author image in content repo for {self._pack_name} pack but "
f"image does not exist in build bucket in path {build_author_image_path}.")
return False
else:
logging.info(f"No added/modified author image was detected in {self._pack_name} pack.")
return True
def cleanup(self):
""" Finalization action, removes extracted pack folder.
"""
if os.path.exists(self._pack_path):
shutil.rmtree(self._pack_path)
logging.info(f"Cleanup {self._pack_name} pack from: {self._pack_path}")
def is_changelog_exists(self):
""" Indicates whether the local changelog of a given pack exists or not
Returns:
bool: The answer
"""
return os.path.isfile(os.path.join(self._pack_path, Pack.CHANGELOG_JSON))
def is_failed_to_upload(self, failed_packs_dict):
"""
Checks if the pack was failed to upload in Prepare Content step in Create Instances job
Args:
failed_packs_dict (dict): The failed packs file
Returns:
bool: Whether the operation succeeded.
str: The pack's failing status
"""
if self._pack_name in failed_packs_dict:
return True, failed_packs_dict[self._pack_name].get('status')
else:
return False, str()
def is_integration_image(self, file_path: str):
""" Indicates whether a file_path is an integration image or not
Args:
file_path (str): The file path
Returns:
bool: True if the file is an integration image or False otherwise
"""
return all([
file_path.startswith(os.path.join(PACKS_FOLDER, self._pack_name)),
file_path.endswith('.png'),
'image' in os.path.basename(file_path.lower()),
os.path.basename(file_path) != Pack.AUTHOR_IMAGE_NAME
])
def is_author_image(self, file_path: str):
""" Indicates whether a file_path is an author image or not
Args:
file_path (str): The file path
Returns:
bool: True if the file is an author image or False otherwise
"""
return file_path == os.path.join(PACKS_FOLDER, self._pack_name, Pack.AUTHOR_IMAGE_NAME)
def is_unified_integration(self, file_path: str):
""" Indicates whether a file_path is a unified integration yml file or not
Args:
file_path (str): The file path
Returns:
bool: True if the file is a unified integration or False otherwise
"""
return all([
file_path.startswith(os.path.join(PACKS_FOLDER, self._pack_name, PackFolders.INTEGRATIONS.value)),
os.path.basename(os.path.dirname(file_path)) == PackFolders.INTEGRATIONS.value,
os.path.basename(file_path).startswith('integration'),
os.path.basename(file_path).endswith('.yml')
])
def add_bc_entries_if_needed(self, release_notes_dir: str, changelog: Dict[str, Any]) -> None:
"""
Receives changelog, checks if there exists a BC version in each changelog entry (as changelog entry might be
zipped into few RN versions, check if at least one of the versions is BC).
Check if RN is BC is done by doing the following:
1) Check if RN has corresponding config file, e.g 1_0_1.md has corresponding 1_0_1.json file.
        2) If it does, check whether the `breakingChanges` field is true.
        If such a version exists, adds a true value to the 'breakingChanges' field of the changelog entry.
        If the JSON file also has `breakingChangesNotes` configured, a `breakingChangesNotes` field is added to the
        changelog entry as well.
This function iterates every entry in changelog because it takes into consideration four scenarios:
a) Entry without breaking changes, changes to entry with breaking changes (because at least one of the
versions in the entry was marked as breaking changes).
b) Entry without breaking changes, does not change.
c) Entry with breaking changes, changes to entry without breaking changes (because all the BC versions
corresponding to the changelog entry were re-marked as not BC).
d) Entry with breaking changes, does not change.
Args:
release_notes_dir (str): RN dir path.
changelog (Dict[str, Any]): Changelog data represented as a dict.
Returns:
(None): Modifies changelog, adds bool value to 'breakingChanges' and `breakingChangesNotes` fields to every
changelog entry, according to the logic described above.
"""
if not os.path.exists(release_notes_dir):
return
bc_version_to_text: Dict[str, Optional[str]] = self._breaking_changes_versions_to_text(release_notes_dir)
loose_versions: List[LooseVersion] = [LooseVersion(bc_ver) for bc_ver in bc_version_to_text]
predecessor_version: LooseVersion = LooseVersion('0.0.0')
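        # Iterate the changelog entries in ascending version order so that each entry is matched against the BC
        # versions in the half-open range (predecessor_version, entry_version].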
for changelog_entry in sorted(changelog.keys(), key=LooseVersion):
rn_loose_version: LooseVersion = LooseVersion(changelog_entry)
if bc_versions := self._changelog_entry_bc_versions(predecessor_version, rn_loose_version, loose_versions,
bc_version_to_text):
logging.info(f'Changelog entry {changelog_entry} contains BC versions')
changelog[changelog_entry]['breakingChanges'] = True
if bc_text := self._calculate_bc_text(release_notes_dir, bc_versions):
changelog[changelog_entry]['breakingChangesNotes'] = bc_text
else:
changelog[changelog_entry].pop('breakingChangesNotes', None)
else:
changelog[changelog_entry].pop('breakingChanges', None)
predecessor_version = rn_loose_version
def _calculate_bc_text(self, release_notes_dir: str, bc_version_to_text: Dict[str, Optional[str]]) -> Optional[str]:
"""
Receives BC versions to text dict for current changelog entry. Calculates text for BC entry.
Args:
release_notes_dir (str): RN dir path.
bc_version_to_text (Dict[str, Optional[str]): {bc version, bc_text}
Returns:
(Optional[str]): Text for entry if such was added.
If none is returned, server will list the full RN as the BC notes instead.
"""
# Handle cases of one BC version in entry.
if len(bc_version_to_text) == 1:
return list(bc_version_to_text.values())[0]
# Handle cases of two or more BC versions in entry.
text_of_bc_versions, bc_without_text = self._split_bc_versions_with_and_without_text(bc_version_to_text)
# Case one: Not even one BC version contains breaking text.
if len(text_of_bc_versions) == 0:
return None
# Case two: Only part of BC versions contains breaking text.
elif len(text_of_bc_versions) < len(bc_version_to_text):
return self._handle_many_bc_versions_some_with_text(release_notes_dir, text_of_bc_versions, bc_without_text)
# Case 3: All BC versions contains text.
else:
            # Important: Currently, aggregated BC texts are simply concatenated.
            # In the future this might need to be re-thought.
return '\n'.join(bc_version_to_text.values()) # type: ignore[arg-type]
def _handle_many_bc_versions_some_with_text(self, release_notes_dir: str, text_of_bc_versions: List[str],
bc_versions_without_text: List[str], ) -> str:
"""
Calculates text for changelog entry where some BC versions contain text and some don't.
        Important: Currently, aggregated BC texts are simply concatenated (and if a BC version does not have a BC
        text, the whole RN text is concatenated instead). In the future this might need to be re-thought.
Args:
release_notes_dir (str): RN dir path.
text_of_bc_versions ([List[str]): List of text of BC versions with text.
bc_versions_without_text ([List[str]): List of BC versions without text.
Returns:
(str): Text for BC entry.
"""
bc_with_text_str = '\n'.join(text_of_bc_versions)
rn_file_names_without_text = [f'''{bc_version.replace('.', '_')}.md''' for
bc_version in bc_versions_without_text]
other_rn_text: str = self._get_release_notes_concat_str(release_notes_dir, rn_file_names_without_text)
if not other_rn_text:
logging.error('No RN text, although text was expected to be found for versions'
f' {rn_file_names_without_text}.')
return f'{bc_with_text_str}{other_rn_text}'
@staticmethod
def _get_release_notes_concat_str(release_notes_dir: str, rn_file_names: List[str]) -> str:
"""
Concat all RN data found in given `rn_file_names`.
Args:
release_notes_dir (str): RN dir path.
rn_file_names (List[str]): List of all RN files to concat their data.
Returns:
(str): Concat RN data
"""
concat_str: str = ''
for rn_file_name in rn_file_names:
rn_file_path = os.path.join(release_notes_dir, rn_file_name)
with open(rn_file_path, 'r') as f:
# Will make the concat string start with new line on purpose.
concat_str = f'{concat_str}\n{f.read()}'
return concat_str
@staticmethod
def _split_bc_versions_with_and_without_text(bc_versions: Dict[str, Optional[str]]) -> Tuple[List[str], List[str]]:
"""
        Splits BC versions into a tuple of (texts of the BC versions that have text, BC versions without text).
Args:
bc_versions (Dict[str, Optional[str]): BC versions mapped to text if exists.
Returns:
(Tuple[List[str], List[str]]): (text of bc versions with text, bc_versions_without_text).
"""
text_of_bc_versions_with_tests: List[str] = []
bc_versions_without_text: List[str] = []
for bc_version, bc_text in bc_versions.items():
if bc_text:
text_of_bc_versions_with_tests.append(bc_text)
else:
bc_versions_without_text.append(bc_version)
return text_of_bc_versions_with_tests, bc_versions_without_text
@staticmethod
def _breaking_changes_versions_to_text(release_notes_dir: str) -> Dict[str, Optional[str]]:
"""
Calculates every BC version in given RN dir and maps it to text if exists.
Currently, text from a BC version is calculated in the following way:
- If RN has `breakingChangesNotes` entry in its corresponding config file, then use the value of that field
as the text of the BC to be represented.
- Else, use the whole RN text as BC text.
Args:
release_notes_dir (str): RN dir path.
Returns:
(Dict[str, Optional[str]]): {dotted_version, text}.
"""
bc_version_to_text: Dict[str, Optional[str]] = dict()
# Get all config files in RN dir
rn_config_file_names = filter_dir_files_by_extension(release_notes_dir, '.json')
for file_name in rn_config_file_names:
file_data: Dict = load_json(os.path.join(release_notes_dir, file_name))
# Check if version is BC
if file_data.get('breakingChanges'):
# Processing name for easier calculations later on
processed_name: str = underscore_file_name_to_dotted_version(file_name)
bc_version_to_text[processed_name] = file_data.get('breakingChangesNotes')
return bc_version_to_text
@staticmethod
def _changelog_entry_bc_versions(predecessor_version: LooseVersion, rn_version: LooseVersion,
breaking_changes_versions: List[LooseVersion],
bc_version_to_text: Dict[str, Optional[str]]) -> Dict[str, Optional[str]]:
"""
Gets all BC versions of given changelog entry, every BC s.t predecessor_version < BC version <= rn_version.
Args:
predecessor_version (LooseVersion): Predecessor version in numeric version order.
rn_version (LooseVersion): RN version of current processed changelog entry.
breaking_changes_versions (List[LooseVersion]): List of BC versions.
bc_version_to_text (Dict[str, Optional[str]): List of all BC to text in the given RN dir.
Returns:
Dict[str, Optional[str]]: Partial list of `bc_version_to_text`, containing only relevant versions between
given versions.
"""
return {bc_ver.vstring: bc_version_to_text.get(bc_ver.vstring) for bc_ver in breaking_changes_versions if
predecessor_version < bc_ver <= rn_version}
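        # Illustrative example (hypothetical versions, not part of the original file): with predecessor 1.0.0,
        # rn_version 1.2.0 and BC versions [1.1.0, 1.3.0], only 1.1.0 falls inside (1.0.0, 1.2.0] and is returned
        # together with its BC text.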
# HELPER FUNCTIONS
def get_upload_data(packs_results_file_path: str, stage: str) -> Tuple[dict, dict, dict, dict]:
""" Loads the packs_results.json file to get the successful and failed packs together with uploaded images dicts
Args:
packs_results_file_path (str): The path to the file
stage (str): can be BucketUploadFlow.PREPARE_CONTENT_FOR_TESTING or
BucketUploadFlow.UPLOAD_PACKS_TO_MARKETPLACE_STORAGE
Returns:
dict: The successful packs dict
dict: The failed packs dict
dict : The successful private packs dict
dict: The images data dict
"""
if os.path.exists(packs_results_file_path):
packs_results_file = load_json(packs_results_file_path)
stage_data: dict = packs_results_file.get(stage, {})
successful_packs_dict = stage_data.get(BucketUploadFlow.SUCCESSFUL_PACKS, {})
failed_packs_dict = stage_data.get(BucketUploadFlow.FAILED_PACKS, {})
successful_private_packs_dict = stage_data.get(BucketUploadFlow.SUCCESSFUL_PRIVATE_PACKS, {})
images_data_dict = stage_data.get(BucketUploadFlow.IMAGES, {})
return successful_packs_dict, failed_packs_dict, successful_private_packs_dict, images_data_dict
return {}, {}, {}, {}
def store_successful_and_failed_packs_in_ci_artifacts(packs_results_file_path: str, stage: str, successful_packs: list,
failed_packs: list, updated_private_packs: list,
images_data: dict = None):
""" Write the successful and failed packs to the correct section in the packs_results.json file
Args:
packs_results_file_path (str): The path to the pack_results.json file
stage (str): can be BucketUploadFlow.PREPARE_CONTENT_FOR_TESTING or
BucketUploadFlow.UPLOAD_PACKS_TO_MARKETPLACE_STORAGE
successful_packs (list): The list of all successful packs
failed_packs (list): The list of all failed packs
updated_private_packs (list) : The list of all private packs that were updated
images_data (dict): A dict containing all images that were uploaded for each pack
"""
packs_results = load_json(packs_results_file_path)
packs_results[stage] = dict()
if failed_packs:
failed_packs_dict = {
BucketUploadFlow.FAILED_PACKS: {
pack.name: {
BucketUploadFlow.STATUS: pack.status,
BucketUploadFlow.AGGREGATED: pack.aggregation_str if pack.aggregated and pack.aggregation_str
else "False"
} for pack in failed_packs
}
}
packs_results[stage].update(failed_packs_dict)
logging.debug(f"Failed packs {failed_packs_dict}")
if successful_packs:
successful_packs_dict = {
BucketUploadFlow.SUCCESSFUL_PACKS: {
pack.name: {
BucketUploadFlow.STATUS: pack.status,
BucketUploadFlow.AGGREGATED: pack.aggregation_str if pack.aggregated and pack.aggregation_str
else "False",
BucketUploadFlow.LATEST_VERSION: pack.latest_version
} for pack in successful_packs
}
}
packs_results[stage].update(successful_packs_dict)
logging.debug(f"Successful packs {successful_packs_dict}")
if updated_private_packs:
successful_private_packs_dict: dict = {
BucketUploadFlow.SUCCESSFUL_PRIVATE_PACKS: {pack_name: {} for pack_name in updated_private_packs}
}
packs_results[stage].update(successful_private_packs_dict)
logging.debug(f"Successful private packs {successful_private_packs_dict}")
if images_data:
packs_results[stage].update({BucketUploadFlow.IMAGES: images_data})
logging.debug(f"Images data {images_data}")
if packs_results:
json_write(packs_results_file_path, packs_results)
def load_json(file_path: str) -> dict:
""" Reads and loads json file.
Args:
file_path (str): full path to json file.
Returns:
dict: loaded json file.
"""
try:
if file_path and os.path.exists(file_path):
with open(file_path, 'r') as json_file:
result = json.load(json_file)
else:
result = {}
return result
except json.decoder.JSONDecodeError:
return {}
def json_write(file_path: str, data: Union[list, dict]):
""" Writes given data to a json file
Args:
file_path: The file path
data: The data to write
"""
with open(file_path, "w") as f:
f.write(json.dumps(data, indent=4))
def init_storage_client(service_account=None):
"""Initialize google cloud storage client.
In case of local dev usage the client will be initialized with user default credentials.
Otherwise, client will be initialized from service account json that is stored in CircleCI.
Args:
service_account (str): full path to service account json.
Return:
storage.Client: initialized google cloud storage client.
"""
if service_account:
storage_client = storage.Client.from_service_account_json(service_account)
logging.info("Created gcp service account")
return storage_client
else:
        # in case of local dev use, ignore the warning about not using a service account.
warnings.filterwarnings("ignore", message=google.auth._default._CLOUD_SDK_CREDENTIALS_WARNING)
credentials, project = google.auth.default()
storage_client = storage.Client(credentials=credentials, project=project)
logging.info("Created gcp private account")
return storage_client
def input_to_list(input_data, capitalize_input=False):
""" Helper function for handling input list or str from the user.
Args:
input_data (list or str): input from the user to handle.
        capitalize_input (bool): whether to capitalize the input list data or not.
Returns:
list: returns the original list or list that was split by comma.
"""
input_data = input_data if input_data else []
input_data = input_data if isinstance(input_data, list) else [s for s in input_data.split(',') if s]
if capitalize_input:
return [" ".join([w.title() if w.islower() else w for w in i.split()]) for i in input_data]
else:
return input_data
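# Illustrative usage (hypothetical values, not part of the original file):
#   input_to_list("malware, phishing", capitalize_input=True) -> ["Malware", "Phishing"]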
def get_valid_bool(bool_input):
""" Converts and returns valid bool.
Returns:
bool: converted bool input.
"""
return bool(strtobool(bool_input)) if isinstance(bool_input, str) else bool_input
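# Illustrative usage (hypothetical values, not part of the original file):
#   get_valid_bool("yes") -> True, get_valid_bool(False) -> False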
def convert_price(pack_id, price_value_input=None):
""" Converts to integer value price input. In case no price input provided, return zero as price.
Args:
pack_id (str): pack unique identifier.
price_value_input (str): price string to convert.
Returns:
int: converted to int pack price.
"""
try:
if not price_value_input:
            return 0  # in case no price was supplied, return 0
else:
return int(price_value_input) # otherwise convert to int and return result
except Exception:
logging.exception(f"{pack_id} pack price is not valid. The price was set to 0.")
return 0
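# Illustrative usage (hypothetical values, not part of the original file):
#   convert_price("SomePack", "10") -> 10, convert_price("SomePack") -> 0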
def get_updated_server_version(current_string_version, compared_content_item, pack_name):
""" Compares two semantic server versions and returns the higher version between them.
Args:
current_string_version (str): current string version.
compared_content_item (dict): compared content item entity.
pack_name (str): the pack name (id).
Returns:
        str: the lower of the two compared versions.
"""
lower_version_result = current_string_version
try:
compared_string_version = compared_content_item.get('fromversion') or compared_content_item.get(
'fromVersion') or "99.99.99"
current_version, compared_version = LooseVersion(current_string_version), LooseVersion(compared_string_version)
if current_version > compared_version:
lower_version_result = compared_string_version
except Exception:
content_item_name = compared_content_item.get('name') or compared_content_item.get(
'display') or compared_content_item.get('id') or compared_content_item.get('details', '')
logging.exception(f"{pack_name} failed in version comparison of content item {content_item_name}.")
finally:
return lower_version_result
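# Illustrative usage (hypothetical values, not part of the original file):
#   get_updated_server_version('6.0.0', {'fromversion': '5.5.0'}, 'SomePack') -> '5.5.0'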
def get_content_git_client(content_repo_path: str):
""" Initializes content repo client.
Args:
content_repo_path (str): content repo full path
Returns:
git.repo.base.Repo: content repo object.
"""
return git.Repo(content_repo_path)
def get_recent_commits_data(content_repo: Any, index_folder_path: str, is_bucket_upload_flow: bool,
is_private_build: bool = False, circle_branch: str = "master"):
""" Returns recent commits hashes (of head and remote master)
Args:
content_repo (git.repo.base.Repo): content repo object.
index_folder_path (str): the path to the local index folder
is_bucket_upload_flow (bool): indicates whether its a run of bucket upload flow or regular build
is_private_build (bool): indicates whether its a run of private build or not
circle_branch (str): CircleCi branch of current build
Returns:
str: last commit hash of head.
str: previous commit depending on the flow the script is running
"""
return content_repo.head.commit.hexsha, get_previous_commit(content_repo, index_folder_path, is_bucket_upload_flow,
is_private_build, circle_branch)
def get_previous_commit(content_repo, index_folder_path, is_bucket_upload_flow, is_private_build, circle_branch):
""" If running in bucket upload workflow we want to get the commit in the index which is the index
We've last uploaded to production bucket. Otherwise, we are in a commit workflow and the diff should be from the
head of origin/master
Args:
content_repo (git.repo.base.Repo): content repo object.
index_folder_path (str): the path to the local index folder
is_bucket_upload_flow (bool): indicates whether its a run of bucket upload flow or regular build
is_private_build (bool): indicates whether its a run of private build or not
circle_branch (str): CircleCi branch of current build
Returns:
str: previous commit depending on the flow the script is running
"""
if is_bucket_upload_flow:
return get_last_upload_commit_hash(content_repo, index_folder_path)
elif is_private_build:
previous_master_head_commit = content_repo.commit('origin/master~1').hexsha
logging.info(f"Using origin/master HEAD~1 commit hash {previous_master_head_commit} to diff with.")
return previous_master_head_commit
else:
if circle_branch == 'master':
head_str = "HEAD~1"
            # if circle branch is master then the current commit is origin/master HEAD, so we need to diff with HEAD~1
previous_master_head_commit = content_repo.commit('origin/master~1').hexsha
else:
head_str = "HEAD"
# else we are on a regular branch and the diff should be done with origin/master HEAD
previous_master_head_commit = content_repo.commit('origin/master').hexsha
logging.info(f"Using origin/master {head_str} commit hash {previous_master_head_commit} to diff with.")
return previous_master_head_commit
def get_last_upload_commit_hash(content_repo, index_folder_path):
"""
Returns the last origin/master commit hash that was uploaded to the bucket
Args:
content_repo (git.repo.base.Repo): content repo object.
index_folder_path: The path to the index folder
Returns:
The commit hash
"""
inner_index_json_path = os.path.join(index_folder_path, f'{GCPConfig.INDEX_NAME}.json')
if not os.path.exists(inner_index_json_path):
logging.critical(f"{GCPConfig.INDEX_NAME}.json not found in {GCPConfig.INDEX_NAME} folder")
sys.exit(1)
else:
inner_index_json_file = load_json(inner_index_json_path)
if 'commit' in inner_index_json_file:
last_upload_commit_hash = inner_index_json_file['commit']
logging.info(f"Retrieved the last commit that was uploaded to production: {last_upload_commit_hash}")
else:
logging.critical(f"No commit field in {GCPConfig.INDEX_NAME}.json, content: {str(inner_index_json_file)}")
sys.exit(1)
try:
last_upload_commit = content_repo.commit(last_upload_commit_hash).hexsha
logging.info(f"Using commit hash {last_upload_commit} from index.json to diff with.")
return last_upload_commit
except Exception as e:
logging.critical(f'Commit {last_upload_commit_hash} in {GCPConfig.INDEX_NAME}.json does not exist in content '
f'repo. Additional info:\n {e}')
sys.exit(1)
def is_ignored_pack_file(modified_file_path_parts):
""" Indicates whether a pack file needs to be ignored or not.
Args:
modified_file_path_parts: The modified file parts, e.g. if file path is "a/b/c" then the
parts list is ["a", "b", "c"]
Returns:
(bool): True if the file should be ignored, False otherwise
"""
for file_suffix in PackIgnored.ROOT_FILES:
if file_suffix in modified_file_path_parts:
return True
for pack_folder, file_suffixes in PackIgnored.NESTED_FILES.items():
if pack_folder in modified_file_path_parts:
if not file_suffixes: # Ignore all pack folder files
return True
for file_suffix in file_suffixes:
if file_suffix in modified_file_path_parts[-1]:
return True
for pack_folder in PackIgnored.NESTED_DIRS:
if pack_folder in modified_file_path_parts:
pack_folder_path = os.sep.join(modified_file_path_parts[:modified_file_path_parts.index(pack_folder) + 1])
file_path = os.sep.join(modified_file_path_parts)
for folder_path in [f for f in glob.glob(os.path.join(pack_folder_path, '*/*')) if os.path.isdir(f)]:
# Checking for all 2nd level directories. e.g. test_data directory
if file_path.startswith(folder_path):
return True
return False
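# Illustrative usage sketch (assumed caller pattern, not taken from the original source):
#   from pathlib import Path
#   parts = list(Path("Packs/SomePack/TestPlaybooks/playbook-Test.yml").parts)  # hypothetical path
#   is_ignored_pack_file(parts)  # -> True/False depending on the PackIgnored configuration
# The function only inspects the sequence of path components, except for the glob lookup it performs for
# folders listed in PackIgnored.NESTED_DIRS.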
def filter_dir_files_by_extension(release_notes_dir: str, extension: str) -> List[str]:
"""
Receives path to RN dir, filters only files in RN dir corresponding to the extension.
Needed because RN directory will be extended to contain JSON files for configurations,
see 'release_notes_bc_calculator.py'
Args:
release_notes_dir (str): Path to RN dir
extension (str): Extension to filter by.
Returns:
(List[str]): List of all of the files in directory corresponding to the extension.
"""
return [file_name for file_name in os.listdir(release_notes_dir) if file_name.endswith(extension)]
def is_the_only_rn_in_block(release_notes_dir: str, version: str, changelog: dict):
"""
    Check whether the given version's entry in the changelog is a single release note rather than an aggregated
    block, i.e. its value in the changelog does not contain other release notes that were aggregated in previous
    uploads. If that is the case, the release note immediately preceding it in the changelog is equal to the one in
    the release notes directory; otherwise there are versions in the release notes directory that are missing from
    the changelog (meaning they were aggregated), and this function asserts exactly that.
Note: The comparison is done against the release notes directory to avoid cases where there are missing versions in
the changelog due to inconsistent versions numbering, such as major version bumps. (For example, if the versions
1.2.7 and 1.3.0 are two consecutive keys in the changelog, we need to determine if 1.3.0 has aggregated the versions
    1.2.8-1.3.0, OR 1.3.0 is the consecutive version right after 1.2.7 but is a major bump. In order to check that, we
check it against the files in the release notes directory.)
Args:
release_notes_dir: the path to the release notes dir.
version (str): the wanted version.
changelog (dict): the changelog from the production bucket.
Returns:
True if this version's value in the changelog is not an aggregated release notes block. False otherwise.
"""
if not changelog.get(version):
return False
all_rn_versions = []
lowest_version = [LooseVersion('1.0.0')]
for filename in filter_dir_files_by_extension(release_notes_dir, '.md'):
current_version = underscore_file_name_to_dotted_version(filename)
all_rn_versions.append(LooseVersion(current_version))
lower_versions_all_versions = [item for item in all_rn_versions if item < version] + lowest_version
lower_versions_in_changelog = [LooseVersion(item) for item in changelog.keys() if
LooseVersion(item) < version] + lowest_version
return max(lower_versions_all_versions) == max(lower_versions_in_changelog)
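# Worked example for is_the_only_rn_in_block (hypothetical data, for illustration only):
#   changelog keys:          1.0.0, 1.2.7, 1.3.0
#   release-notes dir files: 1_2_7.md, 1_2_8.md, 1_2_9.md, 1_3_0.md
#   is_the_only_rn_in_block(rn_dir, '1.3.0', changelog)
#   -> the highest version below 1.3.0 in the directory is 1.2.9, but in the changelog it is 1.2.7, so the
#      function returns False: the 1.3.0 changelog entry is an aggregated block that swallowed 1.2.8-1.2.9.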
def underscore_file_name_to_dotted_version(file_name: str) -> str:
"""
Receives file name with expected format of x_x_x<extension>, and transforms it to dotted string.
Examples
- underscore_file_name_to_dotted_version(1_2_3.md) --> 1.2.3
- underscore_file_name_to_dotted_version(1_4_2.json) --> 1.4.2
Args:
file_name (str): File name.
Returns:
(str): Dotted version of file name
"""
return os.path.splitext(file_name)[0].replace('_', '.')
|
VirusTotal/content
|
Tests/Marketplace/marketplace_services.py
|
Python
|
mit
| 150,166
|
#!/usr/bin/python
# Code sourced from AdaFruit discussion board: https://www.adafruit.com/forums/viewtopic.php?f=8&t=34922 and https://github.com/seanbechhofer/raspberrypi/blob/master/python/TSL2561.py
import sys
import time
import re
import smbus
class Adafruit_I2C(object):
@staticmethod
def getPiRevision():
"Gets the version number of the Raspberry Pi board"
# Revision list available at: http://elinux.org/RPi_HardwareHistory#Board_Revision_History
try:
with open('/proc/cpuinfo', 'r') as infile:
for line in infile:
# Match a line of the form "Revision : 0002" while ignoring extra
                    # info in front of the revision (like 1000 when the Pi was over-volted).
                    match = re.match(r'Revision\s+:\s+.*(\w{4})$', line)
if match and match.group(1) in ['0000', '0002', '0003']:
# Return revision 1 if revision ends with 0000, 0002 or 0003.
return 1
elif match:
# Assume revision 2 if revision ends with any other 4 chars.
return 2
# Couldn't find the revision, assume revision 0 like older code for compatibility.
return 0
except:
return 0
@staticmethod
def getPiI2CBusNumber():
# Gets the I2C bus number /dev/i2c#
return 1 if Adafruit_I2C.getPiRevision() > 1 else 0
def __init__(self, address, busnum=-1, debug=False):
self.address = address
# By default, the correct I2C bus is auto-detected using /proc/cpuinfo
# Alternatively, you can hard-code the bus version below:
# self.bus = smbus.SMBus(0); # Force I2C0 (early 256MB Pi's)
# self.bus = smbus.SMBus(1); # Force I2C1 (512MB Pi's)
self.bus = smbus.SMBus(busnum if busnum >= 0 else Adafruit_I2C.getPiI2CBusNumber())
self.debug = debug
def reverseByteOrder(self, data):
"Reverses the byte order of an int (16-bit) or long (32-bit) value"
# Courtesy Vishal Sapre
byteCount = len(hex(data)[2:].replace('L','')[::2])
val = 0
for i in range(byteCount):
val = (val << 8) | (data & 0xff)
data >>= 8
return val
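    # Worked example (illustrative, not part of the original source):
    #   reverseByteOrder(0x1234)     -> 0x3412      (2 bytes swapped)
    #   reverseByteOrder(0x12345678) -> 0x78563412  (4 bytes reversed)
    # byteCount is derived from the hex-string length, so leading zero bytes of the value are not preserved.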
def errMsg(self):
print "Error accessing 0x%02X: Check your I2C address" % self.address
return -1
def write8(self, reg, value):
"Writes an 8-bit value to the specified register/address"
try:
self.bus.write_byte_data(self.address, reg, value)
if self.debug:
print "I2C: Wrote 0x%02X to register 0x%02X" % (value, reg)
except IOError, err:
return self.errMsg()
def write16(self, reg, value):
"Writes a 16-bit value to the specified register/address pair"
try:
self.bus.write_word_data(self.address, reg, value)
if self.debug:
print ("I2C: Wrote 0x%02X to register pair 0x%02X,0x%02X" %
(value, reg, reg+1))
except IOError, err:
return self.errMsg()
def writeRaw8(self, value):
"Writes an 8-bit value on the bus"
try:
self.bus.write_byte(self.address, value)
if self.debug:
print "I2C: Wrote 0x%02X" % value
except IOError, err:
return self.errMsg()
def writeList(self, reg, list):
"Writes an array of bytes using I2C format"
try:
if self.debug:
print "I2C: Writing list to register 0x%02X:" % reg
print list
self.bus.write_i2c_block_data(self.address, reg, list)
except IOError, err:
return self.errMsg()
def readList(self, reg, length):
"Read a list of bytes from the I2C device"
try:
results = self.bus.read_i2c_block_data(self.address, reg, length)
if self.debug:
print ("I2C: Device 0x%02X returned the following from reg 0x%02X" %
(self.address, reg))
print results
return results
except IOError, err:
return self.errMsg()
def readU8(self, reg):
"Read an unsigned byte from the I2C device"
try:
result = self.bus.read_byte_data(self.address, reg)
if self.debug:
print ("I2C: Device 0x%02X returned 0x%02X from reg 0x%02X" %
(self.address, result & 0xFF, reg))
return result
except IOError, err:
return self.errMsg()
def readS8(self, reg):
"Reads a signed byte from the I2C device"
try:
result = self.bus.read_byte_data(self.address, reg)
if result > 127: result -= 256
if self.debug:
print ("I2C: Device 0x%02X returned 0x%02X from reg 0x%02X" %
(self.address, result & 0xFF, reg))
return result
except IOError, err:
return self.errMsg()
def readU16(self, reg, little_endian=True):
"Reads an unsigned 16-bit value from the I2C device"
try:
result = self.bus.read_word_data(self.address,reg)
# Swap bytes if using big endian because read_word_data assumes little
# endian on ARM (little endian) systems.
if not little_endian:
result = ((result << 8) & 0xFF00) + (result >> 8)
if (self.debug):
print "I2C: Device 0x%02X returned 0x%04X from reg 0x%02X" % (self.address, result & 0xFFFF, reg)
return result
except IOError, err:
return self.errMsg()
def readS16(self, reg, little_endian=True):
"Reads a signed 16-bit value from the I2C device"
try:
result = self.readU16(reg,little_endian)
if result > 32767: result -= 65536
return result
except IOError, err:
return self.errMsg()
class TSL2561:
i2c = None
def __init__(self, address=0x39, debug=0, pause=0.8, gain=0):
self.i2c = Adafruit_I2C(address)
self.address = address
self.pause = pause
self.debug = debug
self.gain = gain
self.i2c.write8(0x80, 0x03) # enable the device
def setGain(self,gain=1):
""" Set the gain """
if (gain != self.gain):
if (gain==1):
self.i2c.write8(0x81, 0x02) # set gain = 1X and timing = 402 mSec
if (self.debug):
print "Setting low gain"
else:
self.i2c.write8(0x81, 0x12) # set gain = 16X and timing = 402 mSec
if (self.debug):
print "Setting high gain"
            self.gain=gain  # save gain for calculation
time.sleep(self.pause) # pause for integration (self.pause must be bigger than integration time)
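    # Register notes (added for clarity; values follow the TSL2561 datasheet, so treat them as an assumption):
    # 0x81 is the command bit (0x80) OR'd with the timing register (0x01). Bits 0-1 select the integration
    # time (0b10 = 402 ms) and bit 4 selects the gain (0 = 1x, 1 = 16x), hence 0x02 = 1x/402ms and 0x12 = 16x/402ms.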
def readWord(self, reg):
"""Reads a word from the I2C device"""
try:
wordval = self.i2c.readU16(reg)
newval = self.i2c.reverseByteOrder(wordval)
if (self.debug):
print("I2C: Device 0x%02X returned 0x%04X from reg 0x%02X" % (self.address, wordval & 0xFFFF, reg))
return newval
except IOError:
print("Error accessing 0x%02X: Check your I2C address" % self.address)
return -1
def readFull(self, reg=0x8C):
"""Reads visible+IR diode from the I2C device"""
return self.readWord(reg)
def readIR(self, reg=0x8E):
"""Reads IR only diode from the I2C device"""
return self.readWord(reg)
def readLux(self, gain = 0):
"""Grabs a lux reading either with autoranging (gain=0) or with a specified gain (1, 16)"""
if (gain == 1 or gain == 16):
self.setGain(gain) # low/highGain
ambient = self.readFull()
IR = self.readIR()
elif (gain==0): # auto gain
self.setGain(16) # first try highGain
ambient = self.readFull()
if (ambient < 65535):
IR = self.readIR()
            if (ambient >= 65535 or IR >= 65535):  # value(s) exceed(s) data range
self.setGain(1) # set lowGain
ambient = self.readFull()
IR = self.readIR()
if (self.gain==1):
ambient *= 16 # scale 1x to 16x
IR *= 16 # scale 1x to 16x
if ambient != 0: ratio = (IR / float(ambient)) # changed to make it run under python 2
else: ratio = 0
if (self.debug):
print "IR Result", IR
print "Ambient Result", ambient
        if ((ratio >= 0) and (ratio <= 0.52)):
lux = (0.0315 * ambient) - (0.0593 * ambient * (ratio**1.4))
elif (ratio <= 0.65):
lux = (0.0229 * ambient) - (0.0291 * IR)
elif (ratio <= 0.80):
lux = (0.0157 * ambient) - (0.018 * IR)
elif (ratio <= 1.3):
lux = (0.00338 * ambient) - (0.0026 * IR)
elif (ratio > 1.3):
lux = 0
return lux
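    # Note (editorial assumption, not original code): the piecewise coefficients above appear to match the
    # empirical lux formula for the CS package in the TSL2561 datasheet; a different package variant may
    # require different constants.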
def readLuxRetry(self):
test_count = 0
lux = 0
while test_count < 10:
lux = self.readLux(self.gain)
test_count += 1
if lux != 0:
lux = float(round(lux,2))
break
time.sleep(2)
return lux
# if __name__ == "__main__":
# tsl=TSL2561(debug=True)
# tsl2=TSL2561(debug=True,gain=16)
# tsl3=TSL2561(debug=True,gain=1)
# print "LUX HIGH GAIN ", tsl2.readLuxRetry()
# print "LUX LOW GAIN ", tsl3.readLuxRetry()
# print "LUX AUTO GAIN ", tsl.readLuxRetry()
|
arek125/remote-GPIO-control-server
|
tsl2561.py
|
Python
|
mit
| 9,124
|
from .. import util
from ..util import sqla_compat
from . import schemaobj
from sqlalchemy.types import NULLTYPE
from .base import Operations, BatchOperations
import re
class MigrateOperation(object):
"""base class for migration command and organization objects.
This system is part of the operation extensibility API.
.. versionadded:: 0.8.0
.. seealso::
:ref:`operation_objects`
:ref:`operation_plugins`
:ref:`customizing_revision`
"""
@util.memoized_property
def info(self):
"""A dictionary that may be used to store arbitrary information
along with this :class:`.MigrateOperation` object.
"""
return {}
class AddConstraintOp(MigrateOperation):
"""Represent an add constraint operation."""
@property
def constraint_type(self):
raise NotImplementedError()
@classmethod
def from_constraint(cls, constraint):
funcs = {
"unique_constraint": CreateUniqueConstraintOp.from_constraint,
"foreign_key_constraint": CreateForeignKeyOp.from_constraint,
"primary_key_constraint": CreatePrimaryKeyOp.from_constraint,
"check_constraint": CreateCheckConstraintOp.from_constraint,
"column_check_constraint": CreateCheckConstraintOp.from_constraint,
}
return funcs[constraint.__visit_name__](constraint)
def reverse(self):
return DropConstraintOp.from_constraint(self.to_constraint())
def to_diff_tuple(self):
return ("add_constraint", self.to_constraint())
@Operations.register_operation("drop_constraint")
@BatchOperations.register_operation("drop_constraint", "batch_drop_constraint")
class DropConstraintOp(MigrateOperation):
"""Represent a drop constraint operation."""
def __init__(
self,
constraint_name, table_name, type_=None, schema=None,
_orig_constraint=None):
self.constraint_name = constraint_name
self.table_name = table_name
self.constraint_type = type_
self.schema = schema
self._orig_constraint = _orig_constraint
def reverse(self):
if self._orig_constraint is None:
raise ValueError(
"operation is not reversible; "
"original constraint is not present")
return AddConstraintOp.from_constraint(self._orig_constraint)
def to_diff_tuple(self):
if self.constraint_type == "foreignkey":
return ("remove_fk", self.to_constraint())
else:
return ("remove_constraint", self.to_constraint())
@classmethod
def from_constraint(cls, constraint):
types = {
"unique_constraint": "unique",
"foreign_key_constraint": "foreignkey",
"primary_key_constraint": "primary",
"check_constraint": "check",
"column_check_constraint": "check",
}
constraint_table = sqla_compat._table_for_constraint(constraint)
return cls(
constraint.name,
constraint_table.name,
schema=constraint_table.schema,
type_=types[constraint.__visit_name__],
_orig_constraint=constraint
)
def to_constraint(self):
if self._orig_constraint is not None:
return self._orig_constraint
else:
raise ValueError(
"constraint cannot be produced; "
"original constraint is not present")
@classmethod
@util._with_legacy_names([
("type", "type_"),
("name", "constraint_name"),
])
def drop_constraint(
cls, operations, constraint_name, table_name,
type_=None, schema=None):
"""Drop a constraint of the given name, typically via DROP CONSTRAINT.
:param constraint_name: name of the constraint.
:param table_name: table name.
:param ``type_``: optional, required on MySQL. can be
'foreignkey', 'primary', 'unique', or 'check'.
:param schema: Optional schema name to operate within. To control
quoting of the schema outside of the default behavior, use
the SQLAlchemy construct
:class:`~sqlalchemy.sql.elements.quoted_name`.
.. versionadded:: 0.7.0 'schema' can now accept a
:class:`~sqlalchemy.sql.elements.quoted_name` construct.
.. versionchanged:: 0.8.0 The following positional argument names
have been changed:
* name -> constraint_name
"""
op = cls(constraint_name, table_name, type_=type_, schema=schema)
return operations.invoke(op)
@classmethod
def batch_drop_constraint(cls, operations, constraint_name, type_=None):
"""Issue a "drop constraint" instruction using the
current batch migration context.
The batch form of this call omits the ``table_name`` and ``schema``
arguments from the call.
.. seealso::
:meth:`.Operations.drop_constraint`
.. versionchanged:: 0.8.0 The following positional argument names
have been changed:
* name -> constraint_name
"""
op = cls(
constraint_name, operations.impl.table_name,
type_=type_, schema=operations.impl.schema
)
return operations.invoke(op)
@Operations.register_operation("create_primary_key")
@BatchOperations.register_operation(
"create_primary_key", "batch_create_primary_key")
class CreatePrimaryKeyOp(AddConstraintOp):
"""Represent a create primary key operation."""
constraint_type = "primarykey"
def __init__(
self, constraint_name, table_name, columns,
schema=None, _orig_constraint=None, **kw):
self.constraint_name = constraint_name
self.table_name = table_name
self.columns = columns
self.schema = schema
self._orig_constraint = _orig_constraint
self.kw = kw
@classmethod
def from_constraint(cls, constraint):
constraint_table = sqla_compat._table_for_constraint(constraint)
return cls(
constraint.name,
constraint_table.name,
constraint.columns,
schema=constraint_table.schema,
_orig_constraint=constraint
)
def to_constraint(self, migration_context=None):
if self._orig_constraint is not None:
return self._orig_constraint
schema_obj = schemaobj.SchemaObjects(migration_context)
return schema_obj.primary_key_constraint(
self.constraint_name, self.table_name,
self.columns, schema=self.schema)
@classmethod
@util._with_legacy_names([
('name', 'constraint_name'),
('cols', 'columns')
])
def create_primary_key(
cls, operations,
constraint_name, table_name, columns, schema=None):
"""Issue a "create primary key" instruction using the current
migration context.
e.g.::
from alembic import op
op.create_primary_key(
"pk_my_table", "my_table",
["id", "version"]
)
This internally generates a :class:`~sqlalchemy.schema.Table` object
containing the necessary columns, then generates a new
:class:`~sqlalchemy.schema.PrimaryKeyConstraint`
object which it then associates with the
:class:`~sqlalchemy.schema.Table`.
Any event listeners associated with this action will be fired
off normally. The :class:`~sqlalchemy.schema.AddConstraint`
construct is ultimately used to generate the ALTER statement.
:param name: Name of the primary key constraint. The name is necessary
so that an ALTER statement can be emitted. For setups that
use an automated naming scheme such as that described at
:ref:`sqla:constraint_naming_conventions`
``name`` here can be ``None``, as the event listener will
apply the name to the constraint object when it is associated
with the table.
:param table_name: String name of the target table.
:param columns: a list of string column names to be applied to the
primary key constraint.
:param schema: Optional schema name to operate within. To control
quoting of the schema outside of the default behavior, use
the SQLAlchemy construct
:class:`~sqlalchemy.sql.elements.quoted_name`.
.. versionadded:: 0.7.0 'schema' can now accept a
:class:`~sqlalchemy.sql.elements.quoted_name` construct.
.. versionchanged:: 0.8.0 The following positional argument names
have been changed:
* name -> constraint_name
* cols -> columns
"""
op = cls(constraint_name, table_name, columns, schema)
return operations.invoke(op)
@classmethod
def batch_create_primary_key(cls, operations, constraint_name, columns):
"""Issue a "create primary key" instruction using the
current batch migration context.
The batch form of this call omits the ``table_name`` and ``schema``
arguments from the call.
.. seealso::
:meth:`.Operations.create_primary_key`
"""
op = cls(
constraint_name, operations.impl.table_name, columns,
schema=operations.impl.schema
)
return operations.invoke(op)
@Operations.register_operation("create_unique_constraint")
@BatchOperations.register_operation(
"create_unique_constraint", "batch_create_unique_constraint")
class CreateUniqueConstraintOp(AddConstraintOp):
"""Represent a create unique constraint operation."""
constraint_type = "unique"
def __init__(
self, constraint_name, table_name,
columns, schema=None, _orig_constraint=None, **kw):
self.constraint_name = constraint_name
self.table_name = table_name
self.columns = columns
self.schema = schema
self._orig_constraint = _orig_constraint
self.kw = kw
@classmethod
def from_constraint(cls, constraint):
constraint_table = sqla_compat._table_for_constraint(constraint)
kw = {}
if constraint.deferrable:
kw['deferrable'] = constraint.deferrable
if constraint.initially:
kw['initially'] = constraint.initially
return cls(
constraint.name,
constraint_table.name,
[c.name for c in constraint.columns],
schema=constraint_table.schema,
_orig_constraint=constraint,
**kw
)
def to_constraint(self, migration_context=None):
if self._orig_constraint is not None:
return self._orig_constraint
schema_obj = schemaobj.SchemaObjects(migration_context)
return schema_obj.unique_constraint(
self.constraint_name, self.table_name, self.columns,
schema=self.schema, **self.kw)
@classmethod
@util._with_legacy_names([
('name', 'constraint_name'),
('source', 'table_name'),
('local_cols', 'columns'),
])
def create_unique_constraint(
cls, operations, constraint_name, table_name, columns,
schema=None, **kw):
"""Issue a "create unique constraint" instruction using the
current migration context.
e.g.::
from alembic import op
op.create_unique_constraint("uq_user_name", "user", ["name"])
This internally generates a :class:`~sqlalchemy.schema.Table` object
containing the necessary columns, then generates a new
:class:`~sqlalchemy.schema.UniqueConstraint`
object which it then associates with the
:class:`~sqlalchemy.schema.Table`.
Any event listeners associated with this action will be fired
off normally. The :class:`~sqlalchemy.schema.AddConstraint`
construct is ultimately used to generate the ALTER statement.
:param name: Name of the unique constraint. The name is necessary
so that an ALTER statement can be emitted. For setups that
use an automated naming scheme such as that described at
:ref:`sqla:constraint_naming_conventions`,
``name`` here can be ``None``, as the event listener will
apply the name to the constraint object when it is associated
with the table.
:param table_name: String name of the source table.
:param columns: a list of string column names in the
source table.
:param deferrable: optional bool. If set, emit DEFERRABLE or
NOT DEFERRABLE when issuing DDL for this constraint.
:param initially: optional string. If set, emit INITIALLY <value>
when issuing DDL for this constraint.
:param schema: Optional schema name to operate within. To control
quoting of the schema outside of the default behavior, use
the SQLAlchemy construct
:class:`~sqlalchemy.sql.elements.quoted_name`.
.. versionadded:: 0.7.0 'schema' can now accept a
:class:`~sqlalchemy.sql.elements.quoted_name` construct.
.. versionchanged:: 0.8.0 The following positional argument names
have been changed:
* name -> constraint_name
* source -> table_name
* local_cols -> columns
"""
op = cls(
constraint_name, table_name, columns,
schema=schema, **kw
)
return operations.invoke(op)
@classmethod
@util._with_legacy_names([('name', 'constraint_name')])
def batch_create_unique_constraint(
cls, operations, constraint_name, columns, **kw):
"""Issue a "create unique constraint" instruction using the
current batch migration context.
The batch form of this call omits the ``source`` and ``schema``
arguments from the call.
.. seealso::
:meth:`.Operations.create_unique_constraint`
.. versionchanged:: 0.8.0 The following positional argument names
have been changed:
* name -> constraint_name
"""
kw['schema'] = operations.impl.schema
op = cls(
constraint_name, operations.impl.table_name, columns,
**kw
)
return operations.invoke(op)
@Operations.register_operation("create_foreign_key")
@BatchOperations.register_operation(
"create_foreign_key", "batch_create_foreign_key")
class CreateForeignKeyOp(AddConstraintOp):
"""Represent a create foreign key constraint operation."""
constraint_type = "foreignkey"
def __init__(
self, constraint_name, source_table, referent_table, local_cols,
remote_cols, _orig_constraint=None, **kw):
self.constraint_name = constraint_name
self.source_table = source_table
self.referent_table = referent_table
self.local_cols = local_cols
self.remote_cols = remote_cols
self._orig_constraint = _orig_constraint
self.kw = kw
def to_diff_tuple(self):
return ("add_fk", self.to_constraint())
@classmethod
def from_constraint(cls, constraint):
kw = {}
if constraint.onupdate:
kw['onupdate'] = constraint.onupdate
if constraint.ondelete:
kw['ondelete'] = constraint.ondelete
if constraint.initially:
kw['initially'] = constraint.initially
if constraint.deferrable:
kw['deferrable'] = constraint.deferrable
if constraint.use_alter:
kw['use_alter'] = constraint.use_alter
source_schema, source_table, \
source_columns, target_schema, \
target_table, target_columns = sqla_compat._fk_spec(constraint)
kw['source_schema'] = source_schema
kw['referent_schema'] = target_schema
return cls(
constraint.name,
source_table,
target_table,
source_columns,
target_columns,
_orig_constraint=constraint,
**kw
)
def to_constraint(self, migration_context=None):
if self._orig_constraint is not None:
return self._orig_constraint
schema_obj = schemaobj.SchemaObjects(migration_context)
return schema_obj.foreign_key_constraint(
self.constraint_name,
self.source_table, self.referent_table,
self.local_cols, self.remote_cols,
**self.kw)
@classmethod
@util._with_legacy_names([
('name', 'constraint_name'),
('source', 'source_table'),
('referent', 'referent_table'),
])
def create_foreign_key(cls, operations, constraint_name,
source_table, referent_table, local_cols,
remote_cols, onupdate=None, ondelete=None,
deferrable=None, initially=None, match=None,
source_schema=None, referent_schema=None,
**dialect_kw):
"""Issue a "create foreign key" instruction using the
current migration context.
e.g.::
from alembic import op
op.create_foreign_key(
"fk_user_address", "address",
"user", ["user_id"], ["id"])
This internally generates a :class:`~sqlalchemy.schema.Table` object
containing the necessary columns, then generates a new
:class:`~sqlalchemy.schema.ForeignKeyConstraint`
object which it then associates with the
:class:`~sqlalchemy.schema.Table`.
Any event listeners associated with this action will be fired
off normally. The :class:`~sqlalchemy.schema.AddConstraint`
construct is ultimately used to generate the ALTER statement.
:param name: Name of the foreign key constraint. The name is necessary
so that an ALTER statement can be emitted. For setups that
use an automated naming scheme such as that described at
:ref:`sqla:constraint_naming_conventions`,
``name`` here can be ``None``, as the event listener will
apply the name to the constraint object when it is associated
with the table.
:param source_table: String name of the source table.
:param referent_table: String name of the destination table.
:param local_cols: a list of string column names in the
source table.
:param remote_cols: a list of string column names in the
remote table.
:param onupdate: Optional string. If set, emit ON UPDATE <value> when
issuing DDL for this constraint. Typical values include CASCADE,
DELETE and RESTRICT.
:param ondelete: Optional string. If set, emit ON DELETE <value> when
issuing DDL for this constraint. Typical values include CASCADE,
DELETE and RESTRICT.
:param deferrable: optional bool. If set, emit DEFERRABLE or NOT
DEFERRABLE when issuing DDL for this constraint.
:param source_schema: Optional schema name of the source table.
:param referent_schema: Optional schema name of the destination table.
.. versionchanged:: 0.8.0 The following positional argument names
have been changed:
* name -> constraint_name
* source -> source_table
* referent -> referent_table
"""
op = cls(
constraint_name,
source_table, referent_table,
local_cols, remote_cols,
onupdate=onupdate, ondelete=ondelete,
deferrable=deferrable,
source_schema=source_schema,
referent_schema=referent_schema,
initially=initially, match=match,
**dialect_kw
)
return operations.invoke(op)
@classmethod
@util._with_legacy_names([
('name', 'constraint_name'),
('referent', 'referent_table')
])
def batch_create_foreign_key(
cls, operations, constraint_name, referent_table,
local_cols, remote_cols,
referent_schema=None,
onupdate=None, ondelete=None,
deferrable=None, initially=None, match=None,
**dialect_kw):
"""Issue a "create foreign key" instruction using the
current batch migration context.
The batch form of this call omits the ``source`` and ``source_schema``
arguments from the call.
e.g.::
with batch_alter_table("address") as batch_op:
batch_op.create_foreign_key(
"fk_user_address",
"user", ["user_id"], ["id"])
.. seealso::
:meth:`.Operations.create_foreign_key`
.. versionchanged:: 0.8.0 The following positional argument names
have been changed:
* name -> constraint_name
* referent -> referent_table
"""
op = cls(
constraint_name,
operations.impl.table_name, referent_table,
local_cols, remote_cols,
onupdate=onupdate, ondelete=ondelete,
deferrable=deferrable,
source_schema=operations.impl.schema,
referent_schema=referent_schema,
initially=initially, match=match,
**dialect_kw
)
return operations.invoke(op)
@Operations.register_operation("create_check_constraint")
@BatchOperations.register_operation(
"create_check_constraint", "batch_create_check_constraint")
class CreateCheckConstraintOp(AddConstraintOp):
"""Represent a create check constraint operation."""
constraint_type = "check"
def __init__(
self, constraint_name, table_name,
condition, schema=None, _orig_constraint=None, **kw):
self.constraint_name = constraint_name
self.table_name = table_name
self.condition = condition
self.schema = schema
self._orig_constraint = _orig_constraint
self.kw = kw
@classmethod
def from_constraint(cls, constraint):
constraint_table = sqla_compat._table_for_constraint(constraint)
return cls(
constraint.name,
constraint_table.name,
constraint.sqltext,
schema=constraint_table.schema,
_orig_constraint=constraint
)
def to_constraint(self, migration_context=None):
if self._orig_constraint is not None:
return self._orig_constraint
schema_obj = schemaobj.SchemaObjects(migration_context)
return schema_obj.check_constraint(
self.constraint_name, self.table_name,
self.condition, schema=self.schema, **self.kw)
@classmethod
@util._with_legacy_names([
('name', 'constraint_name'),
('source', 'table_name')
])
def create_check_constraint(
cls, operations,
constraint_name, table_name, condition,
schema=None, **kw):
"""Issue a "create check constraint" instruction using the
current migration context.
e.g.::
from alembic import op
from sqlalchemy.sql import column, func
op.create_check_constraint(
"ck_user_name_len",
"user",
func.len(column('name')) > 5
)
CHECK constraints are usually against a SQL expression, so ad-hoc
table metadata is usually needed. The function will convert the given
arguments into a :class:`sqlalchemy.schema.CheckConstraint` bound
to an anonymous table in order to emit the CREATE statement.
:param name: Name of the check constraint. The name is necessary
so that an ALTER statement can be emitted. For setups that
use an automated naming scheme such as that described at
:ref:`sqla:constraint_naming_conventions`,
``name`` here can be ``None``, as the event listener will
apply the name to the constraint object when it is associated
with the table.
:param table_name: String name of the source table.
:param condition: SQL expression that's the condition of the
constraint. Can be a string or SQLAlchemy expression language
structure.
:param deferrable: optional bool. If set, emit DEFERRABLE or
NOT DEFERRABLE when issuing DDL for this constraint.
:param initially: optional string. If set, emit INITIALLY <value>
when issuing DDL for this constraint.
:param schema: Optional schema name to operate within. To control
quoting of the schema outside of the default behavior, use
the SQLAlchemy construct
:class:`~sqlalchemy.sql.elements.quoted_name`.
.. versionadded:: 0.7.0 'schema' can now accept a
:class:`~sqlalchemy.sql.elements.quoted_name` construct.
.. versionchanged:: 0.8.0 The following positional argument names
have been changed:
* name -> constraint_name
* source -> table_name
"""
op = cls(constraint_name, table_name, condition, schema=schema, **kw)
return operations.invoke(op)
@classmethod
@util._with_legacy_names([('name', 'constraint_name')])
def batch_create_check_constraint(
cls, operations, constraint_name, condition, **kw):
"""Issue a "create check constraint" instruction using the
current batch migration context.
The batch form of this call omits the ``source`` and ``schema``
arguments from the call.
.. seealso::
:meth:`.Operations.create_check_constraint`
.. versionchanged:: 0.8.0 The following positional argument names
have been changed:
* name -> constraint_name
"""
op = cls(
constraint_name, operations.impl.table_name,
condition, schema=operations.impl.schema, **kw)
return operations.invoke(op)
@Operations.register_operation("create_index")
@BatchOperations.register_operation("create_index", "batch_create_index")
class CreateIndexOp(MigrateOperation):
"""Represent a create index operation."""
def __init__(
self, index_name, table_name, columns, schema=None,
unique=False, _orig_index=None, **kw):
self.index_name = index_name
self.table_name = table_name
self.columns = columns
self.schema = schema
self.unique = unique
self.kw = kw
self._orig_index = _orig_index
def reverse(self):
return DropIndexOp.from_index(self.to_index())
def to_diff_tuple(self):
return ("add_index", self.to_index())
@classmethod
def from_index(cls, index):
return cls(
index.name,
index.table.name,
sqla_compat._get_index_expressions(index),
schema=index.table.schema,
unique=index.unique,
_orig_index=index,
**index.kwargs
)
def to_index(self, migration_context=None):
if self._orig_index:
return self._orig_index
schema_obj = schemaobj.SchemaObjects(migration_context)
return schema_obj.index(
self.index_name, self.table_name, self.columns, schema=self.schema,
unique=self.unique, **self.kw)
@classmethod
@util._with_legacy_names([('name', 'index_name')])
def create_index(
cls, operations,
index_name, table_name, columns, schema=None,
unique=False, **kw):
"""Issue a "create index" instruction using the current
migration context.
e.g.::
from alembic import op
op.create_index('ik_test', 't1', ['foo', 'bar'])
Functional indexes can be produced by using the
:func:`sqlalchemy.sql.expression.text` construct::
from alembic import op
from sqlalchemy import text
op.create_index('ik_test', 't1', [text('lower(foo)')])
.. versionadded:: 0.6.7 support for making use of the
:func:`~sqlalchemy.sql.expression.text` construct in
conjunction with
:meth:`.Operations.create_index` in
order to produce functional expressions within CREATE INDEX.
:param index_name: name of the index.
:param table_name: name of the owning table.
:param columns: a list consisting of string column names and/or
:func:`~sqlalchemy.sql.expression.text` constructs.
:param schema: Optional schema name to operate within. To control
quoting of the schema outside of the default behavior, use
the SQLAlchemy construct
:class:`~sqlalchemy.sql.elements.quoted_name`.
.. versionadded:: 0.7.0 'schema' can now accept a
:class:`~sqlalchemy.sql.elements.quoted_name` construct.
:param unique: If True, create a unique index.
:param quote:
Force quoting of this column's name on or off, corresponding
to ``True`` or ``False``. When left at its default
of ``None``, the column identifier will be quoted according to
whether the name is case sensitive (identifiers with at least one
upper case character are treated as case sensitive), or if it's a
reserved word. This flag is only needed to force quoting of a
reserved word which is not known by the SQLAlchemy dialect.
:param \**kw: Additional keyword arguments not mentioned above are
dialect specific, and passed in the form
``<dialectname>_<argname>``.
See the documentation regarding an individual dialect at
:ref:`dialect_toplevel` for detail on documented arguments.
.. versionchanged:: 0.8.0 The following positional argument names
have been changed:
* name -> index_name
"""
op = cls(
index_name, table_name, columns, schema=schema,
unique=unique, **kw
)
return operations.invoke(op)
@classmethod
def batch_create_index(cls, operations, index_name, columns, **kw):
"""Issue a "create index" instruction using the
current batch migration context.
.. seealso::
:meth:`.Operations.create_index`
"""
op = cls(
index_name, operations.impl.table_name, columns,
schema=operations.impl.schema, **kw
)
return operations.invoke(op)
@Operations.register_operation("drop_index")
@BatchOperations.register_operation("drop_index", "batch_drop_index")
class DropIndexOp(MigrateOperation):
"""Represent a drop index operation."""
def __init__(
self, index_name, table_name=None, schema=None, _orig_index=None):
self.index_name = index_name
self.table_name = table_name
self.schema = schema
self._orig_index = _orig_index
def to_diff_tuple(self):
return ("remove_index", self.to_index())
def reverse(self):
if self._orig_index is None:
raise ValueError(
"operation is not reversible; "
"original index is not present")
return CreateIndexOp.from_index(self._orig_index)
@classmethod
def from_index(cls, index):
return cls(
index.name,
index.table.name,
schema=index.table.schema,
_orig_index=index
)
def to_index(self, migration_context=None):
if self._orig_index is not None:
return self._orig_index
schema_obj = schemaobj.SchemaObjects(migration_context)
# need a dummy column name here since SQLAlchemy
# 0.7.6 and further raises on Index with no columns
return schema_obj.index(
self.index_name, self.table_name, ['x'], schema=self.schema)
@classmethod
@util._with_legacy_names([
('name', 'index_name'),
('tablename', 'table_name')
])
def drop_index(cls, operations, index_name, table_name=None, schema=None):
"""Issue a "drop index" instruction using the current
migration context.
e.g.::
drop_index("accounts")
:param index_name: name of the index.
:param table_name: name of the owning table. Some
backends such as Microsoft SQL Server require this.
:param schema: Optional schema name to operate within. To control
quoting of the schema outside of the default behavior, use
the SQLAlchemy construct
:class:`~sqlalchemy.sql.elements.quoted_name`.
.. versionadded:: 0.7.0 'schema' can now accept a
:class:`~sqlalchemy.sql.elements.quoted_name` construct.
.. versionchanged:: 0.8.0 The following positional argument names
have been changed:
* name -> index_name
"""
op = cls(index_name, table_name=table_name, schema=schema)
return operations.invoke(op)
@classmethod
@util._with_legacy_names([('name', 'index_name')])
def batch_drop_index(cls, operations, index_name, **kw):
"""Issue a "drop index" instruction using the
current batch migration context.
.. seealso::
:meth:`.Operations.drop_index`
.. versionchanged:: 0.8.0 The following positional argument names
have been changed:
* name -> index_name
"""
op = cls(
index_name, table_name=operations.impl.table_name,
schema=operations.impl.schema
)
return operations.invoke(op)
@Operations.register_operation("create_table")
class CreateTableOp(MigrateOperation):
"""Represent a create table operation."""
def __init__(
self, table_name, columns, schema=None, _orig_table=None, **kw):
self.table_name = table_name
self.columns = columns
self.schema = schema
self.kw = kw
self._orig_table = _orig_table
def reverse(self):
return DropTableOp.from_table(self.to_table())
def to_diff_tuple(self):
return ("add_table", self.to_table())
@classmethod
def from_table(cls, table):
return cls(
table.name,
list(table.c) + list(table.constraints),
schema=table.schema,
_orig_table=table,
**table.kwargs
)
def to_table(self, migration_context=None):
if self._orig_table is not None:
return self._orig_table
schema_obj = schemaobj.SchemaObjects(migration_context)
return schema_obj.table(
self.table_name, *self.columns, schema=self.schema, **self.kw
)
@classmethod
@util._with_legacy_names([('name', 'table_name')])
def create_table(cls, operations, table_name, *columns, **kw):
"""Issue a "create table" instruction using the current migration
context.
This directive receives an argument list similar to that of the
traditional :class:`sqlalchemy.schema.Table` construct, but without the
metadata::
from sqlalchemy import INTEGER, VARCHAR, NVARCHAR, Column
from alembic import op
op.create_table(
'account',
Column('id', INTEGER, primary_key=True),
Column('name', VARCHAR(50), nullable=False),
Column('description', NVARCHAR(200)),
Column('timestamp', TIMESTAMP, server_default=func.now())
)
Note that :meth:`.create_table` accepts
:class:`~sqlalchemy.schema.Column`
constructs directly from the SQLAlchemy library. In particular,
default values to be created on the database side are
specified using the ``server_default`` parameter, and not
``default`` which only specifies Python-side defaults::
from alembic import op
from sqlalchemy import Column, TIMESTAMP, func
# specify "DEFAULT NOW" along with the "timestamp" column
op.create_table('account',
Column('id', INTEGER, primary_key=True),
Column('timestamp', TIMESTAMP, server_default=func.now())
)
The function also returns a newly created
:class:`~sqlalchemy.schema.Table` object, corresponding to the table
specification given, which is suitable for
immediate SQL operations, in particular
:meth:`.Operations.bulk_insert`::
from sqlalchemy import INTEGER, VARCHAR, NVARCHAR, Column
from alembic import op
account_table = op.create_table(
'account',
Column('id', INTEGER, primary_key=True),
Column('name', VARCHAR(50), nullable=False),
Column('description', NVARCHAR(200)),
Column('timestamp', TIMESTAMP, server_default=func.now())
)
op.bulk_insert(
account_table,
[
{"name": "A1", "description": "account 1"},
{"name": "A2", "description": "account 2"},
]
)
.. versionadded:: 0.7.0
:param table_name: Name of the table
:param \*columns: collection of :class:`~sqlalchemy.schema.Column`
objects within
the table, as well as optional :class:`~sqlalchemy.schema.Constraint`
objects
and :class:`~.sqlalchemy.schema.Index` objects.
:param schema: Optional schema name to operate within. To control
quoting of the schema outside of the default behavior, use
the SQLAlchemy construct
:class:`~sqlalchemy.sql.elements.quoted_name`.
.. versionadded:: 0.7.0 'schema' can now accept a
:class:`~sqlalchemy.sql.elements.quoted_name` construct.
:param \**kw: Other keyword arguments are passed to the underlying
:class:`sqlalchemy.schema.Table` object created for the command.
:return: the :class:`~sqlalchemy.schema.Table` object corresponding
to the parameters given.
.. versionadded:: 0.7.0 - the :class:`~sqlalchemy.schema.Table`
object is returned.
.. versionchanged:: 0.8.0 The following positional argument names
have been changed:
* name -> table_name
"""
op = cls(table_name, columns, **kw)
return operations.invoke(op)
@Operations.register_operation("drop_table")
class DropTableOp(MigrateOperation):
"""Represent a drop table operation."""
def __init__(
self, table_name, schema=None, table_kw=None, _orig_table=None):
self.table_name = table_name
self.schema = schema
self.table_kw = table_kw or {}
self._orig_table = _orig_table
def to_diff_tuple(self):
return ("remove_table", self.to_table())
def reverse(self):
if self._orig_table is None:
raise ValueError(
"operation is not reversible; "
"original table is not present")
return CreateTableOp.from_table(self._orig_table)
@classmethod
def from_table(cls, table):
return cls(table.name, schema=table.schema, _orig_table=table)
def to_table(self, migration_context=None):
if self._orig_table is not None:
return self._orig_table
schema_obj = schemaobj.SchemaObjects(migration_context)
return schema_obj.table(
self.table_name,
schema=self.schema,
**self.table_kw)
@classmethod
@util._with_legacy_names([('name', 'table_name')])
def drop_table(cls, operations, table_name, schema=None, **kw):
"""Issue a "drop table" instruction using the current
migration context.
e.g.::
drop_table("accounts")
:param table_name: Name of the table
:param schema: Optional schema name to operate within. To control
quoting of the schema outside of the default behavior, use
the SQLAlchemy construct
:class:`~sqlalchemy.sql.elements.quoted_name`.
.. versionadded:: 0.7.0 'schema' can now accept a
:class:`~sqlalchemy.sql.elements.quoted_name` construct.
:param \**kw: Other keyword arguments are passed to the underlying
:class:`sqlalchemy.schema.Table` object created for the command.
.. versionchanged:: 0.8.0 The following positional argument names
have been changed:
* name -> table_name
"""
op = cls(table_name, schema=schema, table_kw=kw)
operations.invoke(op)
class AlterTableOp(MigrateOperation):
"""Represent an alter table operation."""
def __init__(self, table_name, schema=None):
self.table_name = table_name
self.schema = schema
@Operations.register_operation("rename_table")
class RenameTableOp(AlterTableOp):
"""Represent a rename table operation."""
def __init__(self, old_table_name, new_table_name, schema=None):
super(RenameTableOp, self).__init__(old_table_name, schema=schema)
self.new_table_name = new_table_name
@classmethod
def rename_table(
cls, operations, old_table_name, new_table_name, schema=None):
"""Emit an ALTER TABLE to rename a table.
:param old_table_name: old name.
:param new_table_name: new name.
:param schema: Optional schema name to operate within. To control
quoting of the schema outside of the default behavior, use
the SQLAlchemy construct
:class:`~sqlalchemy.sql.elements.quoted_name`.
.. versionadded:: 0.7.0 'schema' can now accept a
:class:`~sqlalchemy.sql.elements.quoted_name` construct.
"""
op = cls(old_table_name, new_table_name, schema=schema)
return operations.invoke(op)
@Operations.register_operation("alter_column")
@BatchOperations.register_operation("alter_column", "batch_alter_column")
class AlterColumnOp(AlterTableOp):
"""Represent an alter column operation."""
def __init__(
self, table_name, column_name, schema=None,
existing_type=None,
existing_server_default=False,
existing_nullable=None,
modify_nullable=None,
modify_server_default=False,
modify_name=None,
modify_type=None,
**kw
):
super(AlterColumnOp, self).__init__(table_name, schema=schema)
self.column_name = column_name
self.existing_type = existing_type
self.existing_server_default = existing_server_default
self.existing_nullable = existing_nullable
self.modify_nullable = modify_nullable
self.modify_server_default = modify_server_default
self.modify_name = modify_name
self.modify_type = modify_type
self.kw = kw
def to_diff_tuple(self):
col_diff = []
schema, tname, cname = self.schema, self.table_name, self.column_name
if self.modify_type is not None:
col_diff.append(
("modify_type", schema, tname, cname,
{
"existing_nullable": self.existing_nullable,
"existing_server_default": self.existing_server_default,
},
self.existing_type,
self.modify_type)
)
if self.modify_nullable is not None:
col_diff.append(
("modify_nullable", schema, tname, cname,
{
"existing_type": self.existing_type,
"existing_server_default": self.existing_server_default
},
self.existing_nullable,
self.modify_nullable)
)
if self.modify_server_default is not False:
col_diff.append(
("modify_default", schema, tname, cname,
{
"existing_nullable": self.existing_nullable,
"existing_type": self.existing_type
},
self.existing_server_default,
self.modify_server_default)
)
return col_diff
def has_changes(self):
hc1 = self.modify_nullable is not None or \
self.modify_server_default is not False or \
self.modify_type is not None
if hc1:
return True
for kw in self.kw:
if kw.startswith('modify_'):
return True
else:
return False
def reverse(self):
kw = self.kw.copy()
kw['existing_type'] = self.existing_type
kw['existing_nullable'] = self.existing_nullable
kw['existing_server_default'] = self.existing_server_default
if self.modify_type is not None:
kw['modify_type'] = self.modify_type
if self.modify_nullable is not None:
kw['modify_nullable'] = self.modify_nullable
if self.modify_server_default is not False:
kw['modify_server_default'] = self.modify_server_default
# TODO: make this a little simpler
all_keys = set(m.group(1) for m in [
re.match(r'^(?:existing_|modify_)(.+)$', k)
for k in kw
] if m)
for k in all_keys:
if 'modify_%s' % k in kw:
swap = kw['existing_%s' % k]
kw['existing_%s' % k] = kw['modify_%s' % k]
kw['modify_%s' % k] = swap
return self.__class__(
self.table_name, self.column_name, schema=self.schema,
**kw
)
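    # Illustrative sketch of reverse() (hypothetical values, not from the original source): reversing an op
    # that flips nullability swaps every existing_*/modify_* pair, e.g.
    #   op = AlterColumnOp('account', 'name', existing_nullable=False, modify_nullable=True)
    #   rev = op.reverse()
    #   # rev.existing_nullable is True, rev.modify_nullable is False
    # so invoking the reversed operation restores the column to its previous nullability.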
@classmethod
@util._with_legacy_names([('name', 'new_column_name')])
def alter_column(
cls, operations, table_name, column_name,
nullable=None,
server_default=False,
new_column_name=None,
type_=None,
existing_type=None,
existing_server_default=False,
existing_nullable=None,
schema=None, **kw
):
"""Issue an "alter column" instruction using the
current migration context.
Generally, only that aspect of the column which
is being changed, i.e. name, type, nullability,
default, needs to be specified. Multiple changes
can also be specified at once and the backend should
"do the right thing", emitting each change either
separately or together as the backend allows.
MySQL has special requirements here, since MySQL
cannot ALTER a column without a full specification.
When producing MySQL-compatible migration files,
it is recommended that the ``existing_type``,
``existing_server_default``, and ``existing_nullable``
parameters be present, if not being altered.
Type changes which are against the SQLAlchemy
"schema" types :class:`~sqlalchemy.types.Boolean`
and :class:`~sqlalchemy.types.Enum` may also
add or drop constraints which accompany those
types on backends that don't support them natively.
The ``existing_server_default`` argument is
used in this case as well to remove a previous
constraint.
:param table_name: string name of the target table.
:param column_name: string name of the target column,
as it exists before the operation begins.
:param nullable: Optional; specify ``True`` or ``False``
to alter the column's nullability.
:param server_default: Optional; specify a string
SQL expression, :func:`~sqlalchemy.sql.expression.text`,
or :class:`~sqlalchemy.schema.DefaultClause` to indicate
an alteration to the column's default value.
Set to ``None`` to have the default removed.
:param new_column_name: Optional; specify a string name here to
indicate the new name within a column rename operation.
:param ``type_``: Optional; a :class:`~sqlalchemy.types.TypeEngine`
type object to specify a change to the column's type.
For SQLAlchemy types that also indicate a constraint (i.e.
:class:`~sqlalchemy.types.Boolean`, :class:`~sqlalchemy.types.Enum`),
the constraint is also generated.
:param autoincrement: set the ``AUTO_INCREMENT`` flag of the column;
currently understood by the MySQL dialect.
:param existing_type: Optional; a
:class:`~sqlalchemy.types.TypeEngine`
type object to specify the previous type. This
is required for all MySQL column alter operations that
don't otherwise specify a new type, as well as for
when nullability is being changed on a SQL Server
column. It is also used if the type is a so-called
         SQLAlchemy "schema" type which may define a constraint (i.e.
:class:`~sqlalchemy.types.Boolean`,
:class:`~sqlalchemy.types.Enum`),
so that the constraint can be dropped.
:param existing_server_default: Optional; The existing
default value of the column. Required on MySQL if
an existing default is not being changed; else MySQL
removes the default.
:param existing_nullable: Optional; the existing nullability
of the column. Required on MySQL if the existing nullability
is not being changed; else MySQL sets this to NULL.
:param existing_autoincrement: Optional; the existing autoincrement
of the column. Used for MySQL's system of altering a column
that specifies ``AUTO_INCREMENT``.
:param schema: Optional schema name to operate within. To control
quoting of the schema outside of the default behavior, use
the SQLAlchemy construct
:class:`~sqlalchemy.sql.elements.quoted_name`.
.. versionadded:: 0.7.0 'schema' can now accept a
:class:`~sqlalchemy.sql.elements.quoted_name` construct.
"""
alt = cls(
table_name, column_name, schema=schema,
existing_type=existing_type,
existing_server_default=existing_server_default,
existing_nullable=existing_nullable,
modify_name=new_column_name,
modify_type=type_,
modify_server_default=server_default,
modify_nullable=nullable,
**kw
)
return operations.invoke(alt)
@classmethod
def batch_alter_column(
cls, operations, column_name,
nullable=None,
server_default=False,
new_column_name=None,
type_=None,
existing_type=None,
existing_server_default=False,
existing_nullable=None,
**kw
):
"""Issue an "alter column" instruction using the current
batch migration context.
.. seealso::
:meth:`.Operations.add_column`
"""
alt = cls(
operations.impl.table_name, column_name,
schema=operations.impl.schema,
existing_type=existing_type,
existing_server_default=existing_server_default,
existing_nullable=existing_nullable,
modify_name=new_column_name,
modify_type=type_,
modify_server_default=server_default,
modify_nullable=nullable,
**kw
)
return operations.invoke(alt)
@Operations.register_operation("add_column")
@BatchOperations.register_operation("add_column", "batch_add_column")
class AddColumnOp(AlterTableOp):
"""Represent an add column operation."""
def __init__(self, table_name, column, schema=None):
super(AddColumnOp, self).__init__(table_name, schema=schema)
self.column = column
def reverse(self):
return DropColumnOp.from_column_and_tablename(
self.schema, self.table_name, self.column)
def to_diff_tuple(self):
return ("add_column", self.schema, self.table_name, self.column)
def to_column(self):
return self.column
@classmethod
def from_column(cls, col):
return cls(col.table.name, col, schema=col.table.schema)
@classmethod
def from_column_and_tablename(cls, schema, tname, col):
return cls(tname, col, schema=schema)
@classmethod
def add_column(cls, operations, table_name, column, schema=None):
"""Issue an "add column" instruction using the current
migration context.
e.g.::
from alembic import op
from sqlalchemy import Column, String
op.add_column('organization',
Column('name', String())
)
The provided :class:`~sqlalchemy.schema.Column` object can also
specify a :class:`~sqlalchemy.schema.ForeignKey`, referencing
a remote table name. Alembic will automatically generate a stub
"referenced" table and emit a second ALTER statement in order
to add the constraint separately::
from alembic import op
from sqlalchemy import Column, INTEGER, ForeignKey
op.add_column('organization',
Column('account_id', INTEGER, ForeignKey('accounts.id'))
)
Note that this statement uses the :class:`~sqlalchemy.schema.Column`
construct as is from the SQLAlchemy library. In particular,
default values to be created on the database side are
specified using the ``server_default`` parameter, and not
``default`` which only specifies Python-side defaults::
from alembic import op
from sqlalchemy import Column, TIMESTAMP, func
# specify "DEFAULT NOW" along with the column add
op.add_column('account',
Column('timestamp', TIMESTAMP, server_default=func.now())
)
:param table_name: String name of the parent table.
:param column: a :class:`sqlalchemy.schema.Column` object
representing the new column.
:param schema: Optional schema name to operate within. To control
quoting of the schema outside of the default behavior, use
the SQLAlchemy construct
:class:`~sqlalchemy.sql.elements.quoted_name`.
.. versionadded:: 0.7.0 'schema' can now accept a
:class:`~sqlalchemy.sql.elements.quoted_name` construct.
"""
op = cls(table_name, column, schema=schema)
return operations.invoke(op)
@classmethod
def batch_add_column(cls, operations, column):
"""Issue an "add column" instruction using the current
batch migration context.
.. seealso::
:meth:`.Operations.add_column`
"""
op = cls(
operations.impl.table_name, column,
schema=operations.impl.schema
)
return operations.invoke(op)
@Operations.register_operation("drop_column")
@BatchOperations.register_operation("drop_column", "batch_drop_column")
class DropColumnOp(AlterTableOp):
"""Represent a drop column operation."""
def __init__(
self, table_name, column_name, schema=None,
_orig_column=None, **kw):
super(DropColumnOp, self).__init__(table_name, schema=schema)
self.column_name = column_name
self.kw = kw
self._orig_column = _orig_column
def to_diff_tuple(self):
return (
"remove_column", self.schema, self.table_name, self.to_column())
def reverse(self):
if self._orig_column is None:
raise ValueError(
"operation is not reversible; "
"original column is not present")
return AddColumnOp.from_column_and_tablename(
self.schema, self.table_name, self._orig_column)
@classmethod
def from_column_and_tablename(cls, schema, tname, col):
return cls(tname, col.name, schema=schema, _orig_column=col)
def to_column(self, migration_context=None):
if self._orig_column is not None:
return self._orig_column
schema_obj = schemaobj.SchemaObjects(migration_context)
return schema_obj.column(self.column_name, NULLTYPE)
@classmethod
def drop_column(
cls, operations, table_name, column_name, schema=None, **kw):
"""Issue a "drop column" instruction using the current
migration context.
e.g.::
drop_column('organization', 'account_id')
:param table_name: name of table
:param column_name: name of column
:param schema: Optional schema name to operate within. To control
quoting of the schema outside of the default behavior, use
the SQLAlchemy construct
:class:`~sqlalchemy.sql.elements.quoted_name`.
.. versionadded:: 0.7.0 'schema' can now accept a
:class:`~sqlalchemy.sql.elements.quoted_name` construct.
:param mssql_drop_check: Optional boolean. When ``True``, on
Microsoft SQL Server only, first
drop the CHECK constraint on the column using a
SQL-script-compatible
block that selects into a @variable from sys.check_constraints,
then exec's a separate DROP CONSTRAINT for that constraint.
:param mssql_drop_default: Optional boolean. When ``True``, on
Microsoft SQL Server only, first
drop the DEFAULT constraint on the column using a
SQL-script-compatible
block that selects into a @variable from sys.default_constraints,
then exec's a separate DROP CONSTRAINT for that default.
:param mssql_drop_foreign_key: Optional boolean. When ``True``, on
Microsoft SQL Server only, first
drop a single FOREIGN KEY constraint on the column using a
SQL-script-compatible
block that selects into a @variable from
sys.foreign_keys/sys.foreign_key_columns,
        then exec's a separate DROP CONSTRAINT for that foreign key. Only
works if the column has exactly one FK constraint which refers to
it, at the moment.
.. versionadded:: 0.6.2
"""
op = cls(table_name, column_name, schema=schema, **kw)
return operations.invoke(op)
@classmethod
def batch_drop_column(cls, operations, column_name):
"""Issue a "drop column" instruction using the current
batch migration context.
.. seealso::
:meth:`.Operations.drop_column`
"""
op = cls(
operations.impl.table_name, column_name,
schema=operations.impl.schema)
return operations.invoke(op)
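# A brief, hedged usage sketch for the MSSQL-specific flags documented above;
# the table and column names are hypothetical, while the keyword arguments
# mirror the ``mssql_drop_*`` parameters of :meth:`.Operations.drop_column`:
#
#     op.drop_column(
#         'account', 'shipping_address',
#         mssql_drop_default=True,   # drop the DEFAULT constraint first
#         mssql_drop_check=True,     # drop the CHECK constraint first
#     )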
@Operations.register_operation("bulk_insert")
class BulkInsertOp(MigrateOperation):
"""Represent a bulk insert operation."""
def __init__(self, table, rows, multiinsert=True):
self.table = table
self.rows = rows
self.multiinsert = multiinsert
@classmethod
def bulk_insert(cls, operations, table, rows, multiinsert=True):
"""Issue a "bulk insert" operation using the current
migration context.
This provides a means of representing an INSERT of multiple rows
which works equally well in the context of executing on a live
connection as well as that of generating a SQL script. In the
case of a SQL script, the values are rendered inline into the
statement.
e.g.::
from alembic import op
from datetime import date
from sqlalchemy.sql import table, column
from sqlalchemy import String, Integer, Date
# Create an ad-hoc table to use for the insert statement.
accounts_table = table('account',
column('id', Integer),
column('name', String),
column('create_date', Date)
)
op.bulk_insert(accounts_table,
[
{'id':1, 'name':'John Smith',
'create_date':date(2010, 10, 5)},
{'id':2, 'name':'Ed Williams',
'create_date':date(2007, 5, 27)},
{'id':3, 'name':'Wendy Jones',
'create_date':date(2008, 8, 15)},
]
)
When using --sql mode, some datatypes may not render inline
automatically, such as dates and other special types. When this
issue is present, :meth:`.Operations.inline_literal` may be used::
op.bulk_insert(accounts_table,
[
{'id':1, 'name':'John Smith',
'create_date':op.inline_literal("2010-10-05")},
{'id':2, 'name':'Ed Williams',
'create_date':op.inline_literal("2007-05-27")},
{'id':3, 'name':'Wendy Jones',
'create_date':op.inline_literal("2008-08-15")},
],
multiinsert=False
)
When using :meth:`.Operations.inline_literal` in conjunction with
:meth:`.Operations.bulk_insert`, in order for the statement to work
in "online" (e.g. non --sql) mode, the
:paramref:`~.Operations.bulk_insert.multiinsert`
flag should be set to ``False``, which will have the effect of
individual INSERT statements being emitted to the database, each
with a distinct VALUES clause, so that the "inline" values can
still be rendered, rather than attempting to pass the values
as bound parameters.
.. versionadded:: 0.6.4 :meth:`.Operations.inline_literal` can now
be used with :meth:`.Operations.bulk_insert`, and the
:paramref:`~.Operations.bulk_insert.multiinsert` flag has
been added to assist in this usage when running in "online"
mode.
:param table: a table object which represents the target of the INSERT.
:param rows: a list of dictionaries indicating rows.
:param multiinsert: when at its default of True and --sql mode is not
enabled, the INSERT statement will be executed using
"executemany()" style, where all elements in the list of
dictionaries are passed as bound parameters in a single
list. Setting this to False results in individual INSERT
statements being emitted per parameter set, and is needed
in those cases where non-literal values are present in the
parameter sets.
.. versionadded:: 0.6.4
"""
op = cls(table, rows, multiinsert=multiinsert)
operations.invoke(op)
@Operations.register_operation("execute")
class ExecuteSQLOp(MigrateOperation):
"""Represent an execute SQL operation."""
def __init__(self, sqltext, execution_options=None):
self.sqltext = sqltext
self.execution_options = execution_options
@classmethod
def execute(cls, operations, sqltext, execution_options=None):
"""Execute the given SQL using the current migration context.
In a SQL script context, the statement is emitted directly to the
output stream. There is *no* return result, however, as this
function is oriented towards generating a change script
that can run in "offline" mode. For full interaction
with a connected database, use the "bind" available
from the context::
from alembic import op
connection = op.get_bind()
Also note that any parameterized statement here *will not work*
in offline mode - INSERT, UPDATE and DELETE statements which refer
to literal values would need to render
inline expressions. For simple use cases, the
:meth:`.inline_literal` function can be used for **rudimentary**
quoting of string values. For "bulk" inserts, consider using
:meth:`.bulk_insert`.
For example, to emit an UPDATE statement which is equally
compatible with both online and offline mode::
from sqlalchemy.sql import table, column
from sqlalchemy import String
from alembic import op
account = table('account',
column('name', String)
)
op.execute(
account.update().\\
where(account.c.name==op.inline_literal('account 1')).\\
values({'name':op.inline_literal('account 2')})
)
Note above we also used the SQLAlchemy
:func:`sqlalchemy.sql.expression.table`
and :func:`sqlalchemy.sql.expression.column` constructs to
make a brief, ad-hoc table construct just for our UPDATE
statement. A full :class:`~sqlalchemy.schema.Table` construct
of course works perfectly fine as well, though note it's a
recommended practice to at least ensure the definition of a
table is self-contained within the migration script, rather
than imported from a module that may break compatibility with
older migrations.
        :param sqltext: Any legal SQLAlchemy expression, including:
* a string
* a :func:`sqlalchemy.sql.expression.text` construct.
* a :func:`sqlalchemy.sql.expression.insert` construct.
* a :func:`sqlalchemy.sql.expression.update`,
:func:`sqlalchemy.sql.expression.insert`,
or :func:`sqlalchemy.sql.expression.delete` construct.
* Pretty much anything that's "executable" as described
in :ref:`sqlexpression_toplevel`.
:param execution_options: Optional dictionary of
execution options, will be passed to
:meth:`sqlalchemy.engine.Connection.execution_options`.
"""
op = cls(sqltext, execution_options=execution_options)
return operations.invoke(op)
class OpContainer(MigrateOperation):
"""Represent a sequence of operations operation."""
def __init__(self, ops=()):
self.ops = ops
def is_empty(self):
return not self.ops
def as_diffs(self):
return list(OpContainer._ops_as_diffs(self))
@classmethod
def _ops_as_diffs(cls, migrations):
for op in migrations.ops:
if hasattr(op, 'ops'):
for sub_op in cls._ops_as_diffs(op):
yield sub_op
else:
yield op.to_diff_tuple()
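# Hedged illustration of ``as_diffs()``: nested containers are flattened into
# the diff tuples produced by each operation's ``to_diff_tuple()``.  The table
# and column names here are made up:
#
#     container = OpContainer(ops=[DropColumnOp('account', 'old_col')])
#     container.as_diffs()
#     # -> [('remove_column', None, 'account', <Column 'old_col'>)]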
class ModifyTableOps(OpContainer):
"""Contains a sequence of operations that all apply to a single Table."""
def __init__(self, table_name, ops, schema=None):
super(ModifyTableOps, self).__init__(ops)
self.table_name = table_name
self.schema = schema
def reverse(self):
return ModifyTableOps(
self.table_name,
ops=list(reversed(
[op.reverse() for op in self.ops]
)),
schema=self.schema
)
class UpgradeOps(OpContainer):
"""contains a sequence of operations that would apply to the
'upgrade' stream of a script.
.. seealso::
:ref:`customizing_revision`
"""
def reverse_into(self, downgrade_ops):
downgrade_ops.ops[:] = list(reversed(
[op.reverse() for op in self.ops]
))
return downgrade_ops
def reverse(self):
return self.reverse_into(DowngradeOps(ops=[]))
class DowngradeOps(OpContainer):
"""contains a sequence of operations that would apply to the
'downgrade' stream of a script.
.. seealso::
:ref:`customizing_revision`
"""
def reverse(self):
return UpgradeOps(
ops=list(reversed(
[op.reverse() for op in self.ops]
))
)
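# A small, hedged sketch of the symmetry above: reversing an UpgradeOps yields
# a DowngradeOps whose operations are individually reversed and emitted in the
# opposite order (the table and column here are hypothetical):
#
#     upgrade = UpgradeOps(ops=[
#         AddColumnOp.from_column_and_tablename(
#             None, 'account', sa.Column('x', sa.Integer())),
#     ])
#     downgrade = upgrade.reverse()
#     # downgrade.ops contains a single DropColumnOp for 'account.x'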
class MigrationScript(MigrateOperation):
"""represents a migration script.
E.g. when autogenerate encounters this object, this corresponds to the
production of an actual script file.
A normal :class:`.MigrationScript` object would contain a single
:class:`.UpgradeOps` and a single :class:`.DowngradeOps` directive.
.. seealso::
:ref:`customizing_revision`
"""
def __init__(
self, rev_id, upgrade_ops, downgrade_ops,
message=None,
imports=None, head=None, splice=None,
branch_label=None, version_path=None, depends_on=None):
self.rev_id = rev_id
self.message = message
self.imports = imports
self.head = head
self.splice = splice
self.branch_label = branch_label
self.version_path = version_path
self.depends_on = depends_on
self.upgrade_ops = upgrade_ops
self.downgrade_ops = downgrade_ops
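# Hedged sketch of where a MigrationScript appears in practice: the
# ``process_revision_directives`` hook described in :ref:`customizing_revision`
# receives a list whose first element is a MigrationScript, e.g.::
#
#     def process_revision_directives(context, revision, directives):
#         script = directives[0]        # a MigrationScript instance
#         if script.upgrade_ops.is_empty():
#             directives[:] = []        # skip empty autogenerate revisions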
|
graingert/alembic
|
alembic/operations/ops.py
|
Python
|
mit
| 69,224
|
# -*- coding: utf-8 -*-
"""
Django settings for puput_demo project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
from puput import PUPUT_APPS
import environ
ROOT_DIR = environ.Path(__file__) - 3 # (/a/b/myfile.py - 3 = /)
APPS_DIR = ROOT_DIR.path('puput-demo')
env = environ.Env()
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Admin
'django.contrib.admin',
)
INSTALLED_APPS = DJANGO_APPS + PUPUT_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'wagtail.wagtailcore.middleware.SiteMiddleware',
'wagtail.wagtailredirects.middleware.RedirectMiddleware'
)
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool("DJANGO_DEBUG", False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
'default': env.db("DATABASE_URL", default="postgres:///puput-demo"),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Your stuff: custom template context processors go here
],
},
},
]
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# LOGGING CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
        'django.security.DisallowedHost': {
            'level': 'ERROR',
            'handlers': ['console', 'mail_admins'],
            'propagate': True,
        },
    },
}
WAGTAIL_SITE_NAME = 'Demo'
|
APSL/puput-demo
|
config/settings/common.py
|
Python
|
mit
| 7,805
|
import _plotly_utils.basevalidators
class FamilysrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="familysrc", parent_name="barpolar.hoverlabel.font", **kwargs
):
super(FamilysrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
|
plotly/python-api
|
packages/python/plotly/plotly/validators/barpolar/hoverlabel/font/_familysrc.py
|
Python
|
mit
| 475
|
# -*- coding: utf-8 -*-
#
# Discover the target host types in the subnet
#
# @author: Sreejith Kesavan <sreejithemk@gmail.com>
import arp
import oui
import ipcalc
import sys
class Discovery(object):
""" Find out the host types in the Ip range (CIDR)
NOTE: This finds mac addresses only within the subnet.
It doesn't fetch mac addresses for routed network ip's.
"""
def __init__(self):
self.__arp = arp.ARP()
self.__oui = oui.OUI()
def discover(self, address):
"""
Traverse the IP subnets and return manufacturer info.
"""
network = ipcalc.Network(address)
for ip in network:
ip = str(ip)
# Ignore broadcast IP Addresses
if '/' in address and ip == str(network.broadcast()):
print 'Ignoring broadcast ip: {broadcast}'.format(broadcast=str(network.broadcast()))
continue
mac = self.__arp.find_mac(ip)
if mac:
if len(mac.split(':')[0]) == 1:
mac = '0' + mac
manuf_str = mac.replace(':', '')[:6].upper()
manuf = self.__oui.find_manuf(manuf_str)
if manuf:
yield (ip, manuf)
def run():
if len(sys.argv) < 2:
print
print 'Usage:\t\tidiscover <ip-address/cidr>'
print 'Examples:'
print '\t\tidiscover 10.73.19.0'
print '\t\tidiscover 10.74.215/24'
print
else:
addrs = sys.argv[1:]
d = Discovery()
try:
for addr in addrs:
for ip, manuf in d.discover(addr):
print 'IP Address: {ip} Manufacturer: {manuf}'.format(ip=ip, manuf=manuf)
except KeyboardInterrupt:
print 'Exiting...'
if __name__ == '__main__':
run()
|
semk/iDiscover
|
idiscover/discover.py
|
Python
|
mit
| 1,840
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding index on 'Place', fields ['place_id']
db.create_index('storybase_geo_place', ['place_id'])
# Adding index on 'Location', fields ['location_id']
db.create_index('storybase_geo_location', ['location_id'])
def backwards(self, orm):
# Removing index on 'Location', fields ['location_id']
db.delete_index('storybase_geo_location', ['location_id'])
# Removing index on 'Place', fields ['place_id']
db.delete_index('storybase_geo_place', ['place_id'])
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'storybase_geo.geolevel': {
'Meta': {'object_name': 'GeoLevel'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['storybase_geo.GeoLevel']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'storybase_geo.location': {
'Meta': {'object_name': 'Location'},
'address': ('storybase.fields.ShortTextField', [], {'blank': 'True'}),
'address2': ('storybase.fields.ShortTextField', [], {'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lat': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'lng': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'location_id': ('uuidfield.fields.UUIDField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'name': ('storybase.fields.ShortTextField', [], {'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locations'", 'null': 'True', 'to': "orm['auth.User']"}),
'point': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'blank': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'raw': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'storybase_geo.place': {
'Meta': {'object_name': 'Place'},
'boundary': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'null': 'True', 'blank': 'True'}),
'children': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['storybase_geo.Place']", 'null': 'True', 'through': "orm['storybase_geo.PlaceRelation']", 'blank': 'True'}),
'geolevel': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'places'", 'null': 'True', 'to': "orm['storybase_geo.GeoLevel']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('storybase.fields.ShortTextField', [], {}),
'place_id': ('uuidfield.fields.UUIDField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '32', 'blank': 'True'})
},
'storybase_geo.placerelation': {
'Meta': {'unique_together': "(('parent', 'child'),)", 'object_name': 'PlaceRelation'},
'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'place_parent'", 'to': "orm['storybase_geo.Place']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'place_child'", 'to': "orm['storybase_geo.Place']"})
}
}
complete_apps = ['storybase_geo']
|
denverfoundation/storybase
|
apps/storybase_geo/migrations/0004_auto.py
|
Python
|
mit
| 7,705
|
import csv
#import datetime
import numpy as np
import matplotlib.pyplot as plt  # plt.plot() is used below but was never imported
import HackApp.models as database
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
def handle(self, *args, **options):
percentageAfterNine = 0.05
percentage3to50 = 0.7
longBound = [13.375556, 16.610556]
latBound = [45.421944, 46.876667]
longN = 2000
padec = 0.005
def razporedi(grid, power, padec, x, y, davcna, poslovalnica):
obiskani = set()
def rekurzija(power, x, y):
if (x, y) in obiskani:
return
else:
obiskani.add((x, y))
try:
if grid[x][y][2] > power or power < 3:
return
else:
grid[x][y][2] = power
grid[x][y][0] = davcna
grid[x][y][1] = poslovalnica
rekurzija(power - padec, x - 1, y)
rekurzija(power - padec, x + 1, y)
rekurzija(power - padec, x, y - 1)
rekurzija(power - padec, x, y + 1)
except IndexError:
return
rekurzija(power, x, y)
return grid
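        # razporedi ("distribute"): flood-fills the grid outward from (x, y),
        # writing this vendor's tax number / branch into every cell where the
        # decaying signal ``power`` (reduced by ``padec`` per step) still beats
        # the value already stored there; recursion stops once power drops
        # below 3.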
def dolociBarvoInMeje(grid, squareSize, longBound, latBound):
obiskani = set()
novoObiskani = set()
mejneTocke = []
mejneBarve = set()
def rekurzija(x, y):
if (x, y) in obiskani or (x, y) in novoObiskani:
return
else:
novoObiskani.add((x, y))
try:
if grid[x][y][0] == 0:
return
if grid[x + 1][y][0] != grid[x][y][0] or grid[x + 1][y][1] != grid[x][y][1]:
mejneTocke.append(str(longBound[0] + squareSize * (x + 1)) + "," + str(latBound[1] - squareSize * y))
mejneTocke.append(str(longBound[0] + squareSize * (x + 1)) + "," + str(latBound[1] - squareSize * (y + 1)))
mejneBarve.add(grid[x + 1][y][3])
else:
rekurzija(x + 1, y)
if grid[x - 1][y][0] != grid[x][y][0] or grid[x - 1][y][1] != grid[x][y][1]:
mejneTocke.append(str(longBound[0] + squareSize * (x)) + "," + str(latBound[1] - squareSize * y))
mejneTocke.append(str(longBound[0] + squareSize * (x)) + "," + str(latBound[1] - squareSize * (y + 1)))
mejneBarve.add(grid[x - 1][y][3])
else:
rekurzija(x - 1, y)
if grid[x][y + 1][0] != grid[x][y][0] or grid[x][y + 1][1] != grid[x][y][1]:
mejneTocke.append(str(longBound[0] + squareSize * x) + "," + str(latBound[1] - squareSize * (y + 1)))
mejneTocke.append(
str(longBound[0] + squareSize * (x + 1)) + "," + str(latBound[1] - squareSize * (y + 1)))
mejneBarve.add(grid[x][y + 1][3])
else:
rekurzija(x, y + 1)
if grid[x][y - 1][0] != grid[x][y][0] or grid[x][y - 1][1] != grid[x][y][1]:
mejneTocke.append(str(longBound[0] + squareSize * x) + "," + str(latBound[1] - squareSize * y))
mejneTocke.append(str(longBound[0] + squareSize * (x + 1)) + "," + str(latBound[1] - squareSize * y))
mejneBarve.add(grid[x][y - 1][3])
else:
rekurzija(x, y - 1)
except IndexError:
return
for i in range(len(grid)):
for j in range(len(grid[i])):
novoObiskani = set()
mejneBarve = set()
mejneTocke = []
davcna = 0
poslovalnica = 0
rekurzija(i, j)
if mejneTocke == []:
continue
                    # n = the smallest colour that none of the neighbours already uses
n = 1
while True:
if n not in mejneBarve:
setData = True
for k in novoObiskani:
grid[k[0]][k[1]][3] = n
if setData:
davcna = grid[k[0]][k[1]][0]
poslovalnica = grid[k[0]][k[1]][1]
setData = False
break
else:
n += 1
koordinate = []
for l in mejneTocke:
koordinate.append(list(map(float, l.split(","))))
koordinate = np.array(koordinate).transpose()
plt.plot(koordinate[0], koordinate[1], 'r-')
koordinate = koordinate.transpose()
koordinate = np.array(koordinate).transpose()
kot = np.arctan2(koordinate[1], koordinate[0])
ureditev = np.argsort(kot)
koordinate = koordinate.transpose()[ureditev]
mejneTocke = []
for l in koordinate:
mejneTocke.append(",".join(list(map(str, l))))
koordinate = koordinate.transpose()
plt.plot(koordinate[0], koordinate[1], 'b-')
prikaz = database.Prikaz(lokacija=database.Lokacija.objects.get(poslovniProstor=hex(int(poslovalnica))[2:].upper(), davcnaStevilka = str(int(davcna))))
prikaz.koordinate = mejneTocke
prikaz.barva = str(n)
prikaz.save()
obiskani = obiskani | novoObiskani
return
with open("dragon_dump.csv") as file:
data = csv.reader(file, delimiter = ",")
n = 0
firme = {}
for i in data:
n += 1
if n % 100000 == 0:
print(n)
break
identifier = i[1] + i[3]
if identifier not in firme:
                    # tax-number, location, all-receipts, total-value-of-receipts, receipts-3-50, receipts-after-21:00
firme[identifier] = [i[1], int(i[3], 16), 1, float(i[7]), 0, 0, i[15], i[14]]
else:
firme[identifier][2] += 1
firme[identifier][3] += float(i[7])
timeArray = i[2].split(' ')
cas = list(map(int, timeArray[1].split(':')))
if float(i[7]) > 3 and float(i[7]) < 50:
firme[identifier][4] += 1
if cas[0] >= 21:
firme[identifier][5] += 1
kandidati = []
for i in firme:
kandidati.append(list(map(float, firme[i])))
        # columns: tax-number, location, all-receipts, total-value-of-receipts, receipts-3-50, receipts-after-21:00, long, lat
kandidati = np.array(kandidati)
# kandidati = np.loadtxt("dolocanjeGostisc.txt", delimiter = "\t", skiprows = 1)
transKand = kandidati.transpose()
kandidati = kandidati[transKand[5] >= percentageAfterNine * transKand[2]]
transKand = kandidati.transpose()
kandidati = kandidati[transKand[4] >= percentage3to50 * transKand[2]]
squareSize = (longBound[1] - longBound[0]) / longN
latN = int(np.ceil((latBound[1] - latBound[0]) / squareSize))
        # aligned at the top
        # each cell holds [tax number, branch, power, colour]
grid = np.zeros((longN, latN, 4))
tip = database.Tip.objects.get(id=45635)
for i in kandidati:
lokacija = database.Lokacija(poslovniProstor=hex(int(i[1]))[2:].upper(), davcnaStevilka = str(int(i[0])))
lokacija.long = i[6]
lokacija.lat = i[7]
lokacija.tip = tip
lokacija.save()
x = int((i[6] - longBound[0]) // squareSize)
y = int((latBound[1] - i[7]) // squareSize)
grid = razporedi(grid, i[3], i[3] * padec, x, y, i[0], i[1])
dolociBarvoInMeje(grid, squareSize, longBound, latBound)
|
EvaErzin/DragonHack
|
DragonHack/management/commands/dolocanjeGostisc.py
|
Python
|
mit
| 8,452
|
import os
import mock
import pytest
import bridgy.inventory
from bridgy.inventory import InventorySet, Instance
from bridgy.inventory.aws import AwsInventory
from bridgy.config import Config
def get_aws_inventory(name):
test_dir = os.path.dirname(os.path.abspath(__file__))
cache_dir = os.path.join(test_dir, 'aws_stubs')
aws_obj = AwsInventory(name=name, cache_dir=cache_dir, access_key_id='access_key_id',
secret_access_key='secret_access_key', session_token='session_token',
region='region')
return aws_obj
def test_inventory_set(mocker):
test_dir = os.path.dirname(os.path.abspath(__file__))
cache_dir = os.path.join(test_dir, 'aws_stubs')
aws_obj = get_aws_inventory(name='aws')
inventorySet = InventorySet()
inventorySet.add(aws_obj)
inventorySet.add(aws_obj)
print(aws_obj.instances())
all_instances = inventorySet.instances()
aws_instances = [
Instance(name=u'test-forms', address=u'devbox', aliases=(u'devbox', u'ip-172-31-8-185.us-west-2.compute.internal', u'i-e54cbaeb'), source='aws (aws)', container_id=None, type='VM'),
Instance(name=u'devlab-forms', address=u'devbox', aliases=(u'devbox', u'ip-172-31-0-138.us-west-2.compute.internal', u'i-f7d726f9'), source='aws (aws)', container_id=None, type='VM'),
Instance(name=u'test-account-svc', address=u'devbox', aliases=(u'devbox', u'ip-172-31-0-139.us-west-2.compute.internal', u'i-f4d726fa'), source='aws (aws)', container_id=None, type='VM'),
Instance(name=u'devlab-pubsrv', address=u'devbox', aliases=(u'devbox', u'ip-172-31-0-142.us-west-2.compute.internal', u'i-f5d726fb'), source='aws (aws)', container_id=None, type='VM'),
Instance(name=u'devlab-game-svc', address=u'devbox', aliases=(u'devbox', u'ip-172-31-0-140.us-west-2.compute.internal', u'i-f2d726fc'), source='aws (aws)', container_id=None, type='VM'),
Instance(name=u'test-game-svc', address=u'devbox', aliases=(u'devbox', u'ip-172-31-0-141.us-west-2.compute.internal', u'i-f3d726fd'), source='aws (aws)', container_id=None, type='VM'),
Instance(name=u'test-pubsrv', address=u'devbox', aliases=(u'devbox', u'ip-172-31-2-38.us-west-2.compute.internal', u'i-0f500447384e95942'), source='aws (aws)', container_id=None, type='VM'),
Instance(name=u'test-pubsrv', address=u'devbox', aliases=(u'devbox', u'ip-172-31-2-39.us-west-2.compute.internal', u'i-0f500447384e95943'), source='aws (aws)', container_id=None, type='VM')
]
expected_instances = aws_instances + aws_instances
assert len(all_instances) == len(expected_instances)
assert set(all_instances) == set(expected_instances)
def test_inventory_set_filter_sources(mocker):
test_dir = os.path.dirname(os.path.abspath(__file__))
cache_dir = os.path.join(test_dir, 'aws_stubs')
inventorySet = InventorySet()
inventorySet.add(get_aws_inventory(name='aws'))
inventorySet.add(get_aws_inventory(name='awesome'))
print(inventorySet.instances())
all_instances = inventorySet.instances(filter_sources='awesome')
# aws_instances = [
# Instance(name='test-forms', address='devbox', aliases=('devbox', 'ip-172-31-8-185.us-west-2.compute.internal', 'i-e54cbaeb'), source='aws (aws)', container_id=None, type='VM'),
# Instance(name='devlab-forms', address='devbox', aliases=('devbox', 'ip-172-31-0-138.us-west-2.compute.internal', 'i-f7d726f9'), source='aws (aws)', container_id=None, type='VM'),
# Instance(name='test-account-svc', address='devbox', aliases=('devbox', 'ip-172-31-0-139.us-west-2.compute.internal', 'i-f4d726fa'), source='aws (aws)', container_id=None, type='VM'),
# Instance(name='devlab-pubsrv', address='devbox', aliases=('devbox', 'ip-172-31-0-142.us-west-2.compute.internal', 'i-f5d726fb'), source='aws (aws)', container_id=None, type='VM'),
# Instance(name='devlab-game-svc', address='devbox', aliases=('devbox', 'ip-172-31-0-140.us-west-2.compute.internal', 'i-f2d726fc'), source='aws (aws)', container_id=None, type='VM'),
# Instance(name='test-game-svc', address='devbox', aliases=('devbox', 'ip-172-31-0-141.us-west-2.compute.internal', 'i-f3d726fd'), source='aws (aws)', container_id=None, type='VM'),
# Instance(name='test-pubsrv', address='devbox', aliases=('devbox', 'ip-172-31-2-38.us-west-2.compute.internal', 'i-0f500447384e95942'), source='aws (aws)', container_id=None, type='VM'),
# Instance(name='test-pubsrv', address='devbox', aliases=('devbox', 'ip-172-31-2-39.us-west-2.compute.internal', 'i-0f500447384e95943'), source='aws (aws)', container_id=None, type='VM')
# ]
awesome_instances = [
Instance(name='test-forms', address='devbox', aliases=('devbox', 'ip-172-31-8-185.us-west-2.compute.internal', 'i-e54cbaeb'), source='awesome (aws)', container_id=None, type='VM'),
Instance(name='devlab-forms', address='devbox', aliases=('devbox', 'ip-172-31-0-138.us-west-2.compute.internal', 'i-f7d726f9'), source='awesome (aws)', container_id=None, type='VM'),
Instance(name='test-account-svc', address='devbox', aliases=('devbox', 'ip-172-31-0-139.us-west-2.compute.internal', 'i-f4d726fa'), source='awesome (aws)', container_id=None, type='VM'),
Instance(name='devlab-pubsrv', address='devbox', aliases=('devbox', 'ip-172-31-0-142.us-west-2.compute.internal', 'i-f5d726fb'), source='awesome (aws)', container_id=None, type='VM'),
Instance(name='devlab-game-svc', address='devbox', aliases=('devbox', 'ip-172-31-0-140.us-west-2.compute.internal', 'i-f2d726fc'), source='awesome (aws)', container_id=None, type='VM'),
Instance(name='test-game-svc', address='devbox', aliases=('devbox', 'ip-172-31-0-141.us-west-2.compute.internal', 'i-f3d726fd'), source='awesome (aws)', container_id=None, type='VM'),
Instance(name='test-pubsrv', address='devbox', aliases=('devbox', 'ip-172-31-2-38.us-west-2.compute.internal', 'i-0f500447384e95942'), source='awesome (aws)', container_id=None, type='VM'),
Instance(name='test-pubsrv', address='devbox', aliases=('devbox', 'ip-172-31-2-39.us-west-2.compute.internal', 'i-0f500447384e95943'), source='awesome (aws)', container_id=None, type='VM')
]
assert len(all_instances) == len(awesome_instances)
assert set(all_instances) == set(awesome_instances)
all_instances = inventorySet.instances(filter_sources='bogus')
assert len(all_instances) == 0
|
wagoodman/bridgy
|
tests/test_inventory_set.py
|
Python
|
mit
| 6,497
|
from bluepy.btle import *
import time
import serial
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph as pg
start_time = time.time()
data = []
data2 = []
data3 = []
data4 = []
angles = []
pg.setConfigOption('background', 'w')
pg.setConfigOption('foreground', 'k')
pen = pg.mkPen('k', width=8)
app = QtGui.QApplication([])
plotWidget = pg.plot(title='biomechanics')
plotWidget.setWindowTitle('elbow angle')
plotWidget.setLabels(left=('angle', 'degrees'))
plotWidget.plotItem.getAxis('left').setPen(pen)
plotWidget.plotItem.getAxis('bottom').setPen(pen)
curve = plotWidget.plot(pen=pen)
plotWidget.setYRange(20, 210)
data = [0]
ser = serial.Serial("/dev/rfcomm0", 9600, timeout=0.5)
t = [0]
# from calibration
arm_straight = 957
arm_bent = 987
class MyDelegate(DefaultDelegate):
def __init__(self):
DefaultDelegate.__init__(self)
def handleNotification(self, cHandle, data):
global data2, data3, data4, angle
if cHandle == 37:
data = data.decode("utf-8")
data2.append(data)
data3 = ''.join(data2)
data4 = data3.splitlines()
angle = 180 - (float(data4[-1]) - arm_straight) / (arm_bent - arm_straight) * 135
print(data4[-1])
angles.append(angle)
# print(data4[-1], angle)
else:
print('received an unexpected handle')
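# Worked example of the calibration mapping in handleNotification, using the
# arm_straight/arm_bent constants above: a raw reading of 957 gives
# 180 - (957 - 957) / (987 - 957) * 135 = 180 degrees (arm straight), while a
# reading of 987 gives 180 - 135 = 45 degrees (arm fully bent).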
print('Attempting to connect...')
mac1 = 'a4:d5:78:0d:1c:53'
mac2 = 'a4:d5:78:0d:2e:fc'
per = Peripheral(mac1, "public")
per.setDelegate(MyDelegate())
print("Connected")
def update():
global curve, data, angles2
if per.waitForNotifications(1):
t.append(time.time() - start_time)
x = list(range(0, len(angles), 1))
angles2 = [float(i) for i in angles]
curve.setData(x[-50:-1], angles2[-50:-1])
app.processEvents()
timer = QtCore.QTimer()
timer.timeout.connect(update)
timer.start(0)
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
|
ac769/continuum_technologies
|
software/ble_live_read_graphical.py
|
Python
|
mit
| 2,073
|
"""
GatewayScanner is an abstraction for searching for KNX/IP devices on the local network.
* It walks through all network interfaces
* and sends UDP multicast search requests
* it returns the first found device
"""
from __future__ import annotations
import asyncio
from functools import partial
import logging
from typing import TYPE_CHECKING
import netifaces
from xknx.knxip import (
DIB,
HPAI,
DIBDeviceInformation,
DIBServiceFamily,
DIBSuppSVCFamilies,
KNXIPFrame,
KNXIPServiceType,
SearchRequest,
SearchResponse,
)
from xknx.telegram import IndividualAddress
from .transport import UDPTransport
if TYPE_CHECKING:
from xknx.xknx import XKNX
logger = logging.getLogger("xknx.log")
class GatewayDescriptor:
"""Used to return information about the discovered gateways."""
def __init__(
self,
ip_addr: str,
port: int,
local_ip: str = "",
local_interface: str = "",
name: str = "UNKNOWN",
supports_routing: bool = False,
supports_tunnelling: bool = False,
supports_tunnelling_tcp: bool = False,
individual_address: IndividualAddress | None = None,
):
"""Initialize GatewayDescriptor class."""
self.name = name
self.ip_addr = ip_addr
self.port = port
self.local_interface = local_interface
self.local_ip = local_ip
self.supports_routing = supports_routing
self.supports_tunnelling = supports_tunnelling
self.supports_tunnelling_tcp = supports_tunnelling_tcp
self.individual_address = individual_address
def parse_dibs(self, dibs: list[DIB]) -> None:
"""Parse DIBs for gateway information."""
for dib in dibs:
if isinstance(dib, DIBSuppSVCFamilies):
self.supports_routing = dib.supports(DIBServiceFamily.ROUTING)
if dib.supports(DIBServiceFamily.TUNNELING):
self.supports_tunnelling = True
self.supports_tunnelling_tcp = dib.supports(
DIBServiceFamily.TUNNELING, version=2
)
continue
if isinstance(dib, DIBDeviceInformation):
self.name = dib.name
self.individual_address = dib.individual_address
continue
def __repr__(self) -> str:
"""Return object as representation string."""
return (
"GatewayDescriptor(\n"
f" name={self.name},\n"
f" ip_addr={self.ip_addr},\n"
f" port={self.port},\n"
f" local_interface={self.local_interface},\n"
f" local_ip={self.local_ip},\n"
f" supports_routing={self.supports_routing},\n"
f" supports_tunnelling={self.supports_tunnelling},\n"
f" supports_tunnelling_tcp={self.supports_tunnelling_tcp},\n"
f" individual_address={self.individual_address}\n"
")"
)
def __str__(self) -> str:
"""Return object as readable string."""
return f"{self.individual_address} - {self.name} @ {self.ip_addr}:{self.port}"
class GatewayScanFilter:
"""Filter to limit gateway scan attempts.
If `tunnelling` and `routing` are set it is treated as AND.
KNX/IP devices that don't support `tunnelling` or `routing` aren't matched.
"""
def __init__(
self,
name: str | None = None,
tunnelling: bool | None = None,
tunnelling_tcp: bool | None = None,
routing: bool | None = None,
):
"""Initialize GatewayScanFilter class."""
self.name = name
self.tunnelling = tunnelling
self.tunnelling_tcp = tunnelling_tcp
self.routing = routing
def match(self, gateway: GatewayDescriptor) -> bool:
"""Check whether the device is a gateway and given GatewayDescriptor matches the filter."""
if self.name is not None and self.name != gateway.name:
return False
if (
self.tunnelling is not None
and self.tunnelling != gateway.supports_tunnelling
):
return False
if (
self.tunnelling_tcp is not None
and self.tunnelling_tcp != gateway.supports_tunnelling_tcp
):
return False
if self.routing is not None and self.routing != gateway.supports_routing:
return False
return (
gateway.supports_tunnelling
or gateway.supports_tunnelling_tcp
or gateway.supports_routing
)
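# Hedged usage sketch (construction of the xknx object elided): combine the
# scanner with a filter so that only routing-capable gateways are kept; the
# keyword arguments mirror the __init__ signatures defined in this module.
#
#     scanner = GatewayScanner(
#         xknx, scan_filter=GatewayScanFilter(routing=True), stop_on_found=None
#     )
#     gateways = await scanner.scan()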
class GatewayScanner:
"""Class for searching KNX/IP devices."""
def __init__(
self,
xknx: XKNX,
timeout_in_seconds: float = 4.0,
stop_on_found: int | None = 1,
scan_filter: GatewayScanFilter = GatewayScanFilter(),
):
"""Initialize GatewayScanner class."""
self.xknx = xknx
self.timeout_in_seconds = timeout_in_seconds
self.stop_on_found = stop_on_found
self.scan_filter = scan_filter
self.found_gateways: list[GatewayDescriptor] = []
self._udp_transports: list[UDPTransport] = []
self._response_received_event = asyncio.Event()
self._count_upper_bound = 0
"""Clean value of self.stop_on_found, computed when ``scan`` is called."""
async def scan(self) -> list[GatewayDescriptor]:
"""Scan and return a list of GatewayDescriptors on success."""
if self.stop_on_found is None:
self._count_upper_bound = 0
else:
self._count_upper_bound = max(0, self.stop_on_found)
await self._send_search_requests()
try:
await asyncio.wait_for(
self._response_received_event.wait(),
timeout=self.timeout_in_seconds,
)
except asyncio.TimeoutError:
pass
finally:
self._stop()
return self.found_gateways
def _stop(self) -> None:
"""Stop tearing down udp_transport."""
for udp_transport in self._udp_transports:
udp_transport.stop()
async def _send_search_requests(self) -> None:
"""Find all interfaces with active IPv4 connection to search for gateways."""
for interface in netifaces.interfaces():
try:
af_inet = netifaces.ifaddresses(interface)[netifaces.AF_INET]
ip_addr = af_inet[0]["addr"]
except KeyError:
logger.debug("No IPv4 address found on %s", interface)
continue
except ValueError as err:
# rare case when an interface disappears during search initialisation
logger.debug("Invalid interface %s: %s", interface, err)
continue
else:
await self._search_interface(interface, ip_addr)
async def _search_interface(self, interface: str, ip_addr: str) -> None:
"""Send a search request on a specific interface."""
logger.debug("Searching on %s / %s", interface, ip_addr)
udp_transport = UDPTransport(
self.xknx,
(ip_addr, 0),
(self.xknx.multicast_group, self.xknx.multicast_port),
multicast=True,
)
udp_transport.register_callback(
partial(self._response_rec_callback, interface=interface),
[KNXIPServiceType.SEARCH_RESPONSE],
)
await udp_transport.connect()
self._udp_transports.append(udp_transport)
discovery_endpoint = HPAI(
ip_addr=self.xknx.multicast_group, port=self.xknx.multicast_port
)
search_request = SearchRequest(self.xknx, discovery_endpoint=discovery_endpoint)
udp_transport.send(KNXIPFrame.init_from_body(search_request))
def _response_rec_callback(
self,
knx_ip_frame: KNXIPFrame,
source: HPAI,
udp_transport: UDPTransport,
interface: str = "",
) -> None:
"""Verify and handle knxipframe. Callback from internal udp_transport."""
if not isinstance(knx_ip_frame.body, SearchResponse):
logger.warning("Could not understand knxipframe")
return
gateway = GatewayDescriptor(
ip_addr=knx_ip_frame.body.control_endpoint.ip_addr,
port=knx_ip_frame.body.control_endpoint.port,
local_ip=udp_transport.local_addr[0],
local_interface=interface,
)
gateway.parse_dibs(knx_ip_frame.body.dibs)
logger.debug("Found KNX/IP device at %s: %s", source, repr(gateway))
self._add_found_gateway(gateway)
def _add_found_gateway(self, gateway: GatewayDescriptor) -> None:
if self.scan_filter.match(gateway) and not any(
_gateway.individual_address == gateway.individual_address
for _gateway in self.found_gateways
):
self.found_gateways.append(gateway)
if 0 < self._count_upper_bound <= len(self.found_gateways):
self._response_received_event.set()
|
XKNX/xknx
|
xknx/io/gateway_scanner.py
|
Python
|
mit
| 9,132
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from feincms.module.page.models import Page
from feincms.content.richtext.models import RichTextContent
from feincms.content.medialibrary.models import MediaFileContent
Page.register_extensions('feincms.module.extensions.datepublisher',) # Example set of extensions
Page.register_templates({
'title': _('Standard template'),
'path': 'cms_base.html',
'regions': (
('main', _('Main content area')),
# ('sidebar', _('Sidebar'), 'inherited'),
),
})
Page.create_content_type(RichTextContent)
|
symroe/remakery
|
cms/models.py
|
Python
|
mit
| 615
|
import sublime, sublime_plugin
def clean_layout(layout):
row_set = set()
col_set = set()
for cell in layout["cells"]:
row_set.add(cell[1])
row_set.add(cell[3])
col_set.add(cell[0])
col_set.add(cell[2])
row_set = sorted(row_set)
col_set = sorted(col_set)
rows = layout["rows"]
cols = layout["cols"]
layout["rows"] = [row for i, row in enumerate(rows) if i in row_set]
layout["cols"] = [col for i, col in enumerate(cols) if i in col_set]
row_map = { row : i for i, row in enumerate(row_set) }
col_map = { col : i for i, col in enumerate(col_set) }
layout["cells"] = [[col_map[cell[0]], row_map[cell[1]], col_map[cell[2]], row_map[cell[3]]] for cell in layout["cells"]]
return layout
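# Hedged worked example of clean_layout(): a row that no cell references any
# longer is dropped and the remaining cell indices are remapped.
#
#     clean_layout({
#         "rows": [0.0, 0.5, 1.0],
#         "cols": [0.0, 1.0],
#         "cells": [[0, 0, 1, 2]],
#     })
#     # -> {"rows": [0.0, 1.0], "cols": [0.0, 1.0], "cells": [[0, 0, 1, 1]]}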
def collapse_group(group):
LEFT = 0
TOP = 1
RIGHT = 2
BOTTOM = 3
window = sublime.active_window()
layout = window.get_layout()
cells = layout["cells"]
new_cells = []
group_cell = cells[group]
cells = cells[:group] + cells[group + 1:]
for cell in cells:
if cell[BOTTOM] == group_cell[TOP] and cell[LEFT] >= group_cell[LEFT] and cell[RIGHT] <= group_cell[RIGHT]:
new_cells.append([
cell[LEFT],
cell[TOP],
cell[RIGHT],
group_cell[BOTTOM]
])
elif cell != group_cell:
new_cells.append(cell)
layout["cells"] = new_cells
window.set_layout(clean_layout(layout))
class OutputView:
content = ""
position = 0.0
id = None
def __init__(self, view):
self.view = view
def __getattr__(self, name):
        if self.view.id() != OutputView.id:
output = OutputView.find_view()
if output:
self.view = output.view
return getattr(self.view, name)
def clear(self):
OutputView.content = ""
self.run_command("output_view_clear")
def append(self, text):
OutputView.content += text
self.run_command("output_view_append", { "text" : text })
def append_finish_message(self, command, working_dir, return_code, elapsed_time):
if return_code != 0:
templ = "[Finished in {:.2f}s with exit code {}]\n"
self.append(templ.format(elapsed_time, return_code))
self.append("[cmd: {}]\n".format(command))
self.append("[dir: {}]\n".format(working_dir))
else:
self.append("[Finished in {:.2f}s]\n".format(elapsed_time))
def _collapse(self, group):
window = sublime.active_window()
views = window.views_in_group(group)
if (len(views) == 0 or len(views) == 1 and
views[0].id() == self.view.id()):
collapse_group(group)
def _close(self):
window = sublime.active_window()
group, index = window.get_view_index(self.view)
window.run_command("close_by_index", {"group": group, "index": index})
self._collapse(group)
OutputView.id = None
@staticmethod
def close():
window = sublime.active_window()
for view in window.views():
if view.is_scratch() and view.name() == "Output":
OutputView(view)._close()
@staticmethod
def find_view():
window = sublime.active_window()
for view in window.views():
if view.is_scratch() and view.name() == "Output":
return OutputView(view)
return None
@staticmethod
def create():
view = OutputView.request()
view.clear()
return view
@staticmethod
def request():
window = sublime.active_window()
num_groups = window.num_groups()
if num_groups < 3:
layout = window.get_layout()
num_rows = len(layout["rows"]) - 1
num_cols = len(layout["cols"]) - 1
if len(layout["rows"]) < 3:
begin = layout["rows"][-2]
end = layout["rows"][-1]
layout["rows"] = layout["rows"][:-1] + [begin * 0.33 + end * 0.66, layout["rows"][-1]]
cells = []
new_num_rows = len(layout["rows"]) - 1
for cell in layout["cells"]:
if cell[3] == num_rows and cell[2] != num_cols:
cells.append([cell[0], cell[1], cell[2], new_num_rows])
else:
cells.append(cell)
cells.append([num_cols - 1, new_num_rows - 1, num_cols, new_num_rows])
layout["cells"] = cells
window.set_layout(layout)
num_groups = window.num_groups()
views = window.views_in_group(num_groups - 1)
output = None
for view in views:
if view.name() == "Output" and view.is_scratch():
output = view
if output == None:
active = window.active_view()
output = window.new_file()
output.settings().set("line_numbers", False)
output.settings().set("scroll_past_end", False)
output.settings().set("scroll_speed", 0.0)
output.settings().set("gutter", False)
output.settings().set("spell_check", False)
output.set_scratch(True)
output.set_name("Output")
output.run_command("output_view_append", { "text" : OutputView.content })
def update():
output.set_viewport_position((0, OutputView.position), False)
sublime.set_timeout(update, 0.0)
OutputView.id = output.id()
window.set_view_index(output, num_groups - 1, len(views))
window.focus_view(active)
return OutputView(output)
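# Minimal usage sketch for the helpers above (from any Sublime Text plugin
# context): create or reuse the shared "Output" view and append text to it.
#
#     view = OutputView.create()      # opens the bottom output group if needed
#     view.append("build started\n")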
class OutputViewClearCommand(sublime_plugin.TextCommand):
def run(self, edit):
self.view.erase(edit, sublime.Region(0, self.view.size()))
class OutputViewAppendCommand(sublime_plugin.TextCommand):
def run(self, edit, text):
scroll = self.view.visible_region().end() == self.view.size()
view = self.view
view.insert(edit, view.size(), text)
if scroll:
viewport = view.viewport_extent()
last_line = view.text_to_layout(view.size())
view.set_viewport_position((0, last_line[1] - viewport[1]), False)
class OpenOutputCommand(sublime_plugin.WindowCommand):
def run(self):
OutputView.request()
class CloseOutputCommand(sublime_plugin.ApplicationCommand):
def run(self):
OutputView.close()
class OutputEventListener(sublime_plugin.EventListener):
def on_query_context(self, view, key, operator, operand, match_all):
print(key)
if key == "output_visible":
return OutputView.find_view() != None
else:
return None
def on_close(self, view):
if view.is_scratch() and view.name() == "Output":
OutputView.position = view.viewport_position()[1]
|
ciechowoj/minion
|
output.py
|
Python
|
mit
| 6,974
|
class Card:
count = 0
url = ""
name = ""
sideboard = -1
|
Riizade/Magic-the-Gathering-Analysis
|
card.py
|
Python
|
mit
| 71
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
from __future__ import (unicode_literals, absolute_import,
division, print_function)
import re
import unicodedata
import datetime
import subprocess
from py3compat import string_types, text_type
from django.utils import timezone
from django.conf import settings
from uninond.models.SMSMessages import SMSMessage
# default country prefix
COUNTRY_PREFIX = getattr(settings, 'COUNTRY_PREFIX', 223)
ALL_COUNTRY_CODES = [1242, 1246, 1264, 1268, 1284, 1340, 1345, 1441, 1473,
1599, 1649, 1664, 1670, 1671, 1684, 1758, 1767, 1784,
1809, 1868, 1869, 1876, 1, 20, 212, 213, 216, 218, 220,
221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231,
232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242,
243, 244, 245, 248, 249, 250, 251, 252, 253, 254, 255,
256, 257, 258, 260, 261, 262, 263, 264, 265, 266, 267,
268, 269, 27, 290, 291, 297, 298, 299, 30, 31, 32, 33,
34, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359,
36, 370, 371, 372, 373, 374, 375, 376, 377, 378, 380,
381, 382, 385, 386, 387, 389, 39, 40, 41, 420, 421, 423,
43, 44, 45, 46, 47, 48, 49, 500, 501, 502, 503, 504,
505, 506, 507, 508, 509, 51, 52, 53, 54, 55, 56, 57, 58,
590, 591, 592, 593, 595, 597, 598, 599, 60, 61, 62, 63,
64, 65, 66, 670, 672, 673, 674, 675, 676, 677, 678, 679,
680, 681, 682, 683, 685, 686, 687, 688, 689, 690, 691,
692, 7, 81, 82, 84, 850, 852, 853, 855, 856, 86, 870,
880, 886, 90, 91, 92, 93, 94, 95, 960, 961, 962, 963,
964, 965, 966, 967, 968, 970, 971, 972, 973, 974, 975,
976, 977, 98, 992, 993, 994, 995, 996, 998]
MONTHS = ['J', 'F', 'M', 'A', 'Y', 'U', 'L', 'G', 'S', 'O', 'N', 'D']
ALPHA = 'abcdefghijklmnopqrstuvwxyz'
def phonenumber_isint(number):
''' whether number is in international format '''
if re.match(r'^[+|(]', number):
return True
if re.match(r'^\d{1,4}\.\d+$', number):
return True
return False
def phonenumber_indicator(number):
''' extract indicator from number or "" '''
for indic in ALL_COUNTRY_CODES:
if number.startswith("%{}".format(indic)) \
or number.startswith("+{}".format(indic)):
return str(indic)
return ""
def phonenumber_cleaned(number):
''' return (indicator, number) cleaned of space and other '''
# clean up
if not isinstance(number, string_types):
number = number.__str__()
# cleanup markup
clean_number = re.sub(r'[^\d\+]', '', number)
if phonenumber_isint(clean_number):
h, indicator, clean_number = \
clean_number.partition(phonenumber_indicator(clean_number))
return (indicator, clean_number)
return (None, clean_number)
def join_phonenumber(prefix, number, force_intl=True):
if not number:
return None
if not prefix and force_intl:
prefix = COUNTRY_PREFIX
return "+{prefix}{number}".format(prefix=prefix, number=number)
def phonenumber_repr(number, skip_indicator=str(COUNTRY_PREFIX)):
    ''' properly formatted for visualization: (xxx) xx xx xx xx '''
def format(number):
if len(number) % 2 == 0:
span = 2
else:
span = 3
# use NBSP
return " ".join(["".join(number[i:i + span])
for i in range(0, len(number), span)])
indicator, clean_number = phonenumber_cleaned(number)
# string-only identity goes into indicator
if indicator is None and not clean_number:
return number.strip()
if indicator and indicator != skip_indicator:
return "(%(ind)s) %(num)s" \
% {'ind': indicator,
'num': format(clean_number)}
return format(clean_number)
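# Hedged examples for phonenumber_repr (the digits are made up): with the
# default COUNTRY_PREFIX of 223 the national indicator is hidden, while a
# foreign indicator is rendered in parentheses.
#
#     phonenumber_repr('+22376000000')  # -> '76 00 00 00'
#     phonenumber_repr('+3366000000')   # -> '(33) 66 00 00 00'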
def normalized_phonenumber(number_text):
if number_text is None or not number_text.strip():
return None
return join_phonenumber(*phonenumber_cleaned(number_text))
def operator_from_malinumber(number, default=settings.FOREIGN):
''' ORANGE or MALITEL based on the number prefix '''
indicator, clean_number = phonenumber_cleaned(
normalized_phonenumber(number))
if indicator is not None and indicator != str(COUNTRY_PREFIX):
return default
for operator, opt in settings.OPERATORS.items():
for prefix in opt[1]:
if clean_number.startswith(str(prefix)):
return operator
return default
def send_sms(to, text):
return SMSMessage.objects.create(
direction=SMSMessage.OUTGOING,
identity=to,
event_on=timezone.now(),
text=text)
def fake_message(to, text):
message = send_sms(to, text)
message.handled = True
message.save()
return message
def to_ascii(text):
    return unicodedata.normalize('NFKD', text_type(text)) \
.encode('ASCII', 'ignore').strip()
def date_to_ident(adate):
year, month, day = adate.timetuple()[0:3]
hyear = text_type(year)[-1]
if day > 16:
hmonth = ALPHA[month * 2]
hday = hex(day // 2)[2:]
else:
hmonth = ALPHA[month]
hday = hex(day)[2:]
return "{y}{m}{d}".format(m=hmonth, d=hday, y=hyear)
def ident_to_date(ident):
hyear, hmonth, hday = ident[0], ident[1], ident[2:]
year = int('201{}'.format(hyear))
day = int(hday, 16)
month = ALPHA.index(hmonth)
if month > 12:
month //= 2
day *= 2
return datetime.date(year, month, day)
def dispatch_sms(text, roles, root):
sent_messages = []
for identity in root.ancestors_contacts(roles, identies_only=True):
sent_messages.append(send_sms(identity, text))
return sent_messages
def datetime_repr(adatetime):
return ("{date} à {time}"
.format(date=adatetime.strftime("%A %-d"),
time=adatetime.strftime("%Hh%M")).lower())
def exec_cmd(command):
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
process.wait()
return process.returncode
|
yeleman/uninond
|
uninond/tools.py
|
Python
|
mit
| 6,367
|
from django.conf import settings
from django.contrib import messages
from django.forms import Form
from django.http import Http404, HttpResponse, HttpResponseBadRequest
from django.shortcuts import get_object_or_404, redirect, render
from django.views.decorators.csrf import csrf_exempt
from django.utils import timezone
from django.utils.translation import ugettext as _
import dateutil.parser, json
from itsdangerous import BadSignature
from appointments.apps.timeslots.models import Action, Constraint
from appointments.apps.timeslots.utils import strfdate, strftime, strptime, is_available
from .forms import ReminderForm
from .models import Appointment, User
from .utils import get_logger, get_serializer, send_confirmation, send_receipt, send_reminder
# Create your views here.
def book(request):
logger = get_logger(__name__, request)
if 'POST' == request.method and request.is_ajax():
fields = json.loads(request.body)
try:
user = User.objects.get(email__iexact=fields['email'])
except KeyError:
# This is an error; time to log, then fail
logger.warning("Bad form submission: KeyError (email)")
return HttpResponseBadRequest()
except User.DoesNotExist:
user = User(email=fields['email'], is_active=False)
user.save()
logger.info("New user %s" % (str(user)))
try:
action = Action.objects.get(slug=fields['action'])
except (KeyError, Action.DoesNotExist):
logger.warning("Bad form submission: KeyError (action) or Action.DoesNotExist")
# This is an error; time to log, then fail
return HttpResponseBadRequest()
try:
constraint = Constraint.objects.get(slug=fields['constraint'])
except (KeyError, Constraint.DoesNotExist):
# This is an error; time to log, then fail
logger.warning("Bad form submission: KeyError (constraint) or Constraint.DoesNotExist")
return HttpResponseBadRequest()
if action not in constraint.actions.all():
# This is an error; time to log, then fail
logger.warning("Bad form submission: bad constraint/action combination")
return HttpResponseBadRequest()
        # Ignore timezone to prevent off-by-one date problems
try:
date = dateutil.parser.parse(fields['date'], ignoretz=True).date()
time = strptime(fields['time'])
except KeyError:
# This is an error; time to log, then fail
logger.warning("Bad form submission: KeyError (date and/or time)")
return HttpResponseBadRequest()
# Check if timeslot is available
if not is_available(constraint, date, time):
# Return some meaningful JSON to say that time is not available
logger.warning("Bad form submission: timeslot not available")
return HttpResponseBadRequest()
# Preprocess sex to ensure it's a valid value
sex = fields['sex'][0].upper() if fields.get('sex', None) else None
if sex not in ['M', 'F']:
sex = ''
appointment = Appointment(
user=user,
action=action,
constraint=constraint,
date=date,
time=time,
# Optional fields...
first_name=fields.get('first_name',''),
last_name=fields.get('last_name',''),
nationality = fields.get('nationality',''),
sex=sex,
# See if this works without any changes...
identity_number=fields.get('identity_number',''),
document_number=fields.get('document_number',''),
phone_number=fields.get('phone_number',''),
mobile_number=fields.get('mobile_number',''),
comment=fields.get('comment',''),
)
# Save the appointment; then log it
appointment.save()
logger.info("New appointment by %s in %s/%s on %s at %s" % (
str(appointment.user),
appointment.constraint.key.slug,
appointment.constraint.slug,
strfdate(appointment.date),
strftime(appointment.time),
)
)
send_receipt(appointment)
        messages.success(request, _("We've sent you an e-mail receipt. Please confirm your appointment by following the instructions."))
# Return some JSON...
return HttpResponse("Ok")
elif 'POST' == request.method:
logger.warning("XMLHttpRequest header not set on POST request")
return HttpResponseBadRequest("XMLHttpRequest (AJAX) form submissions only please!")
return render(request, 'book.html')
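# Shape of the AJAX payload that book() above expects (hedged sketch inferred from the
# field lookups in the view; values are placeholders, not real data):
#   {"email": "user@example.org", "action": "<action-slug>", "constraint": "<constraint-slug>",
#    "date": "2016-01-15", "time": "09:30", "sex": "F",
#    "first_name": "...", "last_name": "...", "nationality": "...",
#    "identity_number": "...", "document_number": "...",
#    "phone_number": "...", "mobile_number": "...", "comment": "..."}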
def cancel(request, payload):
s = get_serializer()
try:
appointment_id = s.loads(payload)
except BadSignature:
        raise Http404
appointment = get_object_or_404(Appointment, pk=appointment_id)
if appointment.is_cancelled():
messages.warning(request, _("You've already cancelled this appointment."))
return redirect('finish')
if 'POST' == request.method:
form = Form(request.POST)
if form.is_valid():
appointment.cancel()
messages.info(request, _("You successfully cancelled your appointment."))
return redirect('finish')
        # An invalid submission should raise the Http404 exception rather than return the class
        raise Http404
form = Form()
return render(request, 'cancel.html', {'form': form})
def confirm(request, payload):
s = get_serializer()
try:
appointment_id = s.loads(payload)
except BadSignature:
        raise Http404
appointment = get_object_or_404(Appointment, pk=appointment_id)
if appointment.is_cancelled():
messages.error(request, _("You cannot reconfirm a cancelled appointment. Please book again."))
elif appointment.is_confirmed():
messages.warning(request, _("Thank you, no need to reconfirm."))
else:
appointment.confirm()
appointment.user.verify()
send_confirmation(appointment)
messages.success(request, _("Thank you for confirming your appointment."))
return redirect('finish')
def reminder(request):
if 'POST' == request.method:
form = ReminderForm(request.POST)
if form.is_valid():
email = form.cleaned_data['email']
try:
user = User.objects.get(email=email)
date = timezone.now().date()
appointments = user.appointments.filter(date__gte=date)
send_reminder(user, appointments)
except User.DoesNotExist:
pass
messages.success(request, _("We'll send you an e-mail with all your appointments."))
return redirect('finish')
else:
form = ReminderForm()
return render(request, 'reminder.html', {'form': form})
# Custom error views
def handler404(request):
    return render(request, '404.html', status=404)
|
marceloomens/appointments
|
appointments/apps/common/views.py
|
Python
|
mit
| 7,355
|
from collections import defaultdict
from typing import cast, Dict, List, NewType
from backend.common.consts.api_version import ApiMajorVersion
from backend.common.models.event_details import EventDetails
from backend.common.models.keys import TeamKey
from backend.common.queries.dict_converters.converter_base import ConverterBase
EventDetailsDict = NewType("EventDetailsDict", Dict)
class EventDetailsConverter(ConverterBase):
SUBVERSIONS = { # Increment every time a change to the dict is made
ApiMajorVersion.API_V3: 3,
}
@classmethod
def _convert_list(cls, model_list: List[EventDetails], version: ApiMajorVersion):
CONVERTERS = {
3: cls.eventsDetailsConverter_v3,
}
return CONVERTERS[version](model_list)
@classmethod
def eventsDetailsConverter_v3(cls, event_details: List[EventDetails]):
return list(map(cls.eventDetailsConverter_v3, event_details))
@classmethod
def eventDetailsConverter_v3(cls, event_details: EventDetails) -> EventDetailsDict:
normalized_oprs = defaultdict(dict)
if event_details and event_details.matchstats:
for stat_type, stats in event_details.matchstats.items():
if stat_type in {"oprs", "dprs", "ccwms"}:
for team, value in cast(Dict[TeamKey, float], stats).items():
if "frc" not in team: # Normalize output
team = "frc{}".format(team)
normalized_oprs[stat_type][team] = value
rankings = {}
if event_details:
rankings = event_details.renderable_rankings
else:
rankings = {
"extra_stats_info": [],
"rankings": [],
"sort_order_info": None,
}
event_details_dict = {
"alliances": event_details.alliance_selections if event_details else [],
"district_points": event_details.district_points if event_details else {},
"insights": event_details.insights
if event_details
else {"qual": {}, "playoff": {}},
"oprs": normalized_oprs if normalized_oprs else {}, # OPRs, DPRs, CCWMs
"predictions": event_details.predictions if event_details else {},
"rankings": rankings,
}
return EventDetailsDict(event_details_dict)
|
the-blue-alliance/the-blue-alliance
|
src/backend/common/queries/dict_converters/event_details_converter.py
|
Python
|
mit
| 2,401
|
import setuptools
with open("README.rst") as f:
long_description = f.read()
setuptools.setup(
name='django-diplomacy',
version="0.8.0",
author='Jeff Bradberry',
author_email='jeff.bradberry@gmail.com',
description='A play-by-web app for Diplomacy',
long_description=long_description,
    long_description_content_type='text/x-rst',
url='http://github.com/jbradberry/django-diplomacy',
packages=setuptools.find_packages(),
entry_points={
'turngeneration.plugins': ['diplomacy = diplomacy.plugins:TurnGeneration'],
},
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Games/Entertainment :: Turn Based Strategy'
],
)
|
jbradberry/django-diplomacy
|
setup.py
|
Python
|
mit
| 915
|
"""Signal pattern matching."""
import re
from typing import Union
class PatternError(Exception):
"""Pattern error."""
class Pattern:
"""Signal pattern representation."""
PATTERN_REGEX = re.compile(r"[01xX]+")
PATTERN_REGEX_BYTES = re.compile(b"[01xX]+")
    def __init__(self, pattern: Union[str, bytes, int]):
        """Initialize."""
        if not isinstance(pattern, (str, bytes, int)):
            raise TypeError("pattern must be a string, bytes or an integer")
# tolerate some variations
if isinstance(pattern, str):
if pattern.endswith("h"):
pattern = self.hex_to_bin(pattern)
if pattern.startswith("0b"):
pattern = pattern[2:]
m = self.PATTERN_REGEX.match(pattern)
        elif isinstance(pattern, int):
            # store the binary form without Python's '0b' prefix
            self._pattern = bin(pattern)[2:]
            return
else:
m = self.PATTERN_REGEX_BYTES.match(pattern)
if m is None:
raise PatternError(f"pattern is invalid: {pattern}")
self._pattern = pattern
@property
def pattern(self):
"""Get pattern."""
return self._pattern
def __repr__(self):
"""Get representation."""
return self.pattern
def __len__(self):
"""Get length."""
return len(self._pattern)
def match(self, value: Union[str, bytes]) -> bool:
"""Match against value."""
if not isinstance(value, (str, bytes)):
raise TypeError(
f"value must be string or bytes, got {type(value)}"
)
if type(value) != type(self._pattern):
raise TypeError("incompatible types for value and pattern")
pattern = self._pattern
if len(value) < len(self._pattern):
            # zero-extend incoming value
count = len(self._pattern) - len(value)
value = "0" * count + value
elif len(value) > len(self._pattern):
# zero-extend pattern
count = len(value) - len(self._pattern)
pattern = "0" * count + self._pattern
for value_bit, expected_bit in zip(value, pattern):
if expected_bit in ("x", "X"):
# don't care
continue
if expected_bit != value_bit:
return False
return True
@staticmethod
def hex_to_bin(hexstr):
"""Convert hex to binary including don't cares."""
if hexstr.endswith("h"):
hexstr = hexstr[:-1]
hexstr = hexstr.replace("x", "X")
split = hexstr.split("X")
ret = ""
for fragment in split:
            ret += bin(int(fragment, 16))[2:].zfill(4 * len(fragment)) if fragment else "xxxx"
return ret
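# Usage example (hedged, added for illustration):
#   Pattern("1x0").match("110") -> True   (the middle bit is a don't-care)
#   Pattern("1x0").match("010") -> False  (first bit differs)
#   Pattern("a3h") is accepted too: the trailing 'h' routes it through hex_to_bin() above.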
|
brunosmmm/hdltools
|
hdltools/patterns/__init__.py
|
Python
|
mit
| 2,709
|
#!/usr/bin/python
import re
import time
import subprocess
out=subprocess.check_output(["cat","/sys/class/net/eth0/address"])
print out
print "hi"
|
vtill/SecyrIT
|
test/openvpn/main.py
|
Python
|
mit
| 149
|
# file: numpy_pi.py
"""Calculating pi with Monte Carlo Method and NumPy.
"""
from __future__ import print_function
import numpy #1
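# Note: the @profile decorator below is injected by kernprof/line_profiler at run time
# (e.g. `kernprof -l numpy_pi.py`); running the script directly would raise NameError.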
@profile
def pi_numpy(total): #2
"""Compute pi.
"""
x = numpy.random.rand(total) #3
y = numpy.random.rand(total) #4
dist = numpy.sqrt(x * x + y * y) #5
count_inside = len(dist[dist < 1]) #6
return 4.0 * count_inside / total
if __name__ == '__main__':
def test():
"""Time the execution.
"""
import timeit
start = timeit.default_timer()
pi_numpy(int(1e6))
print('run time', timeit.default_timer() - start)
test()
|
rawrgulmuffins/presentation_notes
|
pycon2016/tutorials/measure_dont_guess/handout/pi/numpy_pi.py
|
Python
|
mit
| 834
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-30 17:53
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wunderlist', '0011_auto_20151230_1843'),
]
operations = [
migrations.AlterField(
model_name='connection',
name='owner',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='connections', to=settings.AUTH_USER_MODEL),
),
]
|
passuf/WunderHabit
|
wunderlist/migrations/0012_auto_20151230_1853.py
|
Python
|
mit
| 631
|
import os
from channels.asgi import get_channel_layer
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ena.settings")
channel_layer = get_channel_layer()
|
froggleston/ena_search_django
|
ena/ena/asgi.py
|
Python
|
mit
| 156
|
from rest_framework.serializers import (
HyperlinkedIdentityField,
ModelSerializer,
SerializerMethodField,
)
from comments.api.serializers import CommentSerializer
from accounts.api.serializers import UserDetailSerializer
from comments.models import Comment
from posts.models import Post
class PostCreateUpdateSerializer(ModelSerializer):
class Meta:
model = Post
fields = [
#'id',
'title',
#'slug',
'content',
'publish',
]
post_detail_url = HyperlinkedIdentityField(
view_name = 'posts-api:detail',
lookup_field = 'slug',
)
class PostDetailSerializer(ModelSerializer):
url = post_detail_url
user = UserDetailSerializer(read_only=True)
image = SerializerMethodField()
html = SerializerMethodField()
comments = SerializerMethodField()
class Meta:
model = Post
fields = [
'url',
'id',
'user',
'title',
'slug',
'content',
'html',
'publish',
'image',
'comments',
]
def get_html(self, obj):
return obj.get_markdown()
def get_image(self, obj):
try:
image = obj.image.url
except:
image = None
return image
def get_comments(self, obj):
#content_type = obj.get_content_type
#object_id = obj.id
c_qs = Comment.objects.filter_by_instance(obj)
comments = CommentSerializer(c_qs, many=True).data
return comments
class PostListSerializer(ModelSerializer):
url = post_detail_url
user = UserDetailSerializer(read_only=True)
class Meta:
model = Post
fields = [
'url',
'user',
'title',
'slug',
'content',
'publish',
]
|
rohitkyadav/blog-api
|
src/posts/api/serializers.py
|
Python
|
mit
| 1,539
|
import json
import requests
class SlackNotification(object):
icon_url = "https://github-bogdal.s3.amazonaws.com/freepacktbook/icon.png"
def __init__(self, slack_url, channel):
self.slack_url = slack_url
self.channel = channel
if not self.channel.startswith("#"):
self.channel = "#%s" % (self.channel,)
def notify(self, data):
if not all([self.slack_url, self.channel]):
return
payload = {
"channel": self.channel,
"username": "PacktPub Free Learning",
"icon_url": self.icon_url,
"attachments": [
{
"fallback": "Today's Free eBook: %s" % data["title"],
"pretext": "Today's Free eBook:",
"title": data["title"],
"title_link": data["book_url"],
"color": "#ff7f00",
"text": "%s\n%s" % (data["description"], data.get("url", "")),
"thumb_url": data["image_url"].replace(" ", "%20"),
}
],
}
requests.post(self.slack_url, data={"payload": json.dumps(payload)})
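# Example of the `data` dict notify() expects (hedged sketch based on the keys used above;
# values are placeholders): {"title": "...", "book_url": "https://...", "description": "...",
# "image_url": "https://...", "url": "https://..."}  # "url" is optional (read via data.get)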
|
bogdal/freepacktbook
|
freepacktbook/slack.py
|
Python
|
mit
| 1,181
|
"""
KINCluster is clustering like KIN.
release note:
- version 0.1.6
fix settings
update pipeline
delete unused arguments
fix convention by pylint
now logging
- version 0.1.5.5
fix using custom settings
        support both module and dict
- version 0.1.5.4
Update tokenizer, remove stopwords eff
- version 0.1.5.3
now custom setting available.
see settings.py
- version 0.1.5.2
change item, extractor, pipeline module
now, pipeline.dress_item pass just item(extractor.dump)
fix prev versions error (too many value to unpack)
"""
__version__ = '0.1.6'
__all__ = ['KINCluster',
'Cluster', 'Extractor', 'Item', 'Pipeline',
'tokenizer', 'stopwords']
from KINCluster.KINCluster import KINCluster
from KINCluster.core.cluster import Cluster
from KINCluster.core.extractor import Extractor
from KINCluster.core.item import Item
from KINCluster.core.pipeline import Pipeline
from KINCluster.lib.tokenizer import tokenizer
from KINCluster.lib.stopwords import stopwords
|
memento7/KINCluster
|
KINCluster/__init__.py
|
Python
|
mit
| 1,038
|
import sublime, sublime_plugin
class SaveAllExistingFilesCommand(sublime_plugin.ApplicationCommand):
def run(self):
for w in sublime.windows():
self._save_files_in_window(w)
def _save_files_in_window(self, w):
for v in w.views():
self._save_existing_file_in_view(v)
def _save_existing_file_in_view(self, v):
if v.file_name() and v.is_dirty():
v.run_command("save")
r"""
append to file sublime plugin OR api
sublime save dirty file plugin stackoverflow
"""
|
app-git-hub/SendTo
|
examples/save.py
|
Python
|
mit
| 481
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "asteria.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
tunegoon/asteria
|
manage.py
|
Python
|
mit
| 250
|
from collections import deque
import time
import requests
# Constants
BRAZIL = 'br'
EUROPE_NORDIC_EAST = 'eune'
EUROPE_WEST = 'euw'
KOREA = 'kr'
LATIN_AMERICA_NORTH = 'lan'
LATIN_AMERICA_SOUTH = 'las'
NORTH_AMERICA = 'na'
OCEANIA = 'oce'
RUSSIA = 'ru'
TURKEY = 'tr'
# Platforms
platforms = {
BRAZIL: 'BR1',
EUROPE_NORDIC_EAST: 'EUN1',
EUROPE_WEST: 'EUW1',
KOREA: 'KR',
LATIN_AMERICA_NORTH: 'LA1',
LATIN_AMERICA_SOUTH: 'LA2',
NORTH_AMERICA: 'NA1',
OCEANIA: 'OC1',
RUSSIA: 'RU',
TURKEY: 'TR1'
}
queue_types = [
'CUSTOM', # Custom games
'NORMAL_5x5_BLIND', # Normal 5v5 blind pick
'BOT_5x5', # Historical Summoners Rift coop vs AI games
'BOT_5x5_INTRO', # Summoners Rift Intro bots
'BOT_5x5_BEGINNER', # Summoner's Rift Coop vs AI Beginner Bot games
'BOT_5x5_INTERMEDIATE', # Historical Summoner's Rift Coop vs AI Intermediate Bot games
'NORMAL_3x3', # Normal 3v3 games
'NORMAL_5x5_DRAFT', # Normal 5v5 Draft Pick games
'ODIN_5x5_BLIND', # Dominion 5v5 Blind Pick games
'ODIN_5x5_DRAFT', # Dominion 5v5 Draft Pick games
'BOT_ODIN_5x5', # Dominion Coop vs AI games
'RANKED_SOLO_5x5', # Ranked Solo 5v5 games
'RANKED_PREMADE_3x3', # Ranked Premade 3v3 games
'RANKED_PREMADE_5x5', # Ranked Premade 5v5 games
'RANKED_TEAM_3x3', # Ranked Team 3v3 games
'RANKED_TEAM_5x5', # Ranked Team 5v5 games
'BOT_TT_3x3', # Twisted Treeline Coop vs AI games
'GROUP_FINDER_5x5', # Team Builder games
'ARAM_5x5', # ARAM games
'ONEFORALL_5x5', # One for All games
'FIRSTBLOOD_1x1', # Snowdown Showdown 1v1 games
'FIRSTBLOOD_2x2', # Snowdown Showdown 2v2 games
'SR_6x6', # Hexakill games
'URF_5x5', # Ultra Rapid Fire games
'BOT_URF_5x5', # Ultra Rapid Fire games played against AI games
'NIGHTMARE_BOT_5x5_RANK1', # Doom Bots Rank 1 games
'NIGHTMARE_BOT_5x5_RANK2', # Doom Bots Rank 2 games
'NIGHTMARE_BOT_5x5_RANK5', # Doom Bots Rank 5 games
'ASCENSION_5x5', # Ascension games
'HEXAKILL', # 6v6 games on twisted treeline
'KING_PORO_5x5', # King Poro game games
'COUNTER_PICK', # Nemesis games,
'BILGEWATER_5x5', # Black Market Brawlers games
]
game_maps = [
{'map_id': 1, 'name': "Summoner's Rift", 'notes': "Summer Variant"},
{'map_id': 2, 'name': "Summoner's Rift", 'notes': "Autumn Variant"},
{'map_id': 3, 'name': "The Proving Grounds", 'notes': "Tutorial Map"},
{'map_id': 4, 'name': "Twisted Treeline", 'notes': "Original Version"},
{'map_id': 8, 'name': "The Crystal Scar", 'notes': "Dominion Map"},
{'map_id': 10, 'name': "Twisted Treeline", 'notes': "Current Version"},
{'map_id': 11, 'name': "Summoner's Rift", 'notes': "Current Version"},
{'map_id': 12, 'name': "Howling Abyss", 'notes': "ARAM Map"},
{'map_id': 14, 'name': "Butcher's Bridge", 'notes': "ARAM Map"},
]
game_modes = [
'CLASSIC', # Classic Summoner's Rift and Twisted Treeline games
'ODIN', # Dominion/Crystal Scar games
'ARAM', # ARAM games
'TUTORIAL', # Tutorial games
'ONEFORALL', # One for All games
'ASCENSION', # Ascension games
'FIRSTBLOOD', # Snowdown Showdown games
'KINGPORO', # King Poro games
]
game_types = [
'CUSTOM_GAME', # Custom games
'TUTORIAL_GAME', # Tutorial games
'MATCHED_GAME', # All other games
]
sub_types = [
'NONE', # Custom games
'NORMAL', # Summoner's Rift unranked games
'NORMAL_3x3', # Twisted Treeline unranked games
'ODIN_UNRANKED', # Dominion/Crystal Scar games
'ARAM_UNRANKED_5v5', # ARAM / Howling Abyss games
'BOT', # Summoner's Rift and Crystal Scar games played against AI
'BOT_3x3', # Twisted Treeline games played against AI
'RANKED_SOLO_5x5', # Summoner's Rift ranked solo queue games
'RANKED_TEAM_3x3', # Twisted Treeline ranked team games
'RANKED_TEAM_5x5', # Summoner's Rift ranked team games
'ONEFORALL_5x5', # One for All games
'FIRSTBLOOD_1x1', # Snowdown Showdown 1x1 games
'FIRSTBLOOD_2x2', # Snowdown Showdown 2x2 games
'SR_6x6', # Hexakill games
'CAP_5x5', # Team Builder games
'URF', # Ultra Rapid Fire games
'URF_BOT', # Ultra Rapid Fire games against AI
'NIGHTMARE_BOT', # Nightmare bots
'ASCENSION', # Ascension games
'HEXAKILL', # Twisted Treeline 6x6 Hexakill
'KING_PORO', # King Poro games
'COUNTER_PICK', # Nemesis games
'BILGEWATER', # Black Market Brawlers games
]
player_stat_summary_types = [
'Unranked', # Summoner's Rift unranked games
'Unranked3x3', # Twisted Treeline unranked games
'OdinUnranked', # Dominion/Crystal Scar games
'AramUnranked5x5', # ARAM / Howling Abyss games
'CoopVsAI', # Summoner's Rift and Crystal Scar games played against AI
'CoopVsAI3x3', # Twisted Treeline games played against AI
'RankedSolo5x5', # Summoner's Rift ranked solo queue games
'RankedTeams3x3', # Twisted Treeline ranked team games
'RankedTeams5x5', # Summoner's Rift ranked team games
'OneForAll5x5', # One for All games
'FirstBlood1x1', # Snowdown Showdown 1x1 games
'FirstBlood2x2', # Snowdown Showdown 2x2 games
'SummonersRift6x6', # Hexakill games
'CAP5x5', # Team Builder games
'URF', # Ultra Rapid Fire games
'URFBots', # Ultra Rapid Fire games played against AI
'NightmareBot', # Summoner's Rift games played against Nightmare AI
'Hexakill', # Twisted Treeline 6x6 Hexakill games
'KingPoro', # King Poro games
'CounterPick', # Nemesis games
'Bilgewater', # Black Market Brawlers games
]
solo_queue, ranked_5s, ranked_3s = 'RANKED_SOLO_5x5', 'RANKED_TEAM_5x5', 'RANKED_TEAM_3x3'
api_versions = {
'champion': 1.2,
'current-game': 1.0,
'featured-games': 1.0,
'game': 1.3,
'league': 2.5,
'lol-static-data': 1.2,
'lol-status': 1.0,
'match': 2.2,
'matchhistory': 2.2,
'matchlist': 2.2,
'stats': 1.3,
'summoner': 1.4,
'team': 2.4
}
class LoLException(Exception):
def __init__(self, error, response):
self.error = error
self.response = response
def __str__(self):
return self.error
error_400 = "Bad request"
error_401 = "Unauthorized"
error_404 = "Game data not found"
error_429 = "Too many requests"
error_500 = "Internal server error"
error_503 = "Service unavailable"
def raise_status(response):
if response.status_code == 400:
raise LoLException(error_400, response)
elif response.status_code == 401:
raise LoLException(error_401, response)
elif response.status_code == 404:
raise LoLException(error_404, response)
elif response.status_code == 429:
raise LoLException(error_429, response)
elif response.status_code == 500:
raise LoLException(error_500, response)
elif response.status_code == 503:
raise LoLException(error_503, response)
else:
response.raise_for_status()
class RateLimit:
def __init__(self, allowed_requests, seconds):
self.allowed_requests = allowed_requests
self.seconds = seconds
self.made_requests = deque()
def __reload(self):
t = time.time()
while len(self.made_requests) > 0 and self.made_requests[0] < t:
self.made_requests.popleft()
def add_request(self):
self.made_requests.append(time.time() + self.seconds)
def request_available(self):
self.__reload()
return len(self.made_requests) < self.allowed_requests
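# RateLimit above implements a simple sliding window: add_request() stores the moment at
# which a request stops counting (now + seconds), __reload() drops entries whose expiry has
# passed, and request_available() allows a new call only while fewer than allowed_requests
# entries remain in the window.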
class RiotWatcher:
def __init__(self, key, default_region=NORTH_AMERICA, limits=(RateLimit(10, 10), RateLimit(500, 600), )):
self.key = key
self.default_region = default_region
self.limits = limits
def can_make_request(self):
for lim in self.limits:
if not lim.request_available():
return False
return True
def base_request(self, url, region, static=False, **kwargs):
if region is None:
region = self.default_region
args = {'api_key': self.key}
for k in kwargs:
if kwargs[k] is not None:
args[k] = kwargs[k]
r = requests.get(
'https://{proxy}.api.pvp.net/api/lol/{static}{region}/{url}'.format(
proxy='global' if static else region,
static='static-data/' if static else '',
region=region,
url=url
),
params=args
)
if not static:
for lim in self.limits:
lim.add_request()
raise_status(r)
return r.json()
def _observer_mode_request(self, url, proxy=None, **kwargs):
if proxy is None:
proxy = self.default_region
args = {'api_key': self.key}
for k in kwargs:
if kwargs[k] is not None:
args[k] = kwargs[k]
r = requests.get(
'https://{proxy}.api.pvp.net/observer-mode/rest/{url}'.format(
proxy=proxy,
url=url
),
params=args
)
for lim in self.limits:
lim.add_request()
raise_status(r)
return r.json()
@staticmethod
def sanitized_name(name):
return name.replace(' ', '').lower()
# champion-v1.2
def _champion_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/champion/{end_url}'.format(
version=api_versions['champion'],
end_url=end_url
),
region,
**kwargs
)
def get_all_champions(self, region=None, free_to_play=False):
return self._champion_request('', region, freeToPlay=free_to_play)
def get_champion(self, champion_id, region=None):
return self._champion_request('{id}'.format(id=champion_id), region)
# current-game-v1.0
def get_current_game(self, summoner_id, platform_id=None, region=None):
if platform_id is None:
platform_id = platforms[self.default_region]
return self._observer_mode_request(
'consumer/getSpectatorGameInfo/{platform}/{summoner_id}'.format(
platform=platform_id,
summoner_id=summoner_id
),
region
)
# featured-game-v1.0
def get_featured_games(self, proxy=None):
return self._observer_mode_request('featured', proxy)
# game-v1.3
def _game_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/game/{end_url}'.format(
version=api_versions['game'],
end_url=end_url
),
region,
**kwargs
)
def get_recent_games(self, summoner_id, region=None):
return self._game_request('by-summoner/{summoner_id}/recent'.format(summoner_id=summoner_id), region)
# league-v2.5
def _league_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/league/{end_url}'.format(
version=api_versions['league'],
end_url=end_url
),
region,
**kwargs
)
def get_league(self, summoner_ids=None, team_ids=None, region=None):
"""summoner_ids and team_ids arguments must be iterable, only one should be specified, not both"""
if (summoner_ids is None) != (team_ids is None):
if summoner_ids is not None:
return self._league_request(
'by-summoner/{summoner_ids}'.format(summoner_ids=','.join([str(s) for s in summoner_ids])),
region
)
else:
return self._league_request(
'by-team/{team_ids}'.format(team_ids=','.join([str(t) for t in team_ids])),
region
)
def get_league_entry(self, summoner_ids=None, team_ids=None, region=None):
"""summoner_ids and team_ids arguments must be iterable, only one should be specified, not both"""
if (summoner_ids is None) != (team_ids is None):
if summoner_ids is not None:
return self._league_request(
'by-summoner/{summoner_ids}/entry'.format(
summoner_ids=','.join([str(s) for s in summoner_ids])
),
region
)
else:
return self._league_request(
'by-team/{team_ids}/entry'.format(team_ids=','.join([str(t) for t in team_ids])),
region
)
def get_challenger(self, region=None, queue=solo_queue):
return self._league_request('challenger', region, type=queue)
def get_master(self, region=None, queue=solo_queue):
return self._league_request('master', region, type=queue)
# lol-static-data-v1.2
def _static_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/{end_url}'.format(
version=api_versions['lol-static-data'],
end_url=end_url
),
region,
static=True,
**kwargs
)
def static_get_champion_list(self, region=None, locale=None, version=None, data_by_id=None, champ_data=None):
return self._static_request(
'champion',
region,
locale=locale,
version=version,
dataById=data_by_id,
champData=champ_data
)
def static_get_champion(self, champ_id, region=None, locale=None, version=None, champ_data=None):
return self._static_request(
'champion/{id}'.format(id=champ_id),
region,
locale=locale,
version=version,
champData=champ_data
)
def static_get_item_list(self, region=None, locale=None, version=None, item_list_data=None):
return self._static_request('item', region, locale=locale, version=version, itemListData=item_list_data)
def static_get_item(self, item_id, region=None, locale=None, version=None, item_data=None):
return self._static_request(
'item/{id}'.format(id=item_id),
region,
locale=locale,
version=version,
itemData=item_data
)
def static_get_mastery_list(self, region=None, locale=None, version=None, mastery_list_data=None):
return self._static_request(
'mastery',
region,
locale=locale,
version=version,
masteryListData=mastery_list_data
)
def static_get_mastery(self, mastery_id, region=None, locale=None, version=None, mastery_data=None):
return self._static_request(
'mastery/{id}'.format(id=mastery_id),
region,
locale=locale,
version=version,
masteryData=mastery_data
)
def static_get_realm(self, region=None):
return self._static_request('realm', region)
def static_get_rune_list(self, region=None, locale=None, version=None, rune_list_data=None):
return self._static_request('rune', region, locale=locale, version=version, runeListData=rune_list_data)
def static_get_rune(self, rune_id, region=None, locale=None, version=None, rune_data=None):
return self._static_request(
'rune/{id}'.format(id=rune_id),
region,
locale=locale,
version=version,
runeData=rune_data
)
def static_get_summoner_spell_list(self, region=None, locale=None, version=None, data_by_id=None, spell_data=None):
return self._static_request(
'summoner-spell',
region,
locale=locale,
version=version,
dataById=data_by_id,
spellData=spell_data
)
def static_get_summoner_spell(self, spell_id, region=None, locale=None, version=None, spell_data=None):
return self._static_request(
'summoner-spell/{id}'.format(id=spell_id),
region,
locale=locale,
version=version,
spellData=spell_data
)
def static_get_versions(self, region=None):
return self._static_request('versions', region)
# match-v2.2
def _match_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/match/{end_url}'.format(
version=api_versions['match'],
end_url=end_url
),
region,
**kwargs
)
def get_match(self, match_id, region=None, include_timeline=False):
return self._match_request(
'{match_id}'.format(match_id=match_id),
region,
includeTimeline=include_timeline
)
# lol-status-v1.0
@staticmethod
def get_server_status(region=None):
if region is None:
url = 'shards'
else:
url = 'shards/{region}'.format(region=region)
r = requests.get('http://status.leagueoflegends.com/{url}'.format(url=url))
raise_status(r)
return r.json()
# match history-v2.2
def _match_history_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/matchhistory/{end_url}'.format(
version=api_versions['matchhistory'],
end_url=end_url
),
region,
**kwargs
)
def get_match_history(self, summoner_id, region=None, champion_ids=None, ranked_queues=None, begin_index=None,
end_index=None):
return self._match_history_request(
'{summoner_id}'.format(summoner_id=summoner_id),
region,
championIds=champion_ids,
rankedQueues=ranked_queues,
beginIndex=begin_index,
endIndex=end_index
)
# match list-v2.2
def _match_list_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/matchlist/by-summoner/{end_url}'.format(
version=api_versions['matchlist'],
end_url=end_url,
),
region,
**kwargs
)
def get_match_list(self, summoner_id, region=None, champion_ids=None, ranked_queues=None, seasons=None,
begin_time=None, end_time=None, begin_index=None, end_index=None):
return self._match_list_request(
'{summoner_id}'.format(summoner_id=summoner_id),
region,
            championIds=champion_ids,
rankedQueues=ranked_queues,
seasons=seasons,
beginTime=begin_time,
endTime=end_time,
beginIndex=begin_index,
endIndex=end_index
)
# stats-v1.3
def _stats_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/stats/{end_url}'.format(
version=api_versions['stats'],
end_url=end_url
),
region,
**kwargs
)
def get_stat_summary(self, summoner_id, region=None, season=None):
return self._stats_request(
'by-summoner/{summoner_id}/summary'.format(summoner_id=summoner_id),
region,
season='SEASON{}'.format(season) if season is not None else None)
def get_ranked_stats(self, summoner_id, region=None, season=None):
return self._stats_request(
'by-summoner/{summoner_id}/ranked'.format(summoner_id=summoner_id),
region,
season='SEASON{}'.format(season) if season is not None else None
)
# summoner-v1.4
def _summoner_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/summoner/{end_url}'.format(
version=api_versions['summoner'],
end_url=end_url
),
region,
**kwargs
)
def get_mastery_pages(self, summoner_ids, region=None):
return self._summoner_request(
'{summoner_ids}/masteries'.format(summoner_ids=','.join([str(s) for s in summoner_ids])),
region
)
def get_rune_pages(self, summoner_ids, region=None):
return self._summoner_request(
'{summoner_ids}/runes'.format(summoner_ids=','.join([str(s) for s in summoner_ids])),
region
)
def get_summoners(self, names=None, ids=None, region=None):
if (names is None) != (ids is None):
return self._summoner_request(
'by-name/{summoner_names}'.format(
summoner_names=','.join([self.sanitized_name(n) for n in names])) if names is not None
else '{summoner_ids}'.format(summoner_ids=','.join([str(i) for i in ids])),
region
)
else:
return None
def get_summoner(self, name=None, _id=None, region=None):
if (name is None) != (_id is None):
if name is not None:
name = self.sanitized_name(name)
return self.get_summoners(names=[name, ], region=region)[name]
else:
return self.get_summoners(ids=[_id, ], region=region)[str(_id)]
return None
def get_summoner_name(self, summoner_ids, region=None):
return self._summoner_request(
'{summoner_ids}/name'.format(summoner_ids=','.join([str(s) for s in summoner_ids])),
region
)
# team-v2.4
def _team_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/team/{end_url}'.format(
version=api_versions['team'],
end_url=end_url
),
region,
**kwargs
)
def get_teams_for_summoner(self, summoner_id, region=None):
return self.get_teams_for_summoners([summoner_id, ], region=region)[str(summoner_id)]
def get_teams_for_summoners(self, summoner_ids, region=None):
return self._team_request(
'by-summoner/{summoner_id}'.format(summoner_id=','.join([str(s) for s in summoner_ids])),
region
)
def get_team(self, team_id, region=None):
return self.get_teams([team_id, ], region=region)[str(team_id)]
def get_teams(self, team_ids, region=None):
return self._team_request('{team_ids}'.format(team_ids=','.join(str(t) for t in team_ids)), region)
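# Usage sketch (hedged; the API key and summoner name below are placeholders):
#   watcher = RiotWatcher('my-riot-api-key')
#   if watcher.can_make_request():
#       summoner = watcher.get_summoner(name='some summoner', region=NORTH_AMERICA)
#       recent = watcher.get_recent_games(summoner['id'])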
|
gnozell/Yar-Ha-Har
|
lib/riotwatcher/riotwatcher.py
|
Python
|
mit
| 22,700
|
#!python
# coding=utf-8
# Package level logger
import logging
logger = logging.getLogger("pocean")
logger.addHandler(logging.NullHandler())
__version__ = "1.0.0"
|
joefutrelle/pocean-core
|
pocean/__init__.py
|
Python
|
mit
| 164
|
from .message import *
from functools import wraps
import datetime
import pymongo
import re
from app import session
class Singleton(type):
instance = None
def __call__(cls, *args, **kwargs):
if not cls.instance:
cls.instance = super(Singleton, cls).__call__(*args, **kwargs)
return cls.instance
class APIManager(metaclass=Singleton):
STATELESS_PROCESS = {
'오늘의 식단': FoodMessage,
'운영시간': TimeTableMessage,
'학식': PupilFoodMessage,
'교식': FacultyFoodMessage,
# '기식': DormFoodMessage,
'푸드코트': FoodCourtMessage,
'스낵코너': SnackCornerMessage,
'더 키친': TheKitchenMessage,
'버스': BusMessage,
'정문(20166)': BusFrontMessage,
'베라 앞(20165)': BusBeraMessage,
'중문(20169)': BusMiddleMessage,
'지하철': SubMessage,
'도서관': LibMessage,
}
PROCESS = {
'내일의 식단': [
{
'내일의 식단': TomorrowFoodMessage,
},
{
'학식': TomorrowPupilFoodMessage,
'교식': TomorrowFacultyFoodMessage,
# '기식': TomorrowDormFoodMessage,
'푸드코트': TomorrowFoodCourtMessage,
'스낵코너': TomorrowSnackCornerMessage,
'더 키친': TomorrowTheKitchenMessage,
},
],
# '도서관': [
# {
# '도서관': LibMessage,
# },
# {
        #         # treat this as a special case for now
# '*': OnGoingMessage,
# }
# ],
'식단 리뷰': [
{
'식단 리뷰': ReviewInitMessage,
},
{
'리뷰 보기': ReviewBrowseMessage,
'리뷰 남기기': ReviewPostMessage,
'리뷰 삭제하기': OnGoingMessage,
},
{
                # '리뷰 남기기' reaches this third step; the reply is free text typed by the user, so any string may arrive
'*': OnGoingMessage,
}
],
}
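    # PROCESS above describes multi-step dialogues: the outer key is the user's first
    # choice, each list element is one step of the conversation, the inner dict maps the
    # follow-up reply to the Message class that answers it, and '*' marks steps whose
    # reply is free text.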
def handle_process(self, process, user_key, content):
"""
        Handle menu items that require a multi-step dialogue.
:return: Message Object
"""
if process == '도서관':
if '열람실' in content:
                room = content[0]  # content looks like '1 열람실 (이용률: 9.11%)'; [0] picks out just the room number
msg = LibStatMessage(room=room)
UserSessionAdmin.delete(user_key)
else:
UserSessionAdmin.delete(user_key)
return FailMessage('도서관 process에서 문제가 발생하였습니다 해당 세션을 초기화합니다.')
return msg
elif process == '식단 리뷰':
if content in self.PROCESS[process][1]:
new_msg = self.PROCESS[process][1][content]
if content in ['리뷰 보기', '리뷰 삭제']:
UserSessionAdmin.delete(user_key)
return new_msg()
else:
UserSessionAdmin.delete(user_key)
return ReviewPostSuccess(user_key, content)
elif process == '내일의 식단':
if content in self.PROCESS[process][1]:
new_msg = self.PROCESS[process][1][content]
UserSessionAdmin.delete(user_key)
else:
UserSessionAdmin.delete(user_key)
return FailMessage('내일의 식단 process에서 문제가 발생하였습니다 해당 세션을 초기화합니다.')
return new_msg()
return FailMessage('Unhandled process {}'.format(process))
def handle_stateless_process(self, user_key, content):
"""
        Handle items that do not need a follow-up exchange (stateless).
:param user_key:
:param content:
:return: Message Object
"""
if content in self.PROCESS:
UserSessionAdmin.init_process(user_key, content)
new_msg = self.PROCESS[content][0][content]
return new_msg()
else:
new_msg = self.STATELESS_PROCESS[content]
return new_msg()
def get_msg(self, user_key, content):
has_session = UserSessionAdmin.check_user_key(user_key)
process = UserSessionAdmin.get_process(user_key)
if not has_session:
UserSessionAdmin.init(user_key, content)
if content == '취소':
UserSessionAdmin.delete(user_key)
return CancelMessage()
UserSessionAdmin.add_history(user_key, content)
if process:
return self.handle_process(process, user_key, content)
else:
return self.handle_stateless_process(user_key, content)
def process(self, stat, req=None):
        # compare strings by value ('is' identity checks are unreliable here)
        if stat == 'home':
home_message = HomeMessage()
return home_message
        elif stat == 'message':
content = req['content']
user_key = req['user_key']
return self.get_msg(user_key, content)
        elif stat == 'fail':
log = req['log']
user_key = req['user_key']
fail_message = FailMessage('파악할수 없는 에러가 발생하여 해당 세션을 초기화 합니다\n{}'.format(log))
UserSessionAdmin.delete(user_key)
return fail_message
        elif stat == 'etc':
return SuccessMessage()
elif stat is "scheduler":
return CronUpdateMessage()
elif stat is "refresh_tomorrow":
return CronUpdateTomorrowMessage()
else:
return FailMessage("stat not in list('home', 'message', 'fail')")
class SessionManager(metaclass=Singleton):
@staticmethod
def check_user_key(user_key):
if session.find_one({'user_key': user_key}):
return True
else:
return False
def verify_session(func):
@wraps(func)
def session_wrapper(*args, **kwargs):
user_key = args[1]
if session.find_one({'user_key': user_key}):
return func(*args, **kwargs)
else:
return False
return session_wrapper
def init(self, user_key, content=None, process=None):
session.insert_one({
'user_key': user_key,
'history': [content],
'process': process,
})
@verify_session
def delete(self, user_key):
session.remove({'user_key': user_key})
@verify_session
def add_history(self, user_key, content):
user = session.find_one({'user_key': user_key})
history = user['history']
history.append(content)
user.update({'history': history})
session.save(user)
@verify_session
def get_history(self, user_key):
user = session.find_one({'user_key': user_key})
history = user['history']
return history[:]
@verify_session
def init_process(self, user_key, process):
user = session.find_one({'user_key': user_key})
user.update({'process': process})
session.save(user)
@verify_session
def expire_process(self, user_key):
user = session.find_one({'user_key': user_key})
user.update({'process': None})
session.save(user)
@verify_session
def get_process(self, user_key):
user = session.find_one({'user_key': user_key})
return user['process']
class DBManager:
def __init__(self):
_conn = pymongo.MongoClient()
_food_db = _conn.food_db
self.hakusiku = _food_db.hakusiku
self.review = _food_db.review
self.ban_list = _food_db.ban_list
if self._get_black_list() is None:
self.ban_list.insert_one({'black_list': []})
def get_hakusiku_data(self, date=None):
date = date or datetime.date.today()
date_str = date.__str__()
data = self.hakusiku.find_one({'날짜': date_str})
return data
def set_hakusiku_data(self, data, date=None):
date = date or datetime.date.today()
date_str = date.__str__()
if self.get_hakusiku_data(date=date_str) is None:
self.hakusiku.insert_one(data)
else:
self.hakusiku.replace_one({"날짜": date_str}, data)
    def is_banned_user(self, user_key):
        # membership must be checked inside the stored list, not on the wrapping document
        return user_key in self._get_black_list()['black_list']
def _get_black_list(self):
return self.ban_list.find_one({}, {'_id': 0, 'black_list': 1})
    def ban_user(self, user_key):
        # the previous version appended to the projection document and never saved it;
        # push the user_key into the persisted black_list instead
        self.ban_list.update_one({}, {'$push': {'black_list': user_key}})
def get_review(self):
date = datetime.date.today().__str__()
data = self.review.find_one({'날짜': date}) or self.init_review()
return data
def init_review(self):
date = datetime.date.today().__str__()
self.review.insert_one({
'날짜': date,
'리뷰': [],
})
return self.get_review()
def append_review(self, user_key: str, new_review: str):
def count_user_key(lst):
            # TODO: MongoDB probably has a built-in count; replace this helper with it
s = 0
for i in lst:
if i.get('user_key') == user_key:
s += 1
return s
def remove_special_char(src):
return re.sub("[!@#$%^&*()]", "", src)
review = self.get_review()
if count_user_key(review['리뷰']) < 5:
review['리뷰'].append({'user_key': user_key, 'content': remove_special_char(new_review)})
self.review.find_one_and_replace({'날짜': datetime.date.today().__str__()}, review)
else:
raise Exception('5회 이상 작성하셨습니다.')
APIAdmin = APIManager()
UserSessionAdmin = SessionManager()
DBAdmin = DBManager()
|
gomjellie/SoongSiri
|
legacy_codes/app/managers.py
|
Python
|
mit
| 10,044
|
from setuptools import setup
with open('requirements.txt') as f:
required = f.read().splitlines()
setup(
name="aloft.py",
version="0.0.4",
author="Nate Mara",
author_email="natemara@gmail.com",
description="A simple API for getting winds aloft data from NOAA",
license="MIT",
test_suite="tests",
keywords="aviation weather winds aloft",
url="https://github.com/natemara/aloft.py",
packages=['aloft'],
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"License :: OSI Approved :: MIT License",
],
install_requires=required,
)
|
natemara/aloft.py
|
setup.py
|
Python
|
mit
| 568
|
from django.apps import AppConfig
class AutodoappConfig(AppConfig):
name = 'AutoDoApp'
|
AutoDo/AutoDo
|
AutoDoApp/apps.py
|
Python
|
mit
| 93
|
"""
==================================================
Sparse linear algebra (:mod:`scipy.sparse.linalg`)
==================================================
.. currentmodule:: scipy.sparse.linalg
Abstract linear operators
-------------------------
.. autosummary::
:toctree: generated/
LinearOperator -- abstract representation of a linear operator
aslinearoperator -- convert an object to an abstract linear operator
Matrix Operations
-----------------
.. autosummary::
:toctree: generated/
inv -- compute the sparse matrix inverse
expm -- compute the sparse matrix exponential
expm_multiply -- compute the product of a matrix exponential and a matrix
Matrix norms
------------
.. autosummary::
:toctree: generated/
norm -- Norm of a sparse matrix
onenormest -- Estimate the 1-norm of a sparse matrix
Solving linear problems
-----------------------
Direct methods for linear equation systems:
.. autosummary::
:toctree: generated/
spsolve -- Solve the sparse linear system Ax=b
factorized -- Pre-factorize matrix to a function solving a linear system
MatrixRankWarning -- Warning on exactly singular matrices
use_solver -- Select direct solver to use
Iterative methods for linear equation systems:
.. autosummary::
:toctree: generated/
bicg -- Use BIConjugate Gradient iteration to solve A x = b
bicgstab -- Use BIConjugate Gradient STABilized iteration to solve A x = b
cg -- Use Conjugate Gradient iteration to solve A x = b
cgs -- Use Conjugate Gradient Squared iteration to solve A x = b
gmres -- Use Generalized Minimal RESidual iteration to solve A x = b
lgmres -- Solve a matrix equation using the LGMRES algorithm
minres -- Use MINimum RESidual iteration to solve Ax = b
qmr -- Use Quasi-Minimal Residual iteration to solve A x = b
Iterative methods for least-squares problems:
.. autosummary::
:toctree: generated/
lsqr -- Find the least-squares solution to a sparse linear equation system
lsmr -- Find the least-squares solution to a sparse linear equation system
Matrix factorizations
---------------------
Eigenvalue problems:
.. autosummary::
:toctree: generated/
eigs -- Find k eigenvalues and eigenvectors of the square matrix A
eigsh -- Find k eigenvalues and eigenvectors of a symmetric matrix
lobpcg -- Solve symmetric partial eigenproblems with optional preconditioning
Singular value problems:
.. autosummary::
:toctree: generated/
svds -- Compute k singular values/vectors for a sparse matrix
Complete or incomplete LU factorizations
.. autosummary::
:toctree: generated/
splu -- Compute a LU decomposition for a sparse matrix
spilu -- Compute an incomplete LU decomposition for a sparse matrix
SuperLU -- Object representing an LU factorization
Exceptions
----------
.. autosummary::
:toctree: generated/
ArpackNoConvergence
ArpackError
"""
from __future__ import division, print_function, absolute_import
__all__ = [s for s in dir() if not s.startswith('_')]
from numpy.testing import Tester
test = Tester().test
|
DailyActie/Surrogate-Model
|
01-codes/scipy-master/scipy/sparse/linalg/__init__.py
|
Python
|
mit
| 3,095
|
# -*- coding: utf-8 -*-
# This technical data was produced for the U. S. Government under Contract No. W15P7T-13-C-F600, and
# is subject to the Rights in Technical Data-Noncommercial Items clause at DFARS 252.227-7013 (FEB 2012)
from reversion.admin import VersionAdmin
from django.contrib.gis import admin
from .models import Feedback,Topic
@admin.register(Feedback)
class FeedbackAdmin(VersionAdmin, admin.ModelAdmin):
model = Feedback
list_display = ['name', 'email', 'topic', 'message']
save_as = True
ordering = ['topic']
@admin.register(Topic)
class TopicAdmin(VersionAdmin, admin.ModelAdmin):
model = Topic
list_display = ['name']
save_as = True
ordering = ['name']
|
ngageoint/geoq
|
geoq/agents/admin.py
|
Python
|
mit
| 710
|
import py
from rpython.rlib.jit import JitDriver, hint, set_param
from rpython.rlib.jit import unroll_safe, dont_look_inside, promote
from rpython.rlib.objectmodel import we_are_translated
from rpython.rlib.debug import fatalerror
from rpython.jit.metainterp.test.support import LLJitMixin
from rpython.jit.codewriter.policy import StopAtXPolicy
from rpython.rtyper.annlowlevel import hlstr
from rpython.jit.metainterp.warmspot import get_stats
from rpython.jit.backend.llsupport import codemap
class RecursiveTests:
def test_simple_recursion(self):
myjitdriver = JitDriver(greens=[], reds=['n', 'm'])
def f(n):
m = n - 2
while True:
myjitdriver.jit_merge_point(n=n, m=m)
n -= 1
if m == n:
return main(n) * 2
myjitdriver.can_enter_jit(n=n, m=m)
def main(n):
if n > 0:
return f(n+1)
else:
return 1
res = self.meta_interp(main, [20], enable_opts='')
assert res == main(20)
self.check_history(call_i=0)
def test_simple_recursion_with_exc(self):
myjitdriver = JitDriver(greens=[], reds=['n', 'm'])
class Error(Exception):
pass
def f(n):
m = n - 2
while True:
myjitdriver.jit_merge_point(n=n, m=m)
n -= 1
if n == 10:
raise Error
if m == n:
try:
return main(n) * 2
except Error:
return 2
myjitdriver.can_enter_jit(n=n, m=m)
def main(n):
if n > 0:
return f(n+1)
else:
return 1
res = self.meta_interp(main, [20], enable_opts='')
assert res == main(20)
def test_recursion_three_times(self):
myjitdriver = JitDriver(greens=[], reds=['n', 'm', 'total'])
def f(n):
m = n - 3
total = 0
while True:
myjitdriver.jit_merge_point(n=n, m=m, total=total)
n -= 1
total += main(n)
if m == n:
return total + 5
myjitdriver.can_enter_jit(n=n, m=m, total=total)
def main(n):
if n > 0:
return f(n)
else:
return 1
print
for i in range(1, 11):
print '%3d %9d' % (i, f(i))
res = self.meta_interp(main, [10], enable_opts='')
assert res == main(10)
self.check_enter_count_at_most(11)
def test_bug_1(self):
myjitdriver = JitDriver(greens=[], reds=['n', 'i', 'stack'])
def opaque(n, i):
if n == 1 and i == 19:
for j in range(20):
res = f(0) # recurse repeatedly, 20 times
assert res == 0
def f(n):
stack = [n]
i = 0
while i < 20:
myjitdriver.can_enter_jit(n=n, i=i, stack=stack)
myjitdriver.jit_merge_point(n=n, i=i, stack=stack)
opaque(n, i)
i += 1
return stack.pop()
res = self.meta_interp(f, [1], enable_opts='', repeat=2,
policy=StopAtXPolicy(opaque))
assert res == 1
def get_interpreter(self, codes):
ADD = "0"
JUMP_BACK = "1"
CALL = "2"
EXIT = "3"
def getloc(i, code):
return 'code="%s", i=%d' % (code, i)
jitdriver = JitDriver(greens = ['i', 'code'], reds = ['n'],
get_printable_location = getloc)
def interpret(codenum, n, i):
code = codes[codenum]
while i < len(code):
jitdriver.jit_merge_point(n=n, i=i, code=code)
op = code[i]
if op == ADD:
n += 1
i += 1
elif op == CALL:
n = interpret(1, n, 1)
i += 1
elif op == JUMP_BACK:
if n > 20:
return 42
i -= 2
jitdriver.can_enter_jit(n=n, i=i, code=code)
elif op == EXIT:
return n
else:
raise NotImplementedError
return n
return interpret
def test_inline(self):
code = "021"
subcode = "00"
codes = [code, subcode]
f = self.get_interpreter(codes)
assert self.meta_interp(f, [0, 0, 0], enable_opts='') == 42
self.check_resops(call_may_force_i=1, int_add=1, call=0)
assert self.meta_interp(f, [0, 0, 0], enable_opts='',
inline=True) == 42
self.check_resops(call=0, int_add=2, call_may_force_i=0,
guard_no_exception=0)
def test_inline_jitdriver_check(self):
code = "021"
subcode = "100"
codes = [code, subcode]
f = self.get_interpreter(codes)
assert self.meta_interp(f, [0, 0, 0], enable_opts='',
inline=True) == 42
# the call is fully inlined, because we jump to subcode[1], thus
# skipping completely the JUMP_BACK in subcode[0]
self.check_resops(call=0, call_may_force=0, call_assembler=0)
def test_guard_failure_in_inlined_function(self):
def p(pc, code):
code = hlstr(code)
return "%s %d %s" % (code, pc, code[pc])
myjitdriver = JitDriver(greens=['pc', 'code'], reds=['n'],
get_printable_location=p)
def f(code, n):
pc = 0
while pc < len(code):
myjitdriver.jit_merge_point(n=n, code=code, pc=pc)
op = code[pc]
if op == "-":
n -= 1
elif op == "c":
n = f("---i---", n)
elif op == "i":
if n % 5 == 1:
return n
elif op == "l":
if n > 0:
myjitdriver.can_enter_jit(n=n, code=code, pc=0)
pc = 0
continue
else:
assert 0
pc += 1
return n
def main(n):
return f("c-l", n)
print main(100)
res = self.meta_interp(main, [100], enable_opts='', inline=True)
assert res == 0
def test_guard_failure_and_then_exception_in_inlined_function(self):
def p(pc, code):
code = hlstr(code)
return "%s %d %s" % (code, pc, code[pc])
myjitdriver = JitDriver(greens=['pc', 'code'], reds=['n', 'flag'],
get_printable_location=p)
def f(code, n):
pc = 0
flag = False
while pc < len(code):
myjitdriver.jit_merge_point(n=n, code=code, pc=pc, flag=flag)
op = code[pc]
if op == "-":
n -= 1
elif op == "c":
try:
n = f("---ir---", n)
except Exception:
return n
elif op == "i":
if n < 200:
flag = True
elif op == "r":
if flag:
raise Exception
elif op == "l":
if n > 0:
myjitdriver.can_enter_jit(n=n, code=code, pc=0, flag=flag)
pc = 0
continue
else:
assert 0
pc += 1
return n
def main(n):
return f("c-l", n)
print main(1000)
res = self.meta_interp(main, [1000], enable_opts='', inline=True)
assert res == main(1000)
def test_exception_in_inlined_function(self):
def p(pc, code):
code = hlstr(code)
return "%s %d %s" % (code, pc, code[pc])
myjitdriver = JitDriver(greens=['pc', 'code'], reds=['n'],
get_printable_location=p)
class Exc(Exception):
pass
def f(code, n):
pc = 0
while pc < len(code):
myjitdriver.jit_merge_point(n=n, code=code, pc=pc)
op = code[pc]
if op == "-":
n -= 1
elif op == "c":
try:
n = f("---i---", n)
except Exc:
pass
elif op == "i":
if n % 5 == 1:
raise Exc
elif op == "l":
if n > 0:
myjitdriver.can_enter_jit(n=n, code=code, pc=0)
pc = 0
continue
else:
assert 0
pc += 1
return n
def main(n):
return f("c-l", n)
res = self.meta_interp(main, [100], enable_opts='', inline=True)
assert res == main(100)
def test_recurse_during_blackholing(self):
# this passes, if the blackholing shortcut for calls is turned off
# it fails, it is very delicate in terms of parameters,
# bridge/loop creation order
def p(pc, code):
code = hlstr(code)
return "%s %d %s" % (code, pc, code[pc])
myjitdriver = JitDriver(greens=['pc', 'code'], reds=['n'],
get_printable_location=p)
def f(code, n):
pc = 0
while pc < len(code):
myjitdriver.jit_merge_point(n=n, code=code, pc=pc)
op = code[pc]
if op == "-":
n -= 1
elif op == "c":
if n < 70 and n % 3 == 1:
n = f("--", n)
elif op == "l":
if n > 0:
myjitdriver.can_enter_jit(n=n, code=code, pc=0)
pc = 0
continue
else:
assert 0
pc += 1
return n
def main(n):
set_param(None, 'threshold', 3)
set_param(None, 'trace_eagerness', 5)
return f("c-l", n)
expected = main(100)
res = self.meta_interp(main, [100], enable_opts='', inline=True)
assert res == expected
def check_max_trace_length(self, length):
for loop in get_stats().loops:
assert len(loop.operations) <= length + 5 # because we only check once per metainterp bytecode
for op in loop.operations:
if op.is_guard() and hasattr(op.getdescr(), '_debug_suboperations'):
assert len(op.getdescr()._debug_suboperations) <= length + 5
def test_inline_trace_limit(self):
myjitdriver = JitDriver(greens=[], reds=['n'])
def recursive(n):
if n > 0:
return recursive(n - 1) + 1
return 0
def loop(n):
set_param(myjitdriver, "threshold", 10)
pc = 0
while n:
myjitdriver.can_enter_jit(n=n)
myjitdriver.jit_merge_point(n=n)
n = recursive(n)
n -= 1
return n
TRACE_LIMIT = 66
res = self.meta_interp(loop, [100], enable_opts='', inline=True, trace_limit=TRACE_LIMIT)
assert res == 0
self.check_max_trace_length(TRACE_LIMIT)
self.check_enter_count_at_most(10) # maybe
self.check_aborted_count(6)
def test_trace_limit_bridge(self):
def recursive(n):
if n > 0:
return recursive(n - 1) + 1
return 0
myjitdriver = JitDriver(greens=[], reds=['n'])
def loop(n):
set_param(None, "threshold", 4)
set_param(None, "trace_eagerness", 2)
while n:
myjitdriver.can_enter_jit(n=n)
myjitdriver.jit_merge_point(n=n)
if n % 5 == 0:
n -= 1
if n < 50:
n = recursive(n)
n -= 1
return n
TRACE_LIMIT = 20
res = self.meta_interp(loop, [100], enable_opts='', inline=True, trace_limit=TRACE_LIMIT)
self.check_max_trace_length(TRACE_LIMIT)
self.check_aborted_count(8)
self.check_enter_count_at_most(30)
def test_trace_limit_with_exception_bug(self):
myjitdriver = JitDriver(greens=[], reds=['n'])
@unroll_safe
def do_stuff(n):
while n > 0:
n -= 1
raise ValueError
def loop(n):
pc = 0
while n > 80:
myjitdriver.can_enter_jit(n=n)
myjitdriver.jit_merge_point(n=n)
try:
do_stuff(n)
except ValueError:
# the trace limit is checked when we arrive here, and we
# have the exception still in last_exc_value_box at this
# point -- so when we abort because of a trace too long,
# the exception is passed to the blackhole interp and
# incorrectly re-raised from here
pass
n -= 1
return n
TRACE_LIMIT = 66
res = self.meta_interp(loop, [100], trace_limit=TRACE_LIMIT)
assert res == 80
def test_max_failure_args(self):
FAILARGS_LIMIT = 10
jitdriver = JitDriver(greens = [], reds = ['i', 'n', 'o'])
class A(object):
def __init__(self, i0, i1, i2, i3, i4, i5, i6, i7, i8, i9):
self.i0 = i0
self.i1 = i1
self.i2 = i2
self.i3 = i3
self.i4 = i4
self.i5 = i5
self.i6 = i6
self.i7 = i7
self.i8 = i8
self.i9 = i9
def loop(n):
i = 0
o = A(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)
while i < n:
jitdriver.can_enter_jit(o=o, i=i, n=n)
jitdriver.jit_merge_point(o=o, i=i, n=n)
o = A(i, i + 1, i + 2, i + 3, i + 4, i + 5,
i + 6, i + 7, i + 8, i + 9)
i += 1
return o
res = self.meta_interp(loop, [20], failargs_limit=FAILARGS_LIMIT,
listops=True)
self.check_aborted_count(4)
def test_max_failure_args_exc(self):
FAILARGS_LIMIT = 10
jitdriver = JitDriver(greens = [], reds = ['i', 'n', 'o'])
class A(object):
def __init__(self, i0, i1, i2, i3, i4, i5, i6, i7, i8, i9):
self.i0 = i0
self.i1 = i1
self.i2 = i2
self.i3 = i3
self.i4 = i4
self.i5 = i5
self.i6 = i6
self.i7 = i7
self.i8 = i8
self.i9 = i9
def loop(n):
i = 0
o = A(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)
while i < n:
jitdriver.can_enter_jit(o=o, i=i, n=n)
jitdriver.jit_merge_point(o=o, i=i, n=n)
o = A(i, i + 1, i + 2, i + 3, i + 4, i + 5,
i + 6, i + 7, i + 8, i + 9)
i += 1
raise ValueError
def main(n):
try:
loop(n)
return 1
except ValueError:
return 0
res = self.meta_interp(main, [20], failargs_limit=FAILARGS_LIMIT,
listops=True)
assert not res
self.check_aborted_count(4)
def test_set_param_inlining(self):
myjitdriver = JitDriver(greens=[], reds=['n', 'recurse'])
def loop(n, recurse=False):
while n:
myjitdriver.jit_merge_point(n=n, recurse=recurse)
n -= 1
if not recurse:
loop(10, True)
myjitdriver.can_enter_jit(n=n, recurse=recurse)
return n
TRACE_LIMIT = 66
def main(inline):
set_param(None, "threshold", 10)
set_param(None, 'function_threshold', 60)
if inline:
set_param(None, 'inlining', True)
else:
set_param(None, 'inlining', False)
return loop(100)
res = self.meta_interp(main, [0], enable_opts='', trace_limit=TRACE_LIMIT)
self.check_resops(call=0, call_may_force_i=1)
res = self.meta_interp(main, [1], enable_opts='', trace_limit=TRACE_LIMIT)
self.check_resops(call=0, call_may_force=0)
def test_trace_from_start(self):
def p(pc, code):
code = hlstr(code)
return "'%s' at %d: %s" % (code, pc, code[pc])
myjitdriver = JitDriver(greens=['pc', 'code'], reds=['n'],
get_printable_location=p)
def f(code, n):
pc = 0
while pc < len(code):
myjitdriver.jit_merge_point(n=n, code=code, pc=pc)
op = code[pc]
if op == "+":
n += 7
elif op == "-":
n -= 1
elif op == "c":
n = f('---', n)
elif op == "l":
if n > 0:
myjitdriver.can_enter_jit(n=n, code=code, pc=1)
pc = 1
continue
else:
assert 0
pc += 1
return n
def g(m):
if m > 1000000:
f('', 0)
result = 0
for i in range(m):
                result += f('+-cl--', i)
            return result
res = self.meta_interp(g, [50], backendopt=True)
assert res == g(50)
        py.test.skip("tracing from start is by now only enabled "
                     "if a trace gets too big")
self.check_tree_loop_count(3)
self.check_history(int_add=1)
def test_dont_inline_huge_stuff(self):
def p(pc, code):
code = hlstr(code)
return "%s %d %s" % (code, pc, code[pc])
myjitdriver = JitDriver(greens=['pc', 'code'], reds=['n'],
get_printable_location=p,
is_recursive=True)
def f(code, n):
pc = 0
while pc < len(code):
myjitdriver.jit_merge_point(n=n, code=code, pc=pc)
op = code[pc]
if op == "-":
n -= 1
elif op == "c":
f('--------------------', n)
elif op == "l":
if n > 0:
myjitdriver.can_enter_jit(n=n, code=code, pc=0)
pc = 0
continue
else:
assert 0
pc += 1
return n
def g(m):
set_param(None, 'inlining', True)
# carefully chosen threshold to make sure that the inner function
# cannot be inlined, but the inner function on its own is small
# enough
set_param(None, 'trace_limit', 40)
if m > 1000000:
f('', 0)
result = 0
for i in range(m):
result += f('-c-----------l-', i+100)
self.meta_interp(g, [10], backendopt=True)
self.check_aborted_count(1)
self.check_resops(call=0, call_assembler_i=2)
self.check_jitcell_token_count(2)
def test_directly_call_assembler(self):
driver = JitDriver(greens = ['codeno'], reds = ['i'],
get_printable_location = lambda codeno : str(codeno))
def portal(codeno):
i = 0
while i < 10:
driver.can_enter_jit(codeno = codeno, i = i)
driver.jit_merge_point(codeno = codeno, i = i)
if codeno == 2:
portal(1)
i += 1
self.meta_interp(portal, [2], inline=True)
self.check_history(call_assembler_n=1)
def test_recursion_cant_call_assembler_directly(self):
driver = JitDriver(greens = ['codeno'], reds = ['i', 'j'],
get_printable_location = lambda codeno : str(codeno))
def portal(codeno, j):
i = 1
while 1:
driver.jit_merge_point(codeno=codeno, i=i, j=j)
if (i >> 1) == 1:
if j == 0:
return
portal(2, j - 1)
elif i == 5:
return
i += 1
driver.can_enter_jit(codeno=codeno, i=i, j=j)
portal(2, 5)
from rpython.jit.metainterp import compile, pyjitpl
pyjitpl._warmrunnerdesc = None
trace = []
def my_ctc(*args):
looptoken = original_ctc(*args)
trace.append(looptoken)
return looptoken
original_ctc = compile.compile_tmp_callback
try:
compile.compile_tmp_callback = my_ctc
self.meta_interp(portal, [2, 5], inline=True)
self.check_resops(call_may_force=0, call_assembler_n=2)
finally:
compile.compile_tmp_callback = original_ctc
# check that we made a temporary callback
assert len(trace) == 1
# and that we later redirected it to something else
try:
redirected = pyjitpl._warmrunnerdesc.cpu._redirected_call_assembler
except AttributeError:
pass # not the llgraph backend
else:
print redirected
assert redirected.keys() == trace
def test_recursion_cant_call_assembler_directly_with_virtualizable(self):
# exactly the same logic as the previous test, but with 'frame.j'
# instead of just 'j'
class Frame(object):
_virtualizable_ = ['j']
def __init__(self, j):
self.j = j
driver = JitDriver(greens = ['codeno'], reds = ['i', 'frame'],
virtualizables = ['frame'],
get_printable_location = lambda codeno : str(codeno))
def portal(codeno, frame):
i = 1
while 1:
driver.jit_merge_point(codeno=codeno, i=i, frame=frame)
if (i >> 1) == 1:
if frame.j == 0:
return
portal(2, Frame(frame.j - 1))
elif i == 5:
return
i += 1
driver.can_enter_jit(codeno=codeno, i=i, frame=frame)
def main(codeno, j):
portal(codeno, Frame(j))
main(2, 5)
from rpython.jit.metainterp import compile, pyjitpl
pyjitpl._warmrunnerdesc = None
trace = []
def my_ctc(*args):
looptoken = original_ctc(*args)
trace.append(looptoken)
return looptoken
original_ctc = compile.compile_tmp_callback
try:
compile.compile_tmp_callback = my_ctc
self.meta_interp(main, [2, 5], inline=True)
self.check_resops(call_may_force=0, call_assembler_n=2)
finally:
compile.compile_tmp_callback = original_ctc
# check that we made a temporary callback
assert len(trace) == 1
# and that we later redirected it to something else
try:
redirected = pyjitpl._warmrunnerdesc.cpu._redirected_call_assembler
except AttributeError:
pass # not the llgraph backend
else:
print redirected
assert redirected.keys() == trace
def test_directly_call_assembler_return(self):
driver = JitDriver(greens = ['codeno'], reds = ['i', 'k'],
get_printable_location = lambda codeno : str(codeno))
def portal(codeno):
i = 0
k = codeno
while i < 10:
driver.can_enter_jit(codeno = codeno, i = i, k = k)
driver.jit_merge_point(codeno = codeno, i = i, k = k)
if codeno == 2:
k = portal(1)
i += 1
return k
self.meta_interp(portal, [2], inline=True)
self.check_history(call_assembler_i=1)
def test_directly_call_assembler_raise(self):
class MyException(Exception):
def __init__(self, x):
self.x = x
driver = JitDriver(greens = ['codeno'], reds = ['i'],
get_printable_location = lambda codeno : str(codeno))
def portal(codeno):
i = 0
while i < 10:
driver.can_enter_jit(codeno = codeno, i = i)
driver.jit_merge_point(codeno = codeno, i = i)
if codeno == 2:
try:
portal(1)
except MyException as me:
i += me.x
i += 1
if codeno == 1:
raise MyException(1)
self.meta_interp(portal, [2], inline=True)
self.check_history(call_assembler_n=1)
def test_directly_call_assembler_fail_guard(self):
driver = JitDriver(greens = ['codeno'], reds = ['i', 'k'],
get_printable_location = lambda codeno : str(codeno))
def portal(codeno, k):
i = 0
while i < 10:
driver.can_enter_jit(codeno=codeno, i=i, k=k)
driver.jit_merge_point(codeno=codeno, i=i, k=k)
if codeno == 2:
k += portal(1, k)
elif k > 40:
if i % 2:
k += 1
else:
k += 2
k += 1
i += 1
return k
res = self.meta_interp(portal, [2, 0], inline=True)
assert res == 13542
def test_directly_call_assembler_virtualizable(self):
class Thing(object):
def __init__(self, val):
self.val = val
class Frame(object):
_virtualizable_ = ['thing']
driver = JitDriver(greens = ['codeno'], reds = ['i', 's', 'frame'],
virtualizables = ['frame'],
get_printable_location = lambda codeno : str(codeno))
def main(codeno):
frame = Frame()
frame.thing = Thing(0)
result = portal(codeno, frame)
return result
def portal(codeno, frame):
i = 0
s = 0
while i < 10:
driver.can_enter_jit(frame=frame, codeno=codeno, i=i, s=s)
driver.jit_merge_point(frame=frame, codeno=codeno, i=i, s=s)
nextval = frame.thing.val
if codeno == 0:
subframe = Frame()
subframe.thing = Thing(nextval)
nextval = portal(1, subframe)
s += subframe.thing.val
frame.thing = Thing(nextval + 1)
i += 1
return frame.thing.val + s
res = self.meta_interp(main, [0], inline=True)
self.check_resops(call=0, cond_call=2)
assert res == main(0)
def test_directly_call_assembler_virtualizable_reset_token(self):
py.test.skip("not applicable any more, I think")
from rpython.rtyper.lltypesystem import lltype
from rpython.rlib.debug import llinterpcall
class Thing(object):
def __init__(self, val):
self.val = val
class Frame(object):
_virtualizable_ = ['thing']
driver = JitDriver(greens = ['codeno'], reds = ['i', 'frame'],
virtualizables = ['frame'],
get_printable_location = lambda codeno : str(codeno))
@dont_look_inside
def check_frame(subframe):
if we_are_translated():
llinterpcall(lltype.Void, check_ll_frame, subframe)
def check_ll_frame(ll_subframe):
# This is called with the low-level Struct that is the frame.
# Check that the vable_token was correctly reset to zero.
# Note that in order for that test to catch failures, it needs
# three levels of recursion: the vable_token of the subframe
# at the level 2 is set to a non-zero value when doing the
# call to the level 3 only. This used to fail when the test
# is run via rpython.jit.backend.x86.test.test_recursive.
from rpython.jit.metainterp.virtualizable import TOKEN_NONE
assert ll_subframe.vable_token == TOKEN_NONE
def main(codeno):
frame = Frame()
frame.thing = Thing(0)
portal(codeno, frame)
return frame.thing.val
def portal(codeno, frame):
i = 0
while i < 5:
driver.can_enter_jit(frame=frame, codeno=codeno, i=i)
driver.jit_merge_point(frame=frame, codeno=codeno, i=i)
nextval = frame.thing.val
if codeno < 2:
subframe = Frame()
subframe.thing = Thing(nextval)
nextval = portal(codeno + 1, subframe)
check_frame(subframe)
frame.thing = Thing(nextval + 1)
i += 1
return frame.thing.val
res = self.meta_interp(main, [0], inline=True)
assert res == main(0)
def test_directly_call_assembler_virtualizable_force1(self):
class Thing(object):
def __init__(self, val):
self.val = val
class Frame(object):
_virtualizable_ = ['thing']
driver = JitDriver(greens = ['codeno'], reds = ['i', 'frame'],
virtualizables = ['frame'],
get_printable_location = lambda codeno : str(codeno))
class SomewhereElse(object):
pass
somewhere_else = SomewhereElse()
def change(newthing):
somewhere_else.frame.thing = newthing
def main(codeno):
frame = Frame()
somewhere_else.frame = frame
frame.thing = Thing(0)
portal(codeno, frame)
return frame.thing.val
def portal(codeno, frame):
print 'ENTER:', codeno, frame.thing.val
i = 0
while i < 10:
driver.can_enter_jit(frame=frame, codeno=codeno, i=i)
driver.jit_merge_point(frame=frame, codeno=codeno, i=i)
nextval = frame.thing.val
if codeno == 0:
subframe = Frame()
subframe.thing = Thing(nextval)
nextval = portal(1, subframe)
elif codeno == 1:
if frame.thing.val > 40:
change(Thing(13))
nextval = 13
else:
fatalerror("bad codeno = " + str(codeno))
frame.thing = Thing(nextval + 1)
i += 1
print 'LEAVE:', codeno, frame.thing.val
return frame.thing.val
res = self.meta_interp(main, [0], inline=True,
policy=StopAtXPolicy(change))
assert res == main(0)
def test_directly_call_assembler_virtualizable_with_array(self):
myjitdriver = JitDriver(greens = ['codeno'], reds = ['n', 'x', 'frame'],
virtualizables = ['frame'])
class Frame(object):
_virtualizable_ = ['l[*]', 's']
def __init__(self, l, s):
self = hint(self, access_directly=True,
fresh_virtualizable=True)
self.l = l
self.s = s
def main(codeno, n, a):
frame = Frame([a, a+1, a+2, a+3], 0)
return f(codeno, n, a, frame)
def f(codeno, n, a, frame):
x = 0
while n > 0:
myjitdriver.can_enter_jit(codeno=codeno, frame=frame, n=n, x=x)
myjitdriver.jit_merge_point(codeno=codeno, frame=frame, n=n,
x=x)
frame.s = promote(frame.s)
n -= 1
s = frame.s
assert s >= 0
x += frame.l[s]
frame.s += 1
if codeno == 0:
subframe = Frame([n, n+1, n+2, n+3], 0)
x += f(1, 10, 1, subframe)
s = frame.s
assert s >= 0
x += frame.l[s]
x += len(frame.l)
frame.s -= 1
return x
res = self.meta_interp(main, [0, 10, 1], listops=True, inline=True)
assert res == main(0, 10, 1)
def test_directly_call_assembler_virtualizable_force_blackhole(self):
class Thing(object):
def __init__(self, val):
self.val = val
class Frame(object):
_virtualizable_ = ['thing']
driver = JitDriver(greens = ['codeno'], reds = ['i', 'frame'],
virtualizables = ['frame'],
get_printable_location = lambda codeno : str(codeno))
class SomewhereElse(object):
pass
somewhere_else = SomewhereElse()
def change(newthing, arg):
print arg
if arg > 30:
somewhere_else.frame.thing = newthing
arg = 13
return arg
def main(codeno):
frame = Frame()
somewhere_else.frame = frame
frame.thing = Thing(0)
portal(codeno, frame)
return frame.thing.val
def portal(codeno, frame):
i = 0
while i < 10:
driver.can_enter_jit(frame=frame, codeno=codeno, i=i)
driver.jit_merge_point(frame=frame, codeno=codeno, i=i)
nextval = frame.thing.val
if codeno == 0:
subframe = Frame()
subframe.thing = Thing(nextval)
nextval = portal(1, subframe)
else:
nextval = change(Thing(13), frame.thing.val)
frame.thing = Thing(nextval + 1)
i += 1
return frame.thing.val
res = self.meta_interp(main, [0], inline=True,
policy=StopAtXPolicy(change))
assert res == main(0)
def test_assembler_call_red_args(self):
driver = JitDriver(greens = ['codeno'], reds = ['i', 'k'],
get_printable_location = lambda codeno : str(codeno))
def residual(k):
if k > 150:
return 0
return 1
def portal(codeno, k):
i = 0
while i < 15:
driver.can_enter_jit(codeno=codeno, i=i, k=k)
driver.jit_merge_point(codeno=codeno, i=i, k=k)
if codeno == 2:
k += portal(residual(k), k)
if codeno == 0:
k += 2
elif codeno == 1:
k += 1
i += 1
return k
res = self.meta_interp(portal, [2, 0], inline=True,
policy=StopAtXPolicy(residual))
assert res == portal(2, 0)
self.check_resops(call_assembler_i=4)
def test_inline_without_hitting_the_loop(self):
driver = JitDriver(greens = ['codeno'], reds = ['i'],
get_printable_location = lambda codeno : str(codeno))
def portal(codeno):
i = 0
while True:
driver.jit_merge_point(codeno=codeno, i=i)
if codeno < 10:
i += portal(20)
codeno += 1
elif codeno == 10:
if i > 63:
return i
codeno = 0
driver.can_enter_jit(codeno=codeno, i=i)
else:
return 1
assert portal(0) == 70
res = self.meta_interp(portal, [0], inline=True)
assert res == 70
self.check_resops(call_assembler=0)
def test_inline_with_hitting_the_loop_sometimes(self):
driver = JitDriver(greens = ['codeno'], reds = ['i', 'k'],
get_printable_location = lambda codeno : str(codeno))
def portal(codeno, k):
if k > 2:
return 1
i = 0
while True:
driver.jit_merge_point(codeno=codeno, i=i, k=k)
if codeno < 10:
i += portal(codeno + 5, k+1)
codeno += 1
elif codeno == 10:
if i > [-1, 2000, 63][k]:
return i
codeno = 0
driver.can_enter_jit(codeno=codeno, i=i, k=k)
else:
return 1
assert portal(0, 1) == 2095
res = self.meta_interp(portal, [0, 1], inline=True)
assert res == 2095
self.check_resops(call_assembler_i=12)
def test_inline_with_hitting_the_loop_sometimes_exc(self):
driver = JitDriver(greens = ['codeno'], reds = ['i', 'k'],
get_printable_location = lambda codeno : str(codeno))
class GotValue(Exception):
def __init__(self, result):
self.result = result
def portal(codeno, k):
if k > 2:
raise GotValue(1)
i = 0
while True:
driver.jit_merge_point(codeno=codeno, i=i, k=k)
if codeno < 10:
try:
portal(codeno + 5, k+1)
except GotValue as e:
i += e.result
codeno += 1
elif codeno == 10:
if i > [-1, 2000, 63][k]:
raise GotValue(i)
codeno = 0
driver.can_enter_jit(codeno=codeno, i=i, k=k)
else:
raise GotValue(1)
def main(codeno, k):
try:
portal(codeno, k)
except GotValue as e:
return e.result
assert main(0, 1) == 2095
res = self.meta_interp(main, [0, 1], inline=True)
assert res == 2095
self.check_resops(call_assembler_n=12)
def test_inline_recursion_limit(self):
driver = JitDriver(greens = ["threshold", "loop"], reds=["i"])
@dont_look_inside
def f():
set_param(driver, "max_unroll_recursion", 10)
def portal(threshold, loop, i):
f()
if i > threshold:
return i
while True:
driver.jit_merge_point(threshold=threshold, loop=loop, i=i)
if loop:
portal(threshold, False, 0)
else:
portal(threshold, False, i + 1)
return i
if i > 10:
return 1
i += 1
driver.can_enter_jit(threshold=threshold, loop=loop, i=i)
res1 = portal(10, True, 0)
res2 = self.meta_interp(portal, [10, True, 0], inline=True)
assert res1 == res2
self.check_resops(call_assembler_i=2)
res1 = portal(9, True, 0)
res2 = self.meta_interp(portal, [9, True, 0], inline=True)
assert res1 == res2
self.check_resops(call_assembler=0)
def test_handle_jitexception_in_portal(self):
# a test for _handle_jitexception_in_portal in blackhole.py
driver = JitDriver(greens = ['codeno'], reds = ['i', 'str'],
get_printable_location = lambda codeno: str(codeno))
def do_can_enter_jit(codeno, i, str):
i = (i+1)-1 # some operations
driver.can_enter_jit(codeno=codeno, i=i, str=str)
def intermediate(codeno, i, str):
if i == 9:
do_can_enter_jit(codeno, i, str)
def portal(codeno, str):
i = value.initial
while i < 10:
intermediate(codeno, i, str)
driver.jit_merge_point(codeno=codeno, i=i, str=str)
i += 1
if codeno == 64 and i == 10:
str = portal(96, str)
str += chr(codeno+i)
return str
class Value:
initial = -1
value = Value()
def main():
value.initial = 0
return (portal(64, '') +
portal(64, '') +
portal(64, '') +
portal(64, '') +
portal(64, ''))
assert main() == 'ABCDEFGHIabcdefghijJ' * 5
for tlimit in [95, 90, 102]:
print 'tlimit =', tlimit
res = self.meta_interp(main, [], inline=True, trace_limit=tlimit)
assert ''.join(res.chars) == 'ABCDEFGHIabcdefghijJ' * 5
def test_handle_jitexception_in_portal_returns_void(self):
# a test for _handle_jitexception_in_portal in blackhole.py
driver = JitDriver(greens = ['codeno'], reds = ['i', 'str'],
get_printable_location = lambda codeno: str(codeno))
def do_can_enter_jit(codeno, i, str):
i = (i+1)-1 # some operations
driver.can_enter_jit(codeno=codeno, i=i, str=str)
def intermediate(codeno, i, str):
if i == 9:
do_can_enter_jit(codeno, i, str)
def portal(codeno, str):
i = value.initial
while i < 10:
intermediate(codeno, i, str)
driver.jit_merge_point(codeno=codeno, i=i, str=str)
i += 1
if codeno == 64 and i == 10:
portal(96, str)
str += chr(codeno+i)
class Value:
initial = -1
value = Value()
def main():
value.initial = 0
portal(64, '')
portal(64, '')
portal(64, '')
portal(64, '')
portal(64, '')
main()
for tlimit in [95, 90, 102]:
print 'tlimit =', tlimit
self.meta_interp(main, [], inline=True, trace_limit=tlimit)
def test_no_duplicates_bug(self):
driver = JitDriver(greens = ['codeno'], reds = ['i'],
get_printable_location = lambda codeno: str(codeno))
def portal(codeno, i):
while i > 0:
driver.can_enter_jit(codeno=codeno, i=i)
driver.jit_merge_point(codeno=codeno, i=i)
if codeno > 0:
break
portal(i, i)
i -= 1
self.meta_interp(portal, [0, 10], inline=True)
def test_trace_from_start_always(self):
from rpython.rlib.nonconst import NonConstant
driver = JitDriver(greens = ['c'], reds = ['i', 'v'])
def portal(c, i, v):
while i > 0:
driver.jit_merge_point(c=c, i=i, v=v)
portal(c, i - 1, v)
if v:
driver.can_enter_jit(c=c, i=i, v=v)
break
def main(c, i, _set_param, v):
if _set_param:
set_param(driver, 'function_threshold', 0)
portal(c, i, v)
self.meta_interp(main, [10, 10, False, False], inline=True)
self.check_jitcell_token_count(1)
self.check_trace_count(1)
self.meta_interp(main, [3, 10, True, False], inline=True)
self.check_jitcell_token_count(0)
self.check_trace_count(0)
def test_trace_from_start_does_not_prevent_inlining(self):
driver = JitDriver(greens = ['c', 'bc'], reds = ['i'])
def portal(bc, c, i):
while True:
driver.jit_merge_point(c=c, bc=bc, i=i)
if bc == 0:
portal(1, 8, 0)
c += 1
else:
return
if c == 10: # bc == 0
c = 0
if i >= 100:
return
driver.can_enter_jit(c=c, bc=bc, i=i)
i += 1
self.meta_interp(portal, [0, 0, 0], inline=True)
self.check_resops(call_may_force=0, call=0)
def test_dont_repeatedly_trace_from_the_same_guard(self):
driver = JitDriver(greens = [], reds = ['level', 'i'])
def portal(level):
if level == 0:
i = -10
else:
i = 0
#
while True:
driver.jit_merge_point(level=level, i=i)
if level == 25:
return 42
i += 1
if i <= 0: # <- guard
continue # first make a loop
else:
# then we fail the guard above, doing a recursive call,
# which will itself fail the same guard above, and so on
return portal(level + 1)
self.meta_interp(portal, [0])
self.check_trace_count_at_most(2) # and not, e.g., 24
def test_get_unique_id(self):
lst = []
def reg_codemap(self, (start, size, l)):
lst.append((start, size))
old_reg_codemap(self, (start, size, l))
old_reg_codemap = codemap.CodemapStorage.register_codemap
try:
codemap.CodemapStorage.register_codemap = reg_codemap
def get_unique_id(pc, code):
return (code + 1) * 2
driver = JitDriver(greens=["pc", "code"], reds='auto',
get_unique_id=get_unique_id, is_recursive=True)
def f(pc, code):
i = 0
while i < 10:
driver.jit_merge_point(pc=pc, code=code)
pc += 1
if pc == 3:
if code == 1:
f(0, 0)
pc = 0
i += 1
self.meta_interp(f, [0, 1], inline=True)
self.check_get_unique_id(lst) # overloaded on assembler backends
finally:
codemap.CodemapStorage.register_codemap = old_reg_codemap
def check_get_unique_id(self, lst):
pass
class TestLLtype(RecursiveTests, LLJitMixin):
pass
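# --- Illustrative sketch (editor addition, not part of the original test file) ---
# A minimal form of the recursive-portal pattern exercised by the tests above,
# assuming the JitDriver import already present in this module.  The green
# 'codeno' identifies the piece of interpreted code, the red 'i' carries the
# run-time state, and the recursive call is what the JIT may compile into a
# call_assembler operation when it cannot (or should not) inline it.
_example_driver = JitDriver(greens=['codeno'], reds=['i'])
def _example_recursive_portal(codeno):
    i = 0
    while i < 10:
        _example_driver.can_enter_jit(codeno=codeno, i=i)
        _example_driver.jit_merge_point(codeno=codeno, i=i)
        if codeno == 2:
            _example_recursive_portal(1)  # recursion into the same portal
        i += 1
    return i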
|
oblique-labs/pyVM
|
rpython/jit/metainterp/test/test_recursive.py
|
Python
|
mit
| 47,329
|
"""Top-level module for releng-sop."""
|
release-engineering/releng-sop
|
releng_sop/__init__.py
|
Python
|
mit
| 39
|
# follow/models.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from datetime import datetime, timedelta
from django.db import models
from election.models import ElectionManager
from exception.models import handle_exception, handle_record_found_more_than_one_exception,\
handle_record_not_found_exception, handle_record_not_saved_exception, print_to_log
from issue.models import IssueManager
from organization.models import OrganizationManager
import pytz
import wevote_functions.admin
from wevote_functions.functions import positive_value_exists
from voter.models import VoterManager
FOLLOWING = 'FOLLOWING'
STOP_FOLLOWING = 'STOP_FOLLOWING'
FOLLOW_IGNORE = 'FOLLOW_IGNORE'
STOP_IGNORING = 'STOP_IGNORING'
FOLLOWING_CHOICES = (
(FOLLOWING, 'Following'),
(STOP_FOLLOWING, 'Not Following'),
(FOLLOW_IGNORE, 'Ignoring'),
(STOP_IGNORING, 'Not Ignoring'),
)
# Kinds of lists of suggested organizations
UPDATE_SUGGESTIONS_FROM_TWITTER_IDS_I_FOLLOW = 'UPDATE_SUGGESTIONS_FROM_TWITTER_IDS_I_FOLLOW'
UPDATE_SUGGESTIONS_FROM_WHAT_FRIENDS_FOLLOW = 'UPDATE_SUGGESTIONS_FROM_WHAT_FRIENDS_FOLLOW'
UPDATE_SUGGESTIONS_FROM_WHAT_FRIENDS_FOLLOW_ON_TWITTER = \
'UPDATE_SUGGESTIONS_FROM_WHAT_FRIENDS_FOLLOW_ON_TWITTER'
UPDATE_SUGGESTIONS_FROM_WHAT_FRIEND_FOLLOWS = 'UPDATE_SUGGESTIONS_FROM_WHAT_FRIEND_FOLLOWS'
UPDATE_SUGGESTIONS_FROM_WHAT_FRIEND_FOLLOWS_ON_TWITTER = \
'UPDATE_SUGGESTIONS_FROM_WHAT_FRIEND_FOLLOWS_ON_TWITTER'
UPDATE_SUGGESTIONS_ALL = 'UPDATE_SUGGESTIONS_ALL'
FOLLOW_SUGGESTIONS_FROM_TWITTER_IDS_I_FOLLOW = 'FOLLOW_SUGGESTIONS_FROM_TWITTER_IDS_I_FOLLOW'
FOLLOW_SUGGESTIONS_FROM_FRIENDS = 'FOLLOW_SUGGESTIONS_FROM_FRIENDS'
FOLLOW_SUGGESTIONS_FROM_FRIENDS_ON_TWITTER = 'FOLLOW_SUGGESTIONS_FROM_FRIENDS_ON_TWITTER'
logger = wevote_functions.admin.get_logger(__name__)
class FollowCampaignX(models.Model):
voter_we_vote_id = models.CharField(max_length=255, null=True, blank=True, unique=False, db_index=True)
organization_we_vote_id = models.CharField(max_length=255, null=True, blank=True, unique=False)
campaignx_id = models.PositiveIntegerField(null=True, blank=True)
campaignx_we_vote_id = models.CharField(max_length=255, null=True, blank=True, unique=False)
date_last_changed = models.DateTimeField(verbose_name='date last changed', null=True, auto_now=True, db_index=True)
class FollowCampaignXManager(models.Manager):
def __unicode__(self):
return "FollowCampaignXManager"
def toggle_on_follow_campaignx(self, voter_we_vote_id, issue_id, issue_we_vote_id, following_status):
follow_campaignx_on_stage_found = False
follow_campaignx_changed = False
follow_campaignx_on_stage_id = 0
follow_campaignx_on_stage = FollowIssue()
status = ''
issue_identifier_exists = positive_value_exists(issue_we_vote_id) or positive_value_exists(issue_id)
if not positive_value_exists(voter_we_vote_id) and not issue_identifier_exists:
results = {
'success': True if follow_campaignx_on_stage_found else False,
'status': 'Insufficient inputs to toggle issue link, try passing ids for voter and issue ',
'follow_campaignx_found': follow_campaignx_on_stage_found,
'follow_campaignx_id': follow_campaignx_on_stage_id,
'follow_campaignx': follow_campaignx_on_stage,
}
return results
        # Does a follow_campaignx entry from this voter already exist?
follow_campaignx_manager = FollowIssueManager()
follow_campaignx_id = 0
results = follow_campaignx_manager.retrieve_follow_campaignx(follow_campaignx_id, voter_we_vote_id, issue_id,
issue_we_vote_id)
if results['MultipleObjectsReturned']:
status += 'TOGGLE_FOLLOWING_ISSUE MultipleObjectsReturned ' + following_status
delete_results = follow_campaignx_manager.delete_follow_campaignx(
follow_campaignx_id, voter_we_vote_id, issue_id, issue_we_vote_id)
status += delete_results['status']
results = follow_campaignx_manager.retrieve_follow_campaignx(follow_campaignx_id, voter_we_vote_id, issue_id,
issue_we_vote_id)
if results['follow_campaignx_found']:
follow_campaignx_on_stage = results['follow_campaignx']
            # Update this follow_campaignx entry with new values - we do not delete because we might be able to use it again
try:
follow_campaignx_on_stage.following_status = following_status
                # We don't need to update date_last_changed here because we set auto_now=True on the field
# follow_campaignx_on_stage.date_last_changed =
follow_campaignx_on_stage.save()
follow_campaignx_changed = True
follow_campaignx_on_stage_id = follow_campaignx_on_stage.id
follow_campaignx_on_stage_found = True
status += 'FOLLOW_STATUS_UPDATED_AS ' + following_status
except Exception as e:
status += 'FAILED_TO_UPDATE ' + following_status
handle_record_not_saved_exception(e, logger=logger, exception_message_optional=status)
elif results['DoesNotExist']:
try:
# Create new follow_campaignx entry
# First make sure that issue_id is for a valid issue
issue_manager = IssueManager()
if positive_value_exists(issue_id):
results = issue_manager.retrieve_issue(issue_id)
else:
results = issue_manager.retrieve_issue(0, issue_we_vote_id)
if results['issue_found']:
issue = results['issue']
follow_campaignx_on_stage = FollowIssue(
voter_we_vote_id=voter_we_vote_id,
issue_id=issue.id,
issue_we_vote_id=issue.we_vote_id,
following_status=following_status,
)
# if auto_followed_from_twitter_suggestion:
# follow_campaignx_on_stage.auto_followed_from_twitter_suggestion = True
follow_campaignx_on_stage.save()
follow_campaignx_changed = True
follow_campaignx_on_stage_id = follow_campaignx_on_stage.id
follow_campaignx_on_stage_found = True
status += 'CREATE ' + following_status
else:
status = 'ISSUE_NOT_FOUND_ON_CREATE ' + following_status
except Exception as e:
status += 'FAILED_TO_UPDATE ' + following_status
handle_record_not_saved_exception(e, logger=logger, exception_message_optional=status)
else:
status += results['status']
results = {
'success': True if follow_campaignx_on_stage_found else False,
'status': status,
'follow_campaignx_found': follow_campaignx_on_stage_found,
'follow_campaignx_id': follow_campaignx_on_stage_id,
'follow_campaignx': follow_campaignx_on_stage,
}
return results
def retrieve_follow_campaignx(self, follow_campaignx_id, voter_we_vote_id, issue_id, issue_we_vote_id):
"""
follow_campaignx_id is the identifier for records stored in this table (it is NOT the issue_id)
"""
error_result = False
exception_does_not_exist = False
exception_multiple_object_returned = False
follow_campaignx_on_stage = FollowIssue()
follow_campaignx_on_stage_id = 0
try:
if positive_value_exists(follow_campaignx_id):
follow_campaignx_on_stage = FollowIssue.objects.get(id=follow_campaignx_id)
                follow_campaignx_on_stage_id = follow_campaignx_on_stage.id
success = True
status = 'FOLLOW_ISSUE_FOUND_WITH_ID'
elif positive_value_exists(voter_we_vote_id) and positive_value_exists(issue_id):
follow_campaignx_on_stage = FollowIssue.objects.get(
voter_we_vote_id__iexact=voter_we_vote_id,
issue_id=issue_id)
follow_campaignx_on_stage_id = follow_campaignx_on_stage.id
success = True
status = 'FOLLOW_ISSUE_FOUND_WITH_VOTER_WE_VOTE_ID_AND_ISSUE_ID'
elif positive_value_exists(voter_we_vote_id) and positive_value_exists(issue_we_vote_id):
follow_campaignx_on_stage = FollowIssue.objects.get(
voter_we_vote_id__iexact=voter_we_vote_id,
issue_we_vote_id__iexact=issue_we_vote_id)
follow_campaignx_on_stage_id = follow_campaignx_on_stage.id
success = True
status = 'FOLLOW_ISSUE_FOUND_WITH_VOTER_WE_VOTE_ID_AND_ISSUE_WE_VOTE_ID'
else:
success = False
status = 'FOLLOW_ISSUE_MISSING_REQUIRED_VARIABLES'
except FollowIssue.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
error_result = True
exception_multiple_object_returned = True
success = False
status = 'FOLLOW_ISSUE_NOT_FOUND_MultipleObjectsReturned'
except FollowIssue.DoesNotExist:
error_result = False
exception_does_not_exist = True
success = True
status = 'FOLLOW_ISSUE_NOT_FOUND_DoesNotExist'
if positive_value_exists(follow_campaignx_on_stage_id):
follow_campaignx_on_stage_found = True
is_following = follow_campaignx_on_stage.is_following()
is_not_following = follow_campaignx_on_stage.is_not_following()
is_ignoring = follow_campaignx_on_stage.is_ignoring()
else:
follow_campaignx_on_stage_found = False
is_following = False
is_not_following = True
is_ignoring = False
results = {
'status': status,
'success': success,
'follow_campaignx_found': follow_campaignx_on_stage_found,
'follow_campaignx_id': follow_campaignx_on_stage_id,
'follow_campaignx': follow_campaignx_on_stage,
'is_following': is_following,
'is_not_following': is_not_following,
'is_ignoring': is_ignoring,
'error_result': error_result,
'DoesNotExist': exception_does_not_exist,
'MultipleObjectsReturned': exception_multiple_object_returned,
}
return results
def delete_follow_campaignx(self, follow_campaignx_id, voter_we_vote_id, issue_id, issue_we_vote_id):
"""
Remove any follow issue entries (we may have duplicate entries)
"""
follow_campaignx_deleted = False
status = ''
try:
if positive_value_exists(follow_campaignx_id):
follow_campaignx_on_stage = FollowIssue.objects.get(id=follow_campaignx_id)
follow_campaignx_on_stage.delete()
follow_campaignx_deleted = True
success = True
status += 'FOLLOW_ISSUE_DELETED_BY_ID '
elif positive_value_exists(voter_we_vote_id) and positive_value_exists(issue_id):
follow_campaignx_query = FollowIssue.objects.filter(
voter_we_vote_id__iexact=voter_we_vote_id,
issue_id=issue_id)
follow_campaignx_list = list(follow_campaignx_query)
for one_follow_campaignx in follow_campaignx_list:
one_follow_campaignx.delete()
follow_campaignx_deleted = True
success = True
status += 'FOLLOW_ISSUE_DELETED_BY_VOTER_WE_VOTE_ID_AND_ISSUE_ID '
elif positive_value_exists(voter_we_vote_id) and positive_value_exists(issue_we_vote_id):
follow_campaignx_query = FollowIssue.objects.filter(
voter_we_vote_id__iexact=voter_we_vote_id,
issue_we_vote_id__iexact=issue_we_vote_id)
follow_campaignx_list = list(follow_campaignx_query)
for one_follow_campaignx in follow_campaignx_list:
one_follow_campaignx.delete()
follow_campaignx_deleted = True
success = True
status += 'FOLLOW_ISSUE_DELETE_BY_VOTER_WE_VOTE_ID_AND_ISSUE_WE_VOTE_ID '
else:
success = False
status += 'FOLLOW_ISSUE_DELETE_MISSING_REQUIRED_VARIABLES '
except FollowIssue.DoesNotExist:
success = True
status = 'FOLLOW_ISSUE_DELETE_NOT_FOUND_DoesNotExist '
results = {
'status': status,
'success': success,
'follow_campaignx_deleted': follow_campaignx_deleted,
}
return results
class FollowIssue(models.Model):
    # We are relying on the built-in Django id field
# The voter following the issue
voter_we_vote_id = models.CharField(
verbose_name="we vote permanent id", max_length=255, null=True, blank=True, unique=False, db_index=True)
organization_we_vote_id = models.CharField(
verbose_name="we vote permanent id", max_length=255, null=True, blank=True, unique=False)
# The issue being followed
issue_id = models.PositiveIntegerField(null=True, blank=True)
    # This is used when we want to export the issues that are being followed
issue_we_vote_id = models.CharField(
verbose_name="we vote permanent id", max_length=255, null=True, blank=True, unique=False)
# Is this person following, not following, or ignoring this issue?
following_status = models.CharField(max_length=15, choices=FOLLOWING_CHOICES, default=FOLLOWING, db_index=True)
# Is the fact that this issue is being followed visible to the public (if linked to organization)?
is_follow_visible_publicly = models.BooleanField(verbose_name='', default=False)
# The date the voter followed or stopped following this issue
date_last_changed = models.DateTimeField(verbose_name='date last changed', null=True, auto_now=True, db_index=True)
def __unicode__(self):
return self.issue_we_vote_id
def is_following(self):
if self.following_status == FOLLOWING:
return True
return False
def is_not_following(self):
if self.following_status == STOP_FOLLOWING:
return True
return False
def is_ignoring(self):
if self.following_status == FOLLOW_IGNORE:
return True
return False
class FollowIssueManager(models.Manager):
def __unicode__(self):
return "FollowIssueManager"
def toggle_on_voter_following_issue(self, voter_we_vote_id, issue_id, issue_we_vote_id):
following_status = FOLLOWING
follow_issue_manager = FollowIssueManager()
return follow_issue_manager.toggle_following_issue(voter_we_vote_id, issue_id, issue_we_vote_id,
following_status)
def toggle_off_voter_following_issue(self, voter_we_vote_id, issue_id, issue_we_vote_id):
following_status = STOP_FOLLOWING
follow_issue_manager = FollowIssueManager()
return follow_issue_manager.toggle_following_issue(voter_we_vote_id, issue_id, issue_we_vote_id,
following_status)
def toggle_ignore_voter_following_issue(self, voter_we_vote_id, issue_id, issue_we_vote_id):
following_status = FOLLOW_IGNORE
follow_issue_manager = FollowIssueManager()
return follow_issue_manager.toggle_following_issue(voter_we_vote_id, issue_id, issue_we_vote_id,
following_status)
def toggle_following_issue(self, voter_we_vote_id, issue_id, issue_we_vote_id, following_status):
follow_issue_on_stage_found = False
follow_issue_changed = False
follow_issue_on_stage_id = 0
follow_issue_on_stage = FollowIssue()
status = ''
issue_identifier_exists = positive_value_exists(issue_we_vote_id) or positive_value_exists(issue_id)
if not positive_value_exists(voter_we_vote_id) and not issue_identifier_exists:
results = {
'success': True if follow_issue_on_stage_found else False,
'status': 'Insufficient inputs to toggle issue link, try passing ids for voter and issue ',
'follow_issue_found': follow_issue_on_stage_found,
'follow_issue_id': follow_issue_on_stage_id,
'follow_issue': follow_issue_on_stage,
}
return results
        # Does a follow_issue entry from this voter already exist?
follow_issue_manager = FollowIssueManager()
follow_issue_id = 0
results = follow_issue_manager.retrieve_follow_issue(follow_issue_id, voter_we_vote_id, issue_id,
issue_we_vote_id)
if results['MultipleObjectsReturned']:
status += 'TOGGLE_FOLLOWING_ISSUE MultipleObjectsReturned ' + following_status
delete_results = follow_issue_manager.delete_follow_issue(
follow_issue_id, voter_we_vote_id, issue_id, issue_we_vote_id)
status += delete_results['status']
results = follow_issue_manager.retrieve_follow_issue(follow_issue_id, voter_we_vote_id, issue_id,
issue_we_vote_id)
if results['follow_issue_found']:
follow_issue_on_stage = results['follow_issue']
            # Update this follow_issue entry with new values - we do not delete because we might be able to use it again
try:
follow_issue_on_stage.following_status = following_status
                # We don't need to update date_last_changed here because we set auto_now=True on the field
# follow_issue_on_stage.date_last_changed =
follow_issue_on_stage.save()
follow_issue_changed = True
follow_issue_on_stage_id = follow_issue_on_stage.id
follow_issue_on_stage_found = True
status += 'FOLLOW_STATUS_UPDATED_AS ' + following_status
except Exception as e:
status += 'FAILED_TO_UPDATE ' + following_status
handle_record_not_saved_exception(e, logger=logger, exception_message_optional=status)
elif results['DoesNotExist']:
try:
# Create new follow_issue entry
# First make sure that issue_id is for a valid issue
issue_manager = IssueManager()
if positive_value_exists(issue_id):
results = issue_manager.retrieve_issue(issue_id)
else:
results = issue_manager.retrieve_issue(0, issue_we_vote_id)
if results['issue_found']:
issue = results['issue']
follow_issue_on_stage = FollowIssue(
voter_we_vote_id=voter_we_vote_id,
issue_id=issue.id,
issue_we_vote_id=issue.we_vote_id,
following_status=following_status,
)
# if auto_followed_from_twitter_suggestion:
# follow_issue_on_stage.auto_followed_from_twitter_suggestion = True
follow_issue_on_stage.save()
follow_issue_changed = True
follow_issue_on_stage_id = follow_issue_on_stage.id
follow_issue_on_stage_found = True
status += 'CREATE ' + following_status
else:
status = 'ISSUE_NOT_FOUND_ON_CREATE ' + following_status
except Exception as e:
status += 'FAILED_TO_UPDATE ' + following_status
handle_record_not_saved_exception(e, logger=logger, exception_message_optional=status)
else:
status += results['status']
results = {
'success': True if follow_issue_on_stage_found else False,
'status': status,
'follow_issue_found': follow_issue_on_stage_found,
'follow_issue_id': follow_issue_on_stage_id,
'follow_issue': follow_issue_on_stage,
}
return results
def retrieve_follow_issue(self, follow_issue_id, voter_we_vote_id, issue_id, issue_we_vote_id):
"""
follow_issue_id is the identifier for records stored in this table (it is NOT the issue_id)
"""
error_result = False
exception_does_not_exist = False
exception_multiple_object_returned = False
follow_issue_on_stage = FollowIssue()
follow_issue_on_stage_id = 0
try:
if positive_value_exists(follow_issue_id):
follow_issue_on_stage = FollowIssue.objects.get(id=follow_issue_id)
                follow_issue_on_stage_id = follow_issue_on_stage.id
success = True
status = 'FOLLOW_ISSUE_FOUND_WITH_ID'
elif positive_value_exists(voter_we_vote_id) and positive_value_exists(issue_id):
follow_issue_on_stage = FollowIssue.objects.get(
voter_we_vote_id__iexact=voter_we_vote_id,
issue_id=issue_id)
follow_issue_on_stage_id = follow_issue_on_stage.id
success = True
status = 'FOLLOW_ISSUE_FOUND_WITH_VOTER_WE_VOTE_ID_AND_ISSUE_ID'
elif positive_value_exists(voter_we_vote_id) and positive_value_exists(issue_we_vote_id):
follow_issue_on_stage = FollowIssue.objects.get(
voter_we_vote_id__iexact=voter_we_vote_id,
issue_we_vote_id__iexact=issue_we_vote_id)
follow_issue_on_stage_id = follow_issue_on_stage.id
success = True
status = 'FOLLOW_ISSUE_FOUND_WITH_VOTER_WE_VOTE_ID_AND_ISSUE_WE_VOTE_ID'
else:
success = False
status = 'FOLLOW_ISSUE_MISSING_REQUIRED_VARIABLES'
except FollowIssue.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
error_result = True
exception_multiple_object_returned = True
success = False
status = 'FOLLOW_ISSUE_NOT_FOUND_MultipleObjectsReturned'
except FollowIssue.DoesNotExist:
error_result = False
exception_does_not_exist = True
success = True
status = 'FOLLOW_ISSUE_NOT_FOUND_DoesNotExist'
if positive_value_exists(follow_issue_on_stage_id):
follow_issue_on_stage_found = True
is_following = follow_issue_on_stage.is_following()
is_not_following = follow_issue_on_stage.is_not_following()
is_ignoring = follow_issue_on_stage.is_ignoring()
else:
follow_issue_on_stage_found = False
is_following = False
is_not_following = True
is_ignoring = False
results = {
'status': status,
'success': success,
'follow_issue_found': follow_issue_on_stage_found,
'follow_issue_id': follow_issue_on_stage_id,
'follow_issue': follow_issue_on_stage,
'is_following': is_following,
'is_not_following': is_not_following,
'is_ignoring': is_ignoring,
'error_result': error_result,
'DoesNotExist': exception_does_not_exist,
'MultipleObjectsReturned': exception_multiple_object_returned,
}
return results
def delete_follow_issue(self, follow_issue_id, voter_we_vote_id, issue_id, issue_we_vote_id):
"""
Remove any follow issue entries (we may have duplicate entries)
"""
follow_issue_deleted = False
status = ''
try:
if positive_value_exists(follow_issue_id):
follow_issue_on_stage = FollowIssue.objects.get(id=follow_issue_id)
follow_issue_on_stage.delete()
follow_issue_deleted = True
success = True
status += 'FOLLOW_ISSUE_DELETED_BY_ID '
elif positive_value_exists(voter_we_vote_id) and positive_value_exists(issue_id):
follow_issue_query = FollowIssue.objects.filter(
voter_we_vote_id__iexact=voter_we_vote_id,
issue_id=issue_id)
follow_issue_list = list(follow_issue_query)
for one_follow_issue in follow_issue_list:
one_follow_issue.delete()
follow_issue_deleted = True
success = True
status += 'FOLLOW_ISSUE_DELETED_BY_VOTER_WE_VOTE_ID_AND_ISSUE_ID '
elif positive_value_exists(voter_we_vote_id) and positive_value_exists(issue_we_vote_id):
follow_issue_query = FollowIssue.objects.filter(
voter_we_vote_id__iexact=voter_we_vote_id,
issue_we_vote_id__iexact=issue_we_vote_id)
follow_issue_list = list(follow_issue_query)
for one_follow_issue in follow_issue_list:
one_follow_issue.delete()
follow_issue_deleted = True
success = True
status += 'FOLLOW_ISSUE_DELETE_BY_VOTER_WE_VOTE_ID_AND_ISSUE_WE_VOTE_ID '
else:
success = False
status += 'FOLLOW_ISSUE_DELETE_MISSING_REQUIRED_VARIABLES '
except FollowIssue.DoesNotExist:
success = True
status = 'FOLLOW_ISSUE_DELETE_NOT_FOUND_DoesNotExist '
results = {
'status': status,
'success': success,
'follow_issue_deleted': follow_issue_deleted,
}
return results
def update_or_create_suggested_issue_to_follow(self, viewer_voter_we_vote_id, issue_we_vote_id,
from_twitter=False):
"""
        Create or update the SuggestedIssueToFollow table with suggested issues from the Twitter ids I follow
        or the issues my friends follow.
:param viewer_voter_we_vote_id:
:param issue_we_vote_id:
:param from_twitter:
:return:
"""
status = ''
try:
suggested_issue_to_follow, created = SuggestedIssueToFollow.objects.update_or_create(
viewer_voter_we_vote_id=viewer_voter_we_vote_id,
issue_we_vote_id=issue_we_vote_id,
defaults={
'viewer_voter_we_vote_id': viewer_voter_we_vote_id,
'issue_we_vote_id': issue_we_vote_id,
'from_twitter': from_twitter
}
)
suggested_issue_to_follow_saved = True
success = True
status += "SUGGESTED_ISSUE_TO_FOLLOW_UPDATED "
except Exception as e:
suggested_issue_to_follow_saved = False
suggested_issue_to_follow = SuggestedIssueToFollow()
success = False
status += "SUGGESTED_ISSUE_TO_FOLLOW_NOT_UPDATED " + str(e) + ' '
results = {
'success': success,
'status': status,
'suggested_issue_to_follow_saved': suggested_issue_to_follow_saved,
'suggested_issue_to_follow': suggested_issue_to_follow,
}
return results
def retrieve_suggested_issue_to_follow_list(self, viewer_voter_we_vote_id, from_twitter=False):
"""
        Retrieve suggested issues for this voter from the SuggestedIssueToFollow table.
:param viewer_voter_we_vote_id:
:param from_twitter:
:return:
"""
suggested_issue_to_follow_list = []
status = ''
try:
suggested_issue_to_follow_queryset = SuggestedIssueToFollow.objects.all()
suggested_issue_to_follow_list = suggested_issue_to_follow_queryset.filter(
viewer_voter_we_vote_id__iexact=viewer_voter_we_vote_id,
from_twitter=from_twitter)
if len(suggested_issue_to_follow_list):
success = True
suggested_issue_to_follow_list_found = True
status += "SUGGESTED_ISSUE_TO_FOLLOW_RETRIEVED "
else:
success = True
suggested_issue_to_follow_list_found = False
status += "NO_SUGGESTED_ISSUE_TO_FOLLOW_LIST_RETRIEVED "
except SuggestedIssueToFollow.DoesNotExist:
# No data found. Try again below
success = True
suggested_issue_to_follow_list_found = False
status = 'NO_SUGGESTED_ISSUE_TO_FOLLOW_LIST_RETRIEVED_DoesNotExist '
except Exception as e:
success = False
suggested_issue_to_follow_list_found = False
status += "SUGGESTED_ISSUE_TO_FOLLOW_LIST_NOT_RETRIEVED " + str(e) + ' '
results = {
'success': success,
'status': status,
'suggested_issue_to_follow_list_found': suggested_issue_to_follow_list_found,
'suggested_issue_to_follow_list': suggested_issue_to_follow_list,
}
return results
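# --- Illustrative sketch (editor addition, not part of the We Vote codebase) ---
# Minimal usage of FollowIssueManager, with hypothetical we_vote_id values; real
# callers pass ids taken from the voter and issue records.  The toggle call
# creates a FollowIssue row (or updates its following_status), and the retrieve
# call reports the stored state back.
def _example_follow_issue_flow(voter_we_vote_id='wv01voter1', issue_we_vote_id='wv01issue1'):
    follow_issue_manager = FollowIssueManager()
    toggle_results = follow_issue_manager.toggle_on_voter_following_issue(
        voter_we_vote_id, 0, issue_we_vote_id)
    retrieve_results = follow_issue_manager.retrieve_follow_issue(
        0, voter_we_vote_id, 0, issue_we_vote_id)
    return toggle_results['success'] and retrieve_results['is_following']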
class FollowMetricsManager(models.Manager):
def __unicode__(self):
return "FollowMetricsManager"
def fetch_organization_followers(self, organization_we_vote_id, google_civic_election_id=0):
count_result = None
try:
count_query = FollowOrganization.objects.using('readonly').all()
count_query = count_query.filter(organization_we_vote_id__iexact=organization_we_vote_id)
count_query = count_query.filter(following_status=FOLLOWING)
count_query = count_query.values("voter_id").distinct()
if positive_value_exists(google_civic_election_id):
election_manager = ElectionManager()
election_result = election_manager.retrieve_election(google_civic_election_id)
if election_result['election_found']:
election = election_result['election']
if positive_value_exists(election.election_day_text):
timezone = pytz.timezone("America/Los_Angeles")
date_of_election = timezone.localize(datetime.strptime(election.election_day_text, "%Y-%m-%d"))
date_of_election += timedelta(days=1) # Add one day, to catch the entire election day
# Find all of the follow entries before or on the day of the election
count_query = count_query.filter(date_last_changed__lte=date_of_election)
else:
# Failed retrieving date, so we return 0
return 0
count_result = count_query.count()
except Exception as e:
pass
return count_result
def fetch_issues_followed(self, voter_we_vote_id='',
limit_to_one_date_as_integer=0, count_through_this_date_as_integer=0):
timezone = pytz.timezone("America/Los_Angeles")
if positive_value_exists(limit_to_one_date_as_integer):
one_date_string = str(limit_to_one_date_as_integer)
limit_to_one_date = timezone.localize(datetime.strptime(one_date_string, "%Y%m%d"))
if positive_value_exists(count_through_this_date_as_integer):
count_through_date_string = str(count_through_this_date_as_integer)
count_through_this_date = timezone.localize(datetime.strptime(count_through_date_string, "%Y%m%d"))
count_result = None
try:
count_query = FollowIssue.objects.using('readonly').all()
if positive_value_exists(voter_we_vote_id):
count_query = count_query.filter(voter_we_vote_id__iexact=voter_we_vote_id)
count_query = count_query.filter(following_status=FOLLOWING)
if positive_value_exists(limit_to_one_date_as_integer):
# TODO DALE THIS NEEDS WORK TO FIND ALL ENTRIES ON ONE DAY
count_query = count_query.filter(date_last_changed=limit_to_one_date)
elif positive_value_exists(count_through_this_date_as_integer):
count_query = count_query.filter(date_last_changed__lte=count_through_this_date)
count_result = count_query.count()
except Exception as e:
pass
return count_result
def fetch_voter_organizations_followed(self, voter_id):
count_result = None
try:
count_query = FollowOrganization.objects.using('readonly').all()
count_query = count_query.filter(voter_id=voter_id)
count_query = count_query.filter(following_status=FOLLOWING)
count_result = count_query.count()
except Exception as e:
pass
return count_result
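# --- Illustrative sketch (editor addition, not part of the We Vote codebase) ---
# Example of reading follower metrics, with a hypothetical organization id.
# When a google_civic_election_id is supplied, fetch_organization_followers
# limits the count to follow entries dated on or before election day.
def _example_organization_follower_count(organization_we_vote_id='wv01org1',
                                         google_civic_election_id=0):
    metrics_manager = FollowMetricsManager()
    return metrics_manager.fetch_organization_followers(
        organization_we_vote_id, google_civic_election_id)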
class FollowIssueList(models.Model):
"""
A way to retrieve all of the follow_issue information
"""
def fetch_follow_issue_count_by_issue_we_vote_id(self, issue_we_vote_id):
follow_issue_list_length = 0
try:
follow_issue_list_query = FollowIssue.objects.using('readonly').all()
follow_issue_list_query = follow_issue_list_query.filter(issue_we_vote_id__iexact=issue_we_vote_id)
follow_issue_list_query = follow_issue_list_query.filter(following_status=FOLLOWING)
follow_issue_list_length = follow_issue_list_query.count()
except Exception as e:
handle_record_not_found_exception(e, logger=logger)
return follow_issue_list_length
def fetch_follow_issue_count_by_voter_we_vote_id(self, voter_we_vote_id, following_status=None):
if following_status is None:
following_status = FOLLOWING
follow_issue_list_length = 0
try:
follow_issue_list_query = FollowIssue.objects.using('readonly').all()
follow_issue_list_query = follow_issue_list_query.filter(voter_we_vote_id__iexact=voter_we_vote_id)
follow_issue_list_query = follow_issue_list_query.filter(following_status=following_status)
follow_issue_list_length = follow_issue_list_query.count()
except Exception as e:
handle_record_not_found_exception(e, logger=logger)
return follow_issue_list_length
def retrieve_follow_issue_list_by_voter_we_vote_id(self, voter_we_vote_id, following_status=None, read_only=True):
"""
Retrieve a list of follow_issue entries for this voter
:param voter_we_vote_id:
:param following_status:
:param read_only:
:return: a list of follow_issue objects for the voter_we_vote_id
"""
follow_issue_list_found = False
if following_status is None:
following_status = FOLLOWING
follow_issue_list = {}
try:
if positive_value_exists(read_only):
follow_issue_list_query = FollowIssue.objects.using('readonly').all()
else:
follow_issue_list_query = FollowIssue.objects.all()
follow_issue_list_query = follow_issue_list_query.filter(voter_we_vote_id__iexact=voter_we_vote_id)
if positive_value_exists(following_status):
follow_issue_list = follow_issue_list_query.filter(following_status=following_status)
if len(follow_issue_list):
follow_issue_list_found = True
except Exception as e:
handle_record_not_found_exception(e, logger=logger)
if follow_issue_list_found:
return follow_issue_list
else:
follow_issue_list = {}
return follow_issue_list
def retrieve_follow_issue_we_vote_id_list_by_voter_we_vote_id(self, voter_we_vote_id, following_status=None):
follow_issue_we_vote_id_list = []
follow_issue_we_vote_id_list_result = []
if following_status is None:
following_status = FOLLOWING
try:
follow_issue_list_query = FollowIssue.objects.using('readonly').all()
follow_issue_list_query = follow_issue_list_query.filter(voter_we_vote_id__iexact=voter_we_vote_id)
if positive_value_exists(following_status):
follow_issue_list_query = follow_issue_list_query.filter(following_status=following_status)
follow_issue_list_query = follow_issue_list_query.values("issue_we_vote_id").distinct()
follow_issue_we_vote_id_list_result = list(follow_issue_list_query)
except Exception as e:
handle_record_not_found_exception(e, logger=logger)
for query in follow_issue_we_vote_id_list_result:
follow_issue_we_vote_id_list.append(query["issue_we_vote_id"])
return follow_issue_we_vote_id_list
def fetch_follow_issue_following_count_by_voter_we_vote_id(self, voter_we_vote_id):
following_status = FOLLOWING
return self.fetch_follow_issue_count_by_voter_we_vote_id(voter_we_vote_id, following_status)
def fetch_follow_issue_ignore_count_by_voter_we_vote_id(self, voter_we_vote_id):
following_status = FOLLOW_IGNORE
return self.fetch_follow_issue_count_by_voter_we_vote_id(voter_we_vote_id, following_status)
def retrieve_follow_issue_ignore_list_by_voter_we_vote_id(self, voter_we_vote_id):
following_status = FOLLOW_IGNORE
return self.retrieve_follow_issue_list_by_voter_we_vote_id(voter_we_vote_id, following_status)
def retrieve_follow_issue_following_we_vote_id_list_by_voter_we_vote_id(self, voter_we_vote_id):
following_status = FOLLOWING
return self.retrieve_follow_issue_we_vote_id_list_by_voter_we_vote_id(voter_we_vote_id, following_status)
def retrieve_follow_issue_ignore_we_vote_id_list_by_voter_we_vote_id(self, voter_we_vote_id):
following_status = FOLLOW_IGNORE
return self.retrieve_follow_issue_we_vote_id_list_by_voter_we_vote_id(voter_we_vote_id, following_status)
def retrieve_follow_issue_list_by_issue_id(self, issue_id):
issue_we_vote_id = None
following_status = FOLLOWING
return self.retrieve_follow_issue_list(issue_id, issue_we_vote_id, following_status)
def retrieve_follow_issue_following_list_by_issue_we_vote_id(self, issue_we_vote_id):
issue_id = None
following_status = FOLLOWING
return self.retrieve_follow_issue_list(issue_id, issue_we_vote_id, following_status)
def retrieve_follow_issue_list(self, issue_id, issue_we_vote_id, following_status):
follow_issue_list_found = False
follow_issue_list = {}
try:
follow_issue_list = FollowIssue.objects.using('readonly').all()
if positive_value_exists(issue_id):
follow_issue_list = follow_issue_list.filter(issue_id=issue_id)
else:
follow_issue_list = follow_issue_list.filter(issue_we_vote_id__iexact=issue_we_vote_id)
if positive_value_exists(following_status):
follow_issue_list = follow_issue_list.filter(following_status=following_status)
if len(follow_issue_list):
follow_issue_list_found = True
except Exception as e:
pass
if follow_issue_list_found:
return follow_issue_list
else:
follow_issue_list = {}
return follow_issue_list
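# --- Illustrative sketch (editor addition, not part of the We Vote codebase) ---
# Example of the FollowIssueList helpers, with hypothetical ids: count how many
# voters follow a given issue, and list the issue we_vote_ids a voter follows.
def _example_follow_issue_lists(voter_we_vote_id='wv01voter1', issue_we_vote_id='wv01issue1'):
    follow_issue_list = FollowIssueList()
    follower_count = follow_issue_list.fetch_follow_issue_count_by_issue_we_vote_id(issue_we_vote_id)
    followed_issue_we_vote_ids = \
        follow_issue_list.retrieve_follow_issue_following_we_vote_id_list_by_voter_we_vote_id(voter_we_vote_id)
    return follower_count, followed_issue_we_vote_ids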
class FollowOrganization(models.Model):
    # We are relying on the built-in Django id field
# The voter following the organization
voter_id = models.BigIntegerField(null=True, blank=True, db_index=True)
# The organization being followed
organization_id = models.BigIntegerField(null=True, blank=True, db_index=True)
voter_linked_organization_we_vote_id = models.CharField(
verbose_name="organization we vote permanent id",
max_length=255, null=True, blank=True, unique=False, db_index=True)
# This is used when we want to export the organizations that a voter is following
organization_we_vote_id = models.CharField(
verbose_name="we vote permanent id", max_length=255, null=True, blank=True, unique=False, db_index=True)
# Is this person following or ignoring this organization?
following_status = models.CharField(max_length=15, choices=FOLLOWING_CHOICES, default=FOLLOWING, db_index=True)
# Is this person automatically following the suggested twitter organization?
auto_followed_from_twitter_suggestion = models.BooleanField(verbose_name='', default=False)
# Is the fact that this organization is being followed by voter visible to the public?
is_follow_visible_publicly = models.BooleanField(verbose_name='', default=False)
# The date the voter followed or stopped following this organization
date_last_changed = models.DateTimeField(verbose_name='date last changed', null=True, auto_now=True)
# This is used when we want to export the organizations that a voter is following
def voter_we_vote_id(self):
voter_manager = VoterManager()
return voter_manager.fetch_we_vote_id_from_local_id(self.voter_id)
def __unicode__(self):
return self.organization_id
def is_following(self):
if self.following_status == FOLLOWING:
return True
return False
def is_not_following(self):
if self.following_status == STOP_FOLLOWING:
return True
return False
def is_ignoring(self):
if self.following_status == FOLLOW_IGNORE:
return True
return False
class FollowOrganizationManager(models.Manager):
def __unicode__(self):
return "FollowOrganizationManager"
def fetch_number_of_organizations_followed(self, voter_id):
number_of_organizations_followed = 0
try:
if positive_value_exists(voter_id):
follow_organization_query = FollowOrganization.objects.filter(
voter_id=voter_id,
following_status=FOLLOWING
)
number_of_organizations_followed = follow_organization_query.count()
except Exception as e:
pass
return number_of_organizations_followed
def toggle_on_voter_following_organization(self, voter_id, organization_id, organization_we_vote_id,
voter_linked_organization_we_vote_id,
auto_followed_from_twitter_suggestion=False):
following_status = FOLLOWING
follow_organization_manager = FollowOrganizationManager()
return follow_organization_manager.toggle_voter_following_organization(
voter_id, organization_id, organization_we_vote_id, voter_linked_organization_we_vote_id, following_status,
auto_followed_from_twitter_suggestion)
def toggle_off_voter_following_organization(self, voter_id, organization_id, organization_we_vote_id,
voter_linked_organization_we_vote_id):
following_status = STOP_FOLLOWING
follow_organization_manager = FollowOrganizationManager()
return follow_organization_manager.toggle_voter_following_organization(
voter_id, organization_id, organization_we_vote_id, voter_linked_organization_we_vote_id, following_status)
def toggle_ignore_voter_following_organization(self, voter_id, organization_id, organization_we_vote_id,
voter_linked_organization_we_vote_id):
following_status = FOLLOW_IGNORE
follow_organization_manager = FollowOrganizationManager()
return follow_organization_manager.toggle_voter_following_organization(
voter_id, organization_id, organization_we_vote_id, voter_linked_organization_we_vote_id, following_status)
def toggle_off_voter_ignoring_organization(self, voter_id, organization_id, organization_we_vote_id,
voter_linked_organization_we_vote_id):
        following_status = STOP_FOLLOWING  # STOP_IGNORING (We don't actually store STOP_IGNORING in the database)
follow_organization_manager = FollowOrganizationManager()
return follow_organization_manager.toggle_voter_following_organization(
voter_id, organization_id, organization_we_vote_id, voter_linked_organization_we_vote_id, following_status)
def toggle_voter_following_organization(self, voter_id, organization_id, organization_we_vote_id,
voter_linked_organization_we_vote_id, following_status,
auto_followed_from_twitter_suggestion=False):
status = ""
        # Does a follow_organization entry from this voter already exist?
follow_organization_manager = FollowOrganizationManager()
results = follow_organization_manager.retrieve_follow_organization(0, voter_id,
organization_id, organization_we_vote_id)
follow_organization_on_stage_found = False
follow_organization_on_stage_id = 0
follow_organization_on_stage = FollowOrganization()
if results['follow_organization_found']:
follow_organization_on_stage = results['follow_organization']
            # Update this follow_organization entry with new values - we do not delete it because we might be able to use it again
try:
if auto_followed_from_twitter_suggestion:
                    # If we get here, we are auto-following because the voter follows this organization on Twitter
                    if follow_organization_on_stage.following_status == STOP_FOLLOWING or \
                            follow_organization_on_stage.following_status == FOLLOW_IGNORE:
# Do not follow again
pass
else:
follow_organization_on_stage.following_status = following_status
else:
follow_organization_on_stage.following_status = following_status
follow_organization_on_stage.auto_followed_from_twitter_suggestion = False
follow_organization_on_stage.voter_linked_organization_we_vote_id = voter_linked_organization_we_vote_id
                # We don't need to update date_last_changed here because we set auto_now=True on the field
# follow_organization_on_stage.date_last_changed =
follow_organization_on_stage.save()
follow_organization_on_stage_id = follow_organization_on_stage.id
follow_organization_on_stage_found = True
status += 'UPDATE ' + following_status
except Exception as e:
status += 'FAILED_TO_UPDATE ' + following_status + ' '
handle_record_not_saved_exception(e, logger=logger, exception_message_optional=status)
elif results['MultipleObjectsReturned']:
logger.warning("follow_organization: delete all but one and take it over?")
status += 'TOGGLE_FOLLOWING_ORGANIZATION MultipleObjectsReturned ' + following_status + ' '
elif results['DoesNotExist']:
try:
# Create new follow_organization entry
# First make sure that organization_id is for a valid organization
organization_manager = OrganizationManager()
if positive_value_exists(organization_id):
results = organization_manager.retrieve_organization(organization_id)
else:
results = organization_manager.retrieve_organization(0, organization_we_vote_id)
if results['organization_found']:
organization = results['organization']
follow_organization_on_stage = FollowOrganization(
voter_id=voter_id,
organization_id=organization.id,
organization_we_vote_id=organization.we_vote_id,
voter_linked_organization_we_vote_id=voter_linked_organization_we_vote_id,
following_status=following_status,
)
if auto_followed_from_twitter_suggestion:
follow_organization_on_stage.auto_followed_from_twitter_suggestion = True
follow_organization_on_stage.save()
follow_organization_on_stage_id = follow_organization_on_stage.id
follow_organization_on_stage_found = True
status += 'CREATE ' + following_status + ' '
else:
status += 'ORGANIZATION_NOT_FOUND_ON_CREATE ' + following_status + ' '
except Exception as e:
status += 'FAILED_TO_UPDATE ' + following_status + ' '
handle_record_not_saved_exception(e, logger=logger, exception_message_optional=status)
else:
status += results['status']
results = {
'success': True if follow_organization_on_stage_found else False,
'status': status,
'follow_organization_found': follow_organization_on_stage_found,
'follow_organization_id': follow_organization_on_stage_id,
'follow_organization': follow_organization_on_stage,
'voter_linked_organization_we_vote_id': voter_linked_organization_we_vote_id,
}
return results
def retrieve_follow_organization(self, follow_organization_id, voter_id, organization_id, organization_we_vote_id,
read_only=False):
"""
follow_organization_id is the identifier for records stored in this table (it is NOT the organization_id)
"""
error_result = False
exception_does_not_exist = False
exception_multiple_object_returned = False
follow_organization_on_stage = FollowOrganization()
follow_organization_on_stage_id = 0
status = ""
try:
if positive_value_exists(follow_organization_id):
if read_only:
follow_organization_on_stage = FollowOrganization.objects.using('readonly').get(
id=follow_organization_id)
else:
follow_organization_on_stage = FollowOrganization.objects.get(id=follow_organization_id)
                follow_organization_on_stage_id = follow_organization_on_stage.id
success = True
status += 'FOLLOW_ORGANIZATION_FOUND_WITH_ID '
elif positive_value_exists(voter_id) and positive_value_exists(organization_id):
if read_only:
follow_organization_on_stage = FollowOrganization.objects.using('readonly').get(
voter_id=voter_id, organization_id=organization_id)
else:
follow_organization_on_stage = FollowOrganization.objects.get(
voter_id=voter_id, organization_id=organization_id)
follow_organization_on_stage_id = follow_organization_on_stage.id
success = True
status += 'FOLLOW_ORGANIZATION_FOUND_WITH_VOTER_ID_AND_ORGANIZATION_ID '
elif positive_value_exists(voter_id) and positive_value_exists(organization_we_vote_id):
if read_only:
follow_organization_on_stage = FollowOrganization.objects.using('readonly').get(
voter_id=voter_id, organization_we_vote_id=organization_we_vote_id)
else:
follow_organization_on_stage = FollowOrganization.objects.get(
voter_id=voter_id, organization_we_vote_id=organization_we_vote_id)
follow_organization_on_stage_id = follow_organization_on_stage.id
success = True
status += 'FOLLOW_ORGANIZATION_FOUND_WITH_VOTER_ID_AND_ORGANIZATION_WE_VOTE_ID '
else:
success = False
status += 'FOLLOW_ORGANIZATION_MISSING_REQUIRED_VARIABLES '
except FollowOrganization.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
error_result = True
exception_multiple_object_returned = True
success = False
status += 'FOLLOW_ORGANIZATION_NOT_FOUND_MultipleObjectsReturned '
follow_organization_list_found = False
follow_organization_list = []
# Delete the oldest values and retrieve the correct one
try:
if positive_value_exists(voter_id) and positive_value_exists(organization_id):
follow_organization_query = FollowOrganization.objects.all()
follow_organization_query = follow_organization_query.filter(
voter_id=voter_id, organization_id=organization_id)
follow_organization_query = follow_organization_query.order_by('id')
follow_organization_list = list(follow_organization_query)
follow_organization_list_found = positive_value_exists(len(follow_organization_list))
success = True
status += 'FOLLOW_ORGANIZATION_FOUND_WITH_VOTER_ID_AND_ORGANIZATION_ID '
elif positive_value_exists(voter_id) and positive_value_exists(organization_we_vote_id):
follow_organization_query = FollowOrganization.objects.all()
follow_organization_query = follow_organization_query.filter(
voter_id=voter_id, organization_we_vote_id=organization_we_vote_id)
follow_organization_query = follow_organization_query.order_by('id')
follow_organization_list = list(follow_organization_query)
follow_organization_list_found = positive_value_exists(len(follow_organization_list))
success = True
status += 'FOLLOW_ORGANIZATION_FOUND_WITH_VOTER_ID_AND_ORGANIZATION_WE_VOTE_ID '
if follow_organization_list_found:
follow_organization_on_stage = follow_organization_list.pop()
follow_organization_on_stage_id = follow_organization_on_stage.id
# Now cycle through remaining list and delete
for one_follow_organization in follow_organization_list:
one_follow_organization.delete()
print_to_log(logger, exception_message_optional="FollowOrganization duplicates removed.")
except Exception as e:
handle_exception(e, logger,
exception_message="Error trying to delete duplicate FollowOrganization entries.")
except FollowOrganization.DoesNotExist:
error_result = False
exception_does_not_exist = True
success = True
status += 'FOLLOW_ORGANIZATION_NOT_FOUND_DoesNotExist '
if positive_value_exists(follow_organization_on_stage_id):
follow_organization_on_stage_found = True
is_following = follow_organization_on_stage.is_following()
is_not_following = follow_organization_on_stage.is_not_following()
is_ignoring = follow_organization_on_stage.is_ignoring()
else:
follow_organization_on_stage_found = False
is_following = False
is_not_following = True
is_ignoring = False
results = {
'status': status,
'success': success,
'follow_organization_found': follow_organization_on_stage_found,
'follow_organization_id': follow_organization_on_stage_id,
'follow_organization': follow_organization_on_stage,
'is_following': is_following,
'is_not_following': is_not_following,
'is_ignoring': is_ignoring,
'error_result': error_result,
'DoesNotExist': exception_does_not_exist,
'MultipleObjectsReturned': exception_multiple_object_returned,
}
return results
def retrieve_voter_following_org_status(self, voter_id, voter_we_vote_id,
organization_id, organization_we_vote_id, read_only=False):
"""
Retrieve one follow entry so we can see if a voter is following or ignoring a particular org
"""
if not positive_value_exists(voter_id) and positive_value_exists(voter_we_vote_id):
# We need voter_id to call retrieve_follow_organization
voter_manager = VoterManager()
voter_id = voter_manager.fetch_local_id_from_we_vote_id(voter_we_vote_id)
if not positive_value_exists(voter_id) and \
not (positive_value_exists(organization_id) or positive_value_exists(organization_we_vote_id)):
results = {
'status': 'RETRIEVE_VOTER_FOLLOWING_MISSING_VARIABLES',
'success': False,
'follow_organization_found': False,
'follow_organization_id': 0,
'follow_organization': FollowOrganization(),
'is_following': False,
'is_not_following': True,
'is_ignoring': False,
'error_result': True,
'DoesNotExist': False,
'MultipleObjectsReturned': False,
}
return results
return self.retrieve_follow_organization(
0, voter_id, organization_id, organization_we_vote_id, read_only=read_only)
def update_or_create_suggested_organization_to_follow(self, viewer_voter_we_vote_id, organization_we_vote_id,
from_twitter=False):
"""
        Create or update the SuggestedOrganizationToFollow table with suggested organizations from Twitter ids I follow or organizations my friends follow.
or organization of my friends follow.
:param viewer_voter_we_vote_id:
:param organization_we_vote_id:
:param from_twitter:
:return:
"""
status = ''
try:
suggested_organization_to_follow, created = SuggestedOrganizationToFollow.objects.update_or_create(
viewer_voter_we_vote_id=viewer_voter_we_vote_id,
organization_we_vote_id=organization_we_vote_id,
defaults={
'viewer_voter_we_vote_id': viewer_voter_we_vote_id,
'organization_we_vote_id': organization_we_vote_id,
'from_twitter': from_twitter
}
)
suggested_organization_to_follow_saved = True
success = True
status += "SUGGESTED_ORGANIZATION_TO_FOLLOW_UPDATED "
except Exception as e:
suggested_organization_to_follow_saved = False
suggested_organization_to_follow = SuggestedOrganizationToFollow()
success = False
status += "SUGGESTED_ORGANIZATION_TO_FOLLOW_NOT_UPDATED " + str(e) + ' '
results = {
'success': success,
'status': status,
'suggested_organization_to_follow_saved': suggested_organization_to_follow_saved,
'suggested_organization_to_follow': suggested_organization_to_follow,
}
return results
def retrieve_suggested_organization_to_follow_list(self, viewer_voter_we_vote_id, from_twitter=False):
"""
        Retrieve suggested organizations that I follow from the SuggestedOrganizationToFollow table.
:param viewer_voter_we_vote_id:
:param from_twitter:
:return:
"""
suggested_organization_to_follow_list = []
status = ''
try:
suggested_organization_to_follow_queryset = SuggestedOrganizationToFollow.objects.all()
suggested_organization_to_follow_list = suggested_organization_to_follow_queryset.filter(
viewer_voter_we_vote_id__iexact=viewer_voter_we_vote_id,
from_twitter=from_twitter)
if len(suggested_organization_to_follow_list):
success = True
suggested_organization_to_follow_list_found = True
status += "SUGGESTED_ORGANIZATION_TO_FOLLOW_RETRIEVED "
else:
success = True
suggested_organization_to_follow_list_found = False
status += "NO_SUGGESTED_ORGANIZATION_TO_FOLLOW_LIST_RETRIEVED "
except SuggestedOrganizationToFollow.DoesNotExist:
# No data found. Try again below
success = True
suggested_organization_to_follow_list_found = False
status += 'NO_SUGGESTED_ORGANIZATION_TO_FOLLOW_LIST_RETRIEVED_DoesNotExist '
except Exception as e:
success = False
suggested_organization_to_follow_list_found = False
status += "SUGGESTED_ORGANIZATION_TO_FOLLOW_LIST_NOT_RETRIEVED " + str(e) + ' '
results = {
'success': success,
'status': status,
'suggested_organization_to_follow_list_found': suggested_organization_to_follow_list_found,
'suggested_organization_to_follow_list': suggested_organization_to_follow_list,
}
return results
class FollowOrganizationList(models.Model):
"""
A way to retrieve all of the follow_organization information
"""
def fetch_follow_organization_by_voter_id_count(self, voter_id):
follow_organization_list = self.retrieve_follow_organization_by_voter_id(voter_id)
return len(follow_organization_list)
def retrieve_follow_organization_by_voter_id(self, voter_id, auto_followed_from_twitter_suggestion=False,
read_only=False):
# Retrieve a list of follow_organization entries for this voter
follow_organization_list_found = False
following_status = FOLLOWING
follow_organization_list = {}
try:
# Should not default to 'readonly' since we sometimes save the results of this call
if read_only:
follow_organization_list = FollowOrganization.objects.using('readonly').all()
else:
follow_organization_list = FollowOrganization.objects.all()
follow_organization_list = follow_organization_list.filter(voter_id=voter_id)
follow_organization_list = follow_organization_list.filter(following_status=following_status)
if auto_followed_from_twitter_suggestion:
follow_organization_list = follow_organization_list.filter(
auto_followed_from_twitter_suggestion=auto_followed_from_twitter_suggestion)
if len(follow_organization_list):
follow_organization_list_found = True
except Exception as e:
handle_record_not_found_exception(e, logger=logger)
if follow_organization_list_found:
return follow_organization_list
else:
follow_organization_list = {}
return follow_organization_list
def retrieve_follow_organization_by_own_organization_we_vote_id(self, organization_we_vote_id,
auto_followed_from_twitter_suggestion=False):
# Retrieve a list of followed organizations entries by voter_linked_organization_we_vote_id for voter guides
follow_organization_list_found = False
following_status = FOLLOWING
follow_organization_list = []
try:
follow_organization_list = FollowOrganization.objects.all()
follow_organization_list = follow_organization_list.filter(
voter_linked_organization_we_vote_id=organization_we_vote_id)
follow_organization_list = follow_organization_list.filter(following_status=following_status)
if auto_followed_from_twitter_suggestion:
follow_organization_list = follow_organization_list.filter(
auto_followed_from_twitter_suggestion=auto_followed_from_twitter_suggestion)
if len(follow_organization_list):
follow_organization_list_found = True
except Exception as e:
handle_record_not_found_exception(e, logger=logger)
if follow_organization_list_found:
return follow_organization_list
else:
follow_organization_list = []
return follow_organization_list
def retrieve_ignore_organization_by_voter_id(self, voter_id, read_only=False):
# Retrieve a list of follow_organization entries for this voter
follow_organization_list_found = False
following_status = FOLLOW_IGNORE
follow_organization_list = {}
try:
if positive_value_exists(read_only):
follow_organization_list = FollowOrganization.objects.using('readonly').all()
else:
follow_organization_list = FollowOrganization.objects.all()
follow_organization_list = follow_organization_list.filter(voter_id=voter_id)
follow_organization_list = follow_organization_list.filter(following_status=following_status)
if len(follow_organization_list):
follow_organization_list_found = True
except Exception as e:
handle_record_not_found_exception(e, logger=logger)
if follow_organization_list_found:
return follow_organization_list
else:
follow_organization_list = {}
return follow_organization_list
def retrieve_follow_organization_by_voter_id_simple_id_array(self, voter_id, return_we_vote_id=False,
auto_followed_from_twitter_suggestion=False,
read_only=False):
follow_organization_list_manager = FollowOrganizationList()
follow_organization_list = \
follow_organization_list_manager.retrieve_follow_organization_by_voter_id(
voter_id, auto_followed_from_twitter_suggestion, read_only=read_only)
follow_organization_list_simple_array = []
if len(follow_organization_list):
voter_manager = VoterManager()
voter_linked_organization_we_vote_id = \
voter_manager.fetch_linked_organization_we_vote_id_from_local_id(voter_id)
for follow_organization in follow_organization_list:
if not read_only:
# Heal the data by making sure the voter's linked_organization_we_vote_id exists and is accurate
if positive_value_exists(voter_linked_organization_we_vote_id) \
and voter_linked_organization_we_vote_id != \
follow_organization.voter_linked_organization_we_vote_id:
try:
follow_organization.voter_linked_organization_we_vote_id = \
voter_linked_organization_we_vote_id
follow_organization.save()
except Exception as e:
                            status = 'FAILED_TO_UPDATE_FOLLOW_ORGANIZATION-voter_id ' + str(voter_id)
handle_record_not_saved_exception(e, logger=logger, exception_message_optional=status)
if return_we_vote_id:
follow_organization_list_simple_array.append(follow_organization.organization_we_vote_id)
else:
follow_organization_list_simple_array.append(follow_organization.organization_id)
return follow_organization_list_simple_array
def retrieve_followed_organization_by_organization_we_vote_id_simple_id_array(
self, organization_we_vote_id, return_we_vote_id=False,
auto_followed_from_twitter_suggestion=False):
follow_organization_list_manager = FollowOrganizationList()
follow_organization_list = \
follow_organization_list_manager.retrieve_follow_organization_by_own_organization_we_vote_id(
organization_we_vote_id, auto_followed_from_twitter_suggestion)
follow_organization_list_simple_array = []
if len(follow_organization_list):
for follow_organization in follow_organization_list:
if return_we_vote_id:
follow_organization_list_simple_array.append(follow_organization.organization_we_vote_id)
else:
follow_organization_list_simple_array.append(follow_organization.organization_id)
return follow_organization_list_simple_array
def fetch_followers_list_by_organization_we_vote_id(
self, organization_we_vote_id, return_voter_we_vote_id=False):
"""
Fetch a list of the voter_id or voter_we_vote_id of followers of organization_we_vote_id.
:param organization_we_vote_id:
:param return_voter_we_vote_id:
:return:
"""
follow_organization_list_manager = FollowOrganizationList()
followers_list = \
follow_organization_list_manager.retrieve_follow_organization_by_organization_we_vote_id(
organization_we_vote_id)
followers_list_simple_array = []
if len(followers_list):
voter_manager = VoterManager()
for follow_organization in followers_list:
if return_voter_we_vote_id:
voter_we_vote_id = voter_manager.fetch_we_vote_id_from_local_id(follow_organization.voter_id)
if positive_value_exists(voter_we_vote_id):
followers_list_simple_array.append(voter_we_vote_id)
else:
if positive_value_exists(follow_organization.voter_id):
followers_list_simple_array.append(follow_organization.voter_id)
return followers_list_simple_array
def retrieve_followers_organization_by_organization_we_vote_id_simple_id_array(
self, organization_we_vote_id, return_we_vote_id=False,
auto_followed_from_twitter_suggestion=False):
"""
Retrieve the organization_id (or organization_we_vote_id) for each voter that follows organization_we_vote_id.
:param organization_we_vote_id:
:param return_we_vote_id:
:param auto_followed_from_twitter_suggestion:
:return:
"""
follow_organization_list_manager = FollowOrganizationList()
followers_organization_list = \
follow_organization_list_manager.retrieve_follow_organization_by_organization_we_vote_id(
organization_we_vote_id)
followers_organization_list_simple_array = []
if len(followers_organization_list):
for follow_organization in followers_organization_list:
if return_we_vote_id:
if positive_value_exists(follow_organization.voter_linked_organization_we_vote_id):
followers_organization_list_simple_array.append(
follow_organization.voter_linked_organization_we_vote_id)
else:
followers_organization_list_simple_array.append(follow_organization.organization_id)
return followers_organization_list_simple_array
def retrieve_ignore_organization_by_voter_id_simple_id_array(
self, voter_id, return_we_vote_id=False, read_only=False):
follow_organization_list_manager = FollowOrganizationList()
ignore_organization_list = \
follow_organization_list_manager.retrieve_ignore_organization_by_voter_id(voter_id, read_only=read_only)
ignore_organization_list_simple_array = []
if len(ignore_organization_list):
for ignore_organization in ignore_organization_list:
if return_we_vote_id:
ignore_organization_list_simple_array.append(ignore_organization.organization_we_vote_id)
else:
ignore_organization_list_simple_array.append(ignore_organization.organization_id)
return ignore_organization_list_simple_array
def retrieve_follow_organization_by_organization_id(self, organization_id):
# Retrieve a list of follow_organization entries for this organization
follow_organization_list_found = False
following_status = FOLLOWING
follow_organization_list = {}
try:
follow_organization_list = FollowOrganization.objects.all()
follow_organization_list = follow_organization_list.filter(organization_id=organization_id)
follow_organization_list = follow_organization_list.filter(following_status=following_status)
if len(follow_organization_list):
follow_organization_list_found = True
except Exception as e:
pass
if follow_organization_list_found:
return follow_organization_list
else:
follow_organization_list = {}
return follow_organization_list
def retrieve_follow_organization_by_organization_we_vote_id(self, organization_we_vote_id):
# Retrieve a list of follow_organization entries for this organization
follow_organization_list_found = False
following_status = FOLLOWING
follow_organization_list = {}
try:
follow_organization_list = FollowOrganization.objects.all()
follow_organization_list = follow_organization_list.filter(organization_we_vote_id=organization_we_vote_id)
follow_organization_list = follow_organization_list.filter(following_status=following_status)
if len(follow_organization_list):
follow_organization_list_found = True
except Exception as e:
pass
if follow_organization_list_found:
return follow_organization_list
else:
follow_organization_list = {}
return follow_organization_list
class SuggestedIssueToFollow(models.Model):
"""
This table stores possible suggested issues to follow
"""
viewer_voter_we_vote_id = models.CharField(
verbose_name="voter we vote id", max_length=255, null=True, blank=True, unique=False)
issue_we_vote_id = models.CharField(
verbose_name="issue we vote id", max_length=255, null=True, blank=True, unique=False)
# organization_we_vote_id_making_suggestion = models.CharField(
# verbose_name="organization we vote id making decision", max_length=255, null=True, blank=True, unique=False)
# from_twitter = models.BooleanField(verbose_name="from twitter", default=False)
date_last_changed = models.DateTimeField(verbose_name='date last changed', null=True, auto_now=True)
# def fetch_other_organization_we_vote_id(self, one_we_vote_id):
# if one_we_vote_id == self.viewer_voter_we_vote_id:
# return self.viewee_voter_we_vote_id
# else:
# # If the we_vote_id passed in wasn't found, don't return another we_vote_id
# return ""
class SuggestedOrganizationToFollow(models.Model):
"""
    This table stores possible suggested organizations, from Twitter ids I follow or organizations my friends follow.
"""
viewer_voter_we_vote_id = models.CharField(
verbose_name="voter we vote id person 1", max_length=255, null=True, blank=True, unique=False)
organization_we_vote_id = models.CharField(
verbose_name="organization we vote id person 2", max_length=255, null=True, blank=True, unique=False)
# organization_we_vote_id_making_suggestion = models.CharField(
# verbose_name="organization we vote id making decision", max_length=255, null=True, blank=True, unique=False)
from_twitter = models.BooleanField(verbose_name="from twitter", default=False)
date_last_changed = models.DateTimeField(verbose_name='date last changed', null=True, auto_now=True)
def fetch_other_organization_we_vote_id(self, one_we_vote_id):
if one_we_vote_id == self.viewer_voter_we_vote_id:
            return self.organization_we_vote_id
else:
# If the we_vote_id passed in wasn't found, don't return another we_vote_id
return ""
|
wevote/WeVoteServer
|
follow/models.py
|
Python
|
mit
| 78,451
|
# Copyright (c) 2010 by Dan Jacob.
#
# Some rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * The names of the contributors may not be used to endorse or
# promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import getpass
try:
assert raw_input
except NameError:
raw_input = input
def prompt(name, default=None):
"""
Grab user input from command line.
:param name: prompt text
:param default: default value if no input provided.
"""
prompt = name + (default and ' [%s]' % default or '')
prompt += name.endswith('?') and ' ' or ': '
while True:
rv = raw_input(prompt)
if rv:
return rv
if default is not None:
return default
def prompt_pass(name, default=None):
"""
Grabs hidden (password) input from command line.
:param name: prompt text
:param default: default value if no input provided.
"""
prompt = name + (default and ' [%s]' % default or '')
prompt += name.endswith('?') and ' ' or ': '
while True:
rv = getpass.getpass(prompt)
if rv:
return rv
if default is not None:
return default
def prompt_bool(name, default=False, yes_choices=None, no_choices=None):
"""
Grabs user input from command line and converts to boolean
value.
:param name: prompt text
:param default: default value if no input provided.
:param yes_choices: default 'y', 'yes', '1', 'on', 'true', 't'
:param no_choices: default 'n', 'no', '0', 'off', 'false', 'f'
"""
yes_choices = yes_choices or ('y', 'yes', '1', 'on', 'true', 't')
no_choices = no_choices or ('n', 'no', '0', 'off', 'false', 'f')
while True:
rv = prompt(name + '?', default and yes_choices[0] or no_choices[0])
if rv.lower() in yes_choices:
return True
elif rv.lower() in no_choices:
return False
def prompt_choices(name, choices, default=None, no_choice=('none',)):
"""
Grabs user input from command line from set of provided choices.
:param name: prompt text
:param choices: list or tuple of available choices.
:param default: default value if no input provided.
:param no_choice: acceptable list of strings for "null choice"
"""
_choices = []
options = []
for choice in choices:
options.append(choice)
_choices.append(choice)
while True:
rv = prompt(name + '? - (%s)' % ', '.join(options), default)
rv = rv.lower()
if rv in no_choice:
return None
if rv in _choices:
return rv
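# Minimal usage sketch (an addition, not part of the original module); the
# prompts below are illustrative and run interactively:
if __name__ == '__main__':
    project_name = prompt('Project name', default='demo')
    use_git = prompt_bool('Initialize a git repository', default=True)
    license_choice = prompt_choices('License', ['mit', 'bsd', 'gpl'], default='mit')
    print((project_name, use_git, license_choice))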
|
whtsky/parguments
|
parguments/cli.py
|
Python
|
mit
| 3,906
|
import unittest
from stomp import backward3
class TestBackward3(unittest.TestCase):
def test_pack_mixed_string_and_bytes(self):
lines = ['SEND', '\n', 'header1:test', '\u6771']
self.assertEqual(backward3.encode(backward3.pack(lines)),
b'SEND\nheader1:test\xe6\x9d\xb1')
lines = ['SEND', '\n', 'header1:test', b'\xe6\x9d\xb1']
self.assertEqual(backward3.encode(backward3.pack(lines)),
b'SEND\nheader1:test\xe6\x9d\xb1')
def test_decode(self):
self.assertTrue(backward3.decode(None) is None)
self.assertEqual('test', backward3.decode(b'test'))
def test_encode(self):
self.assertEqual(b'test', backward3.encode('test'))
self.assertEqual(b'test', backward3.encode(b'test'))
self.assertRaises(TypeError, backward3.encode, None)
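# Note (an addition, not in the upstream file): these tests can be run with
# `python -m unittest` against this module, or by appending the usual guard:
#   if __name__ == '__main__':
#       unittest.main()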
|
GeneralizedLearningUtilities/SuperGLU
|
python_module/stomp/test/p3_backward_test.py
|
Python
|
mit
| 884
|
import collections
class Change(object):
def __init__(self):
super(Change, self).__init__()
def makeChange(self, change):
        coinValues = collections.OrderedDict()
        coinValues['h'] = 50
        coinValues['q'] = 25
        coinValues['d'] = 10
        coinValues['n'] = 5
        coinValues['p'] = 1
        coins = {}
        for key in coinValues:
            while change >= coinValues[key]:
                coins[key] = coins.get(key, 0) + 1
                change -= coinValues[key]
return coins
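# Minimal usage sketch (an addition, not part of the original file): 87 cents
# breaks down greedily into one half-dollar, one quarter, one dime and two pennies.
if __name__ == '__main__':
    change_maker = Change()
    print(change_maker.makeChange(87))  # expected: {'h': 1, 'q': 1, 'd': 1, 'p': 2}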
|
Bjornkjohnson/makeChangePython
|
Change.py
|
Python
|
mit
| 491
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from base64 import b64encode, b64decode
import datetime
import copy
import json
from django.conf import settings
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseNotFound, HttpResponseNotAllowed
from gripcontrol import Channel, HttpResponseFormat, HttpStreamFormat
from django_grip import set_hold_longpoll, set_hold_stream, publish
import redis_ops
def _setting(name, default):
v = getattr(settings, name, None)
if v is None:
return default
return v
db = redis_ops.RedisOps()
grip_prefix = _setting('WHINBOX_GRIP_PREFIX', 'wi-')
orig_headers = _setting('WHINBOX_ORIG_HEADERS', False)
# useful list derived from requestbin
ignore_headers = """
X-Varnish
X-Forwarded-For
X-Heroku-Dynos-In-Use
X-Request-Start
X-Heroku-Queue-Wait-Time
X-Heroku-Queue-Depth
X-Real-Ip
X-Forwarded-Proto
X-Via
X-Forwarded-Port
Grip-Sig
Grip-Feature
Grip-Last
""".split("\n")[1:-1]
def _ignore_header(name):
name = name.lower()
for h in ignore_headers:
if name == h.lower():
return True
return False
def _convert_header_name(name):
out = ''
word_start = True
for c in name:
if c == '_':
out += '-'
word_start = True
elif word_start:
out += c.upper()
word_start = False
else:
out += c.lower()
return out
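# For example (illustrative): Django reports the header 'X-Forwarded-For' as
# the META key 'HTTP_X_FORWARDED_FOR'; after the 'HTTP_' prefix is stripped
# below, _convert_header_name('X_FORWARDED_FOR') returns 'X-Forwarded-For'.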
def _req_to_item(req):
item = dict()
item['method'] = req.method
item['path'] = req.path
query = req.META.get('QUERY_STRING')
if query:
item['query'] = query
raw_headers = list()
content_length = req.META.get('CONTENT_LENGTH')
if content_length:
raw_headers.append(('CONTENT_LENGTH', content_length))
content_type = req.META.get('CONTENT_TYPE')
if content_type:
raw_headers.append(('CONTENT_TYPE', content_type))
for k, v in req.META.iteritems():
if k.startswith('HTTP_'):
raw_headers.append((k[5:], v))
# undjangoify the header names
headers = list()
for h in raw_headers:
headers.append((_convert_header_name(h[0]), h[1]))
if orig_headers:
# if this option is set, then we assume the exact headers are magic prefixed
tmp = list()
for h in headers:
if h[0].lower().startswith('eb9bf0f5-'):
tmp.append((h[0][9:], h[1]))
headers = tmp
else:
# otherwise, use the blacklist to clean things up
tmp = list()
for h in headers:
if not _ignore_header(h[0]):
tmp.append(h)
headers = tmp
item['headers'] = headers
if len(req.body) > 0:
try:
# if the body is valid utf-8, then store as text
item['body'] = req.body.decode('utf-8')
except:
# else, store as binary
item['body-bin'] = b64encode(req.body)
forwardedfor = req.META.get('HTTP_X_FORWARDED_FOR')
if forwardedfor:
ip_address = forwardedfor.split(',')[0].strip()
else:
ip_address = req.META['REMOTE_ADDR']
item['ip_address'] = ip_address
return item
def _convert_item(item, responded=False):
out = copy.deepcopy(item)
created = datetime.datetime.fromtimestamp(item['created']).isoformat()
if len(created) > 0 and created[-1] != 'Z':
created += 'Z'
out['created'] = created
if responded:
out['state'] = 'responded'
else:
out['state'] = 'response-pending'
return out
def root(req):
return HttpResponseNotFound('Not Found\n')
def create(req):
if req.method == 'POST':
host = req.META.get('HTTP_HOST')
if not host:
return HttpResponseBadRequest('Bad Request: No \'Host\' header\n')
inbox_id = req.POST.get('id')
if inbox_id is not None and len(inbox_id) > 64:
return HttpResponseBadRequest('Bad Request: Id length must not exceed 64\n')
ttl = req.POST.get('ttl')
if ttl is not None:
ttl = int(ttl)
if ttl is None:
ttl = 3600
response_mode = req.POST.get('response_mode')
if not response_mode:
response_mode = 'auto'
if response_mode not in ('auto', 'wait-verify', 'wait'):
return HttpResponseBadRequest('Bad Request: response_mode must be "auto", "wait-verify", or "wait"\n')
try:
inbox_id = db.inbox_create(inbox_id, ttl, response_mode)
except redis_ops.InvalidId:
return HttpResponseBadRequest('Bad Request: Invalid id\n')
except redis_ops.ObjectExists:
return HttpResponse('Conflict: Inbox already exists\n', status=409)
except:
return HttpResponse('Service Unavailable\n', status=503)
out = dict()
out['id'] = inbox_id
out['base_url'] = 'http://' + host + '/i/' + inbox_id + '/'
out['ttl'] = ttl
out['response_mode'] = response_mode
return HttpResponse(json.dumps(out) + '\n', content_type='application/json')
else:
return HttpResponseNotAllowed(['POST'])
def inbox(req, inbox_id):
if req.method == 'GET':
host = req.META.get('HTTP_HOST')
if not host:
return HttpResponseBadRequest('Bad Request: No \'Host\' header\n')
try:
inbox = db.inbox_get(inbox_id)
except redis_ops.InvalidId:
return HttpResponseBadRequest('Bad Request: Invalid id\n')
except redis_ops.ObjectDoesNotExist:
return HttpResponseNotFound('Not Found\n')
except:
return HttpResponse('Service Unavailable\n', status=503)
out = dict()
out['id'] = inbox_id
out['base_url'] = 'http://' + host + '/i/' + inbox_id + '/'
out['ttl'] = inbox['ttl']
response_mode = inbox.get('response_mode')
if not response_mode:
response_mode = 'auto'
out['response_mode'] = response_mode
return HttpResponse(json.dumps(out) + '\n', content_type='application/json')
elif req.method == 'DELETE':
try:
db.inbox_delete(inbox_id)
except redis_ops.InvalidId:
return HttpResponseBadRequest('Bad Request: Invalid id\n')
except redis_ops.ObjectDoesNotExist:
return HttpResponseNotFound('Not Found\n')
except:
return HttpResponse('Service Unavailable\n', status=503)
# we'll push a 404 to any long polls because we're that cool
publish(grip_prefix + 'inbox-%s' % inbox_id, HttpResponseFormat(code=404, headers={'Content-Type': 'text/html'}, body='Not Found\n'))
return HttpResponse('Deleted\n')
else:
return HttpResponseNotAllowed(['GET', 'DELETE'])
def refresh(req, inbox_id):
if req.method == 'POST':
ttl = req.POST.get('ttl')
if ttl is not None:
ttl = int(ttl)
try:
db.inbox_refresh(inbox_id, ttl)
except redis_ops.InvalidId:
return HttpResponseBadRequest('Bad Request: Invalid id\n')
except redis_ops.ObjectDoesNotExist:
return HttpResponseNotFound('Not Found\n')
except:
return HttpResponse('Service Unavailable\n', status=503)
return HttpResponse('Refreshed\n')
else:
return HttpResponseNotAllowed(['POST'])
def respond(req, inbox_id, item_id):
if req.method == 'POST':
try:
content = json.loads(req.body)
except:
return HttpResponseBadRequest('Bad Request: Body must be valid JSON\n')
try:
code = content.get('code')
if code is not None:
code = int(code)
else:
code = 200
reason = content.get('reason')
headers = content.get('headers')
if 'body-bin' in content:
body = b64decode(content['body-bin'])
elif 'body' in content:
body = content['body']
else:
body = ''
except:
return HttpResponseBadRequest('Bad Request: Bad format of response\n')
try:
db.request_remove_pending(inbox_id, item_id)
except redis_ops.InvalidId:
return HttpResponseBadRequest('Bad Request: Invalid id\n')
except redis_ops.ObjectDoesNotExist:
return HttpResponseNotFound('Not Found\n')
except:
return HttpResponse('Service Unavailable\n', status=503)
publish(grip_prefix + 'wait-%s-%s' % (inbox_id, item_id), HttpResponseFormat(code=code, reason=reason, headers=headers, body=body), id='1', prev_id='0')
return HttpResponse('Ok\n')
else:
return HttpResponseNotAllowed(['POST'])
def hit(req, inbox_id):
if len(req.grip.last) > 0:
for channel, last_id in req.grip.last.iteritems():
break
set_hold_longpoll(req, Channel(channel, last_id))
return HttpResponse('Service Unavailable\n', status=503, content_type='text/html')
try:
inbox = db.inbox_get(inbox_id)
except redis_ops.InvalidId:
return HttpResponseBadRequest('Bad Request: Invalid id\n')
except redis_ops.ObjectDoesNotExist:
return HttpResponseNotFound('Not Found\n')
except:
return HttpResponse('Service Unavailable\n', status=503)
response_mode = inbox.get('response_mode')
if not response_mode:
response_mode = 'auto'
# pubsubhubbub verify request?
hub_challenge = req.GET.get('hub.challenge')
if response_mode == 'wait' or (response_mode == 'wait-verify' and hub_challenge):
respond_now = False
else:
respond_now = True
item = _req_to_item(req)
if hub_challenge:
item['type'] = 'hub-verify'
else:
item['type'] = 'normal'
try:
item_id, prev_id, item_created = db.inbox_append_item(inbox_id, item)
db.inbox_clear_expired_items(inbox_id)
except redis_ops.InvalidId:
return HttpResponseBadRequest('Bad Request: Invalid id\n')
except redis_ops.ObjectDoesNotExist:
return HttpResponseNotFound('Not Found\n')
except:
return HttpResponse('Service Unavailable\n', status=503)
item['id'] = item_id
item['created'] = item_created
item = _convert_item(item, respond_now)
hr_headers = dict()
hr_headers['Content-Type'] = 'application/json'
hr = dict()
hr['last_cursor'] = item_id
hr['items'] = [item]
hr_body = json.dumps(hr) + '\n'
hs_body = json.dumps(item) + '\n'
formats = list()
formats.append(HttpResponseFormat(headers=hr_headers, body=hr_body))
formats.append(HttpStreamFormat(hs_body))
publish(grip_prefix + 'inbox-%s' % inbox_id, formats, id=item_id, prev_id=prev_id)
if respond_now:
if hub_challenge:
return HttpResponse(hub_challenge)
else:
return HttpResponse('Ok\n')
else:
# wait for the user to respond
db.request_add_pending(inbox_id, item_id)
set_hold_longpoll(req, Channel(grip_prefix + 'wait-%s-%s' % (inbox_id, item_id), '0'))
return HttpResponse('Service Unavailable\n', status=503, content_type='text/html')
def items(req, inbox_id):
if req.method == 'GET':
try:
db.inbox_refresh(inbox_id)
except redis_ops.InvalidId:
return HttpResponseBadRequest('Bad Request: Invalid id\n')
except redis_ops.ObjectDoesNotExist:
return HttpResponseNotFound('Not Found\n')
except:
return HttpResponse('Service Unavailable\n', status=503)
order = req.GET.get('order')
if order and order not in ('created', '-created'):
return HttpResponseBadRequest('Bad Request: Invalid order value\n')
if not order:
order = 'created'
imax = req.GET.get('max')
if imax:
try:
imax = int(imax)
if imax < 1:
raise ValueError('max too small')
except:
return HttpResponseBadRequest('Bad Request: Invalid max value\n')
if not imax or imax > 50:
imax = 50
since = req.GET.get('since')
since_id = None
since_cursor = None
if since:
if since.startswith('id:'):
since_id = since[3:]
elif since.startswith('cursor:'):
since_cursor = since[7:]
else:
return HttpResponseBadRequest('Bad Request: Invalid since value\n')
# at the moment, cursor is identical to id
item_id = None
if since_id:
item_id = since_id
elif since_cursor:
item_id = since_cursor
if order == 'created':
try:
items, last_id = db.inbox_get_items_after(inbox_id, item_id, imax)
except redis_ops.InvalidId:
return HttpResponseBadRequest('Bad Request: Invalid id\n')
except redis_ops.ObjectDoesNotExist:
return HttpResponseNotFound('Not Found\n')
except:
return HttpResponse('Service Unavailable\n', status=503)
out = dict()
out['last_cursor'] = last_id
out_items = list()
for i in items:
out_items.append(_convert_item(i, not db.request_is_pending(inbox_id, i['id'])))
out['items'] = out_items
if len(out_items) == 0:
set_hold_longpoll(req, Channel(grip_prefix + 'inbox-%s' % inbox_id, last_id))
return HttpResponse(json.dumps(out) + '\n', content_type='application/json')
else: # -created
try:
items, last_id, eof = db.inbox_get_items_before(inbox_id, item_id, imax)
except redis_ops.InvalidId:
return HttpResponseBadRequest('Bad Request: Invalid id\n')
except redis_ops.ObjectDoesNotExist:
return HttpResponseNotFound('Not Found\n')
except:
return HttpResponse('Service Unavailable\n', status=503)
out = dict()
if not eof and last_id:
out['last_cursor'] = last_id
out_items = list()
for i in items:
out_items.append(_convert_item(i, not db.request_is_pending(inbox_id, i['id'])))
out['items'] = out_items
return HttpResponse(json.dumps(out) + '\n', content_type='application/json')
else:
return HttpResponseNotAllowed(['GET'])
def stream(req, inbox_id):
if req.method == 'GET':
try:
db.inbox_get(inbox_id)
except redis_ops.InvalidId:
return HttpResponseBadRequest('Bad Request: Invalid id\n')
except redis_ops.ObjectDoesNotExist:
return HttpResponseNotFound('Not Found\n')
except:
return HttpResponse('Service Unavailable\n', status=503)
set_hold_stream(req, grip_prefix + 'inbox-%s' % inbox_id)
return HttpResponse('[opened]\n', content_type='text/plain')
else:
return HttpResponseNotAllowed(['GET'])
|
fanout/webhookinbox
|
api/views.py
|
Python
|
mit
| 12,930
|
# THIS FILE IS AUTO-GENERATED. DO NOT EDIT
from verta._swagger.base_type import BaseType
class UacGetOrganizationByIdResponse(BaseType):
def __init__(self, organization=None):
required = {
"organization": False,
}
self.organization = organization
for k, v in required.items():
if self[k] is None and v:
raise ValueError('attribute {} is required'.format(k))
@staticmethod
def from_json(d):
from .UacOrganization import UacOrganization
tmp = d.get('organization', None)
if tmp is not None:
d['organization'] = UacOrganization.from_json(tmp)
return UacGetOrganizationByIdResponse(**d)
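# Illustrative usage (an addition, not part of the generated file): given a
# decoded JSON payload from the service, e.g. parsed with json.loads:
#   response = UacGetOrganizationByIdResponse.from_json(payload_dict)
#   organization = response.organization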
|
mitdbg/modeldb
|
client/verta/verta/_swagger/_public/uac/model/UacGetOrganizationByIdResponse.py
|
Python
|
mit
| 653
|
from fibonacci import Fibonacci
def ans():
return Fibonacci.index(Fibonacci.after(int('9' * 999)))
if __name__ == '__main__':
print(ans())
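# Equivalent self-contained sketch (an assumption about the behaviour of the
# local `fibonacci` helpers, which are not shown here): find the index of the
# first Fibonacci term that exceeds the largest 999-digit number, i.e. the
# first term with 1000 digits.
def first_fibonacci_index_above(limit):
    a, b, index = 1, 1, 2  # F(1) = F(2) = 1
    while b <= limit:
        a, b = b, a + b
        index += 1
    return index
# print(first_fibonacci_index_above(int('9' * 999)))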
|
mackorone/euler
|
src/025.py
|
Python
|
mit
| 155
|
#! /usr/bin/python
import yaml
def main():
#f = open("data.yaml", "r")
f = open("data2.yaml", "r")
yd = yaml.load(f)
#print "YAML Data: %s" % str(yd)
for key in yd:
print "%s" % key
print "Type: %s" % str(type(yd[key]))
print str(yd[key])
print ""
def yaml_test():
#f = open("data.yaml", "r")
f = open("data2.yaml", "r")
yd = yaml.load(f)
#print "YAML Data: %s" % str(yd)
for key in yd:
print "%s" % key
print "Type: %s" % str(type(yd[key]))
print str(yd[key])
print ""
if __name__ == "__main__":
main()
|
CospanDesign/python
|
yaml/example1.py
|
Python
|
mit
| 620
|
# -*- coding: utf-8 -*-
import zmq
import random
import time
def run():
context = zmq.Context()
sender = context.socket(zmq.PUSH)
sender.bind('tcp://*:5557')
sink = context.socket(zmq.PUSH)
sink.connect('tcp://localhost:5558')
print 'Press Enter when the workers are ready: '
_ = raw_input()
    print('sending tasks to workers...')
sink.send(b'0')
random.seed()
total_msec = 0
for task_nbr in xrange(100):
workload = random.randint(1, 100)
total_msec += workload
sender.send_string(u'%i' % workload)
print 'Total expected cost: %s msec' % total_msec
time.sleep(1)
if __name__ == '__main__':
run()
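# Illustrative companion sketch (an assumption, not part of this repo): the
# matching worker in the ventilator/worker/sink pattern would PULL tasks from
# port 5557, simulate each workload, and PUSH a result to the sink on 5558:
#   receiver = context.socket(zmq.PULL)
#   receiver.connect('tcp://localhost:5557')
#   results = context.socket(zmq.PUSH)
#   results.connect('tcp://localhost:5558')
#   while True:
#       msec = int(receiver.recv())
#       time.sleep(msec / 1000.0)
#       results.send(b'')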
|
disenone/zsync
|
test/parallel_task/ventilator.py
|
Python
|
mit
| 694
|
from __future__ import absolute_import, print_function, division
import copy
import os
import re
import urwid
from mitmproxy import filt
from mitmproxy import script
from mitmproxy import utils
from mitmproxy.console import common
from mitmproxy.console import signals
from netlib.http import cookies
from netlib.http import user_agents
FOOTER = [
('heading_key', "enter"), ":edit ",
('heading_key', "q"), ":back ",
]
FOOTER_EDITING = [
('heading_key', "esc"), ":stop editing ",
]
class TextColumn:
subeditor = None
def __init__(self, heading):
self.heading = heading
def text(self, obj):
return SEscaped(obj or "")
def blank(self):
return ""
def keypress(self, key, editor):
if key == "r":
if editor.walker.get_current_value() is not None:
signals.status_prompt_path.send(
self,
prompt = "Read file",
callback = editor.read_file
)
elif key == "R":
if editor.walker.get_current_value() is not None:
signals.status_prompt_path.send(
editor,
prompt = "Read unescaped file",
callback = editor.read_file,
args = (True,)
)
elif key == "e":
o = editor.walker.get_current_value()
if o is not None:
n = editor.master.spawn_editor(o.encode("string-escape"))
n = utils.clean_hanging_newline(n)
editor.walker.set_current_value(n, False)
editor.walker._modified()
elif key in ["enter"]:
editor.walker.start_edit()
else:
return key
class SubgridColumn:
def __init__(self, heading, subeditor):
self.heading = heading
self.subeditor = subeditor
def text(self, obj):
p = cookies._format_pairs(obj, sep="\n")
return urwid.Text(p)
def blank(self):
return []
def keypress(self, key, editor):
if key in "rRe":
signals.status_message.send(
self,
message = "Press enter to edit this field.",
expire = 1000
)
return
elif key in ["enter"]:
editor.master.view_grideditor(
self.subeditor(
editor.master,
editor.walker.get_current_value(),
editor.set_subeditor_value,
editor.walker.focus,
editor.walker.focus_col
)
)
else:
return key
class SEscaped(urwid.WidgetWrap):
def __init__(self, txt):
txt = txt.encode("string-escape")
w = urwid.Text(txt, wrap="any")
urwid.WidgetWrap.__init__(self, w)
def get_text(self):
return self._w.get_text()[0]
def keypress(self, size, key):
return key
def selectable(self):
return True
class SEdit(urwid.WidgetWrap):
def __init__(self, txt):
txt = txt.encode("string-escape")
w = urwid.Edit(edit_text=txt, wrap="any", multiline=True)
w = urwid.AttrWrap(w, "editfield")
urwid.WidgetWrap.__init__(self, w)
def get_text(self):
return self._w.get_text()[0].strip()
def selectable(self):
return True
class GridRow(urwid.WidgetWrap):
def __init__(self, focused, editing, editor, values):
self.focused, self.editing, self.editor = focused, editing, editor
errors = values[1]
self.fields = []
for i, v in enumerate(values[0]):
if focused == i and editing:
self.editing = SEdit(v)
self.fields.append(self.editing)
else:
w = self.editor.columns[i].text(v)
if focused == i:
if i in errors:
w = urwid.AttrWrap(w, "focusfield_error")
else:
w = urwid.AttrWrap(w, "focusfield")
elif i in errors:
w = urwid.AttrWrap(w, "field_error")
self.fields.append(w)
fspecs = self.fields[:]
if len(self.fields) > 1:
fspecs[0] = ("fixed", self.editor.first_width + 2, fspecs[0])
w = urwid.Columns(
fspecs,
dividechars = 2
)
if focused is not None:
w.set_focus_column(focused)
urwid.WidgetWrap.__init__(self, w)
def get_edit_value(self):
return self.editing.get_text()
def keypress(self, s, k):
if self.editing:
w = self._w.column_widths(s)[self.focused]
k = self.editing.keypress((w,), k)
return k
def selectable(self):
return True
class GridWalker(urwid.ListWalker):
"""
Stores rows as a list of (rows, errors) tuples, where rows is a list
and errors is a set with an entry of each offset in rows that is an
error.
"""
def __init__(self, lst, editor):
self.lst = [(i, set([])) for i in lst]
self.editor = editor
self.focus = 0
self.focus_col = 0
self.editing = False
def _modified(self):
self.editor.show_empty_msg()
return urwid.ListWalker._modified(self)
def add_value(self, lst):
self.lst.append((lst[:], set([])))
self._modified()
def get_current_value(self):
if self.lst:
return self.lst[self.focus][0][self.focus_col]
def set_current_value(self, val, unescaped):
if not unescaped:
try:
val = val.decode("string-escape")
except ValueError:
signals.status_message.send(
self,
message = "Invalid Python-style string encoding.",
expire = 1000
)
return
errors = self.lst[self.focus][1]
emsg = self.editor.is_error(self.focus_col, val)
if emsg:
signals.status_message.send(message = emsg, expire = 1)
errors.add(self.focus_col)
else:
errors.discard(self.focus_col)
self.set_value(val, self.focus, self.focus_col, errors)
def set_value(self, val, focus, focus_col, errors=None):
if not errors:
errors = set([])
row = list(self.lst[focus][0])
row[focus_col] = val
self.lst[focus] = [tuple(row), errors]
self._modified()
def delete_focus(self):
if self.lst:
del self.lst[self.focus]
self.focus = min(len(self.lst) - 1, self.focus)
self._modified()
def _insert(self, pos):
self.focus = pos
self.lst.insert(
self.focus,
[
[c.blank() for c in self.editor.columns], set([])
]
)
self.focus_col = 0
self.start_edit()
def insert(self):
return self._insert(self.focus)
def add(self):
return self._insert(min(self.focus + 1, len(self.lst)))
def start_edit(self):
col = self.editor.columns[self.focus_col]
if self.lst and not col.subeditor:
self.editing = GridRow(
self.focus_col, True, self.editor, self.lst[self.focus]
)
self.editor.master.loop.widget.footer.update(FOOTER_EDITING)
self._modified()
def stop_edit(self):
if self.editing:
self.editor.master.loop.widget.footer.update(FOOTER)
self.set_current_value(self.editing.get_edit_value(), False)
self.editing = False
self._modified()
def left(self):
self.focus_col = max(self.focus_col - 1, 0)
self._modified()
def right(self):
self.focus_col = min(self.focus_col + 1, len(self.editor.columns) - 1)
self._modified()
def tab_next(self):
self.stop_edit()
if self.focus_col < len(self.editor.columns) - 1:
self.focus_col += 1
elif self.focus != len(self.lst) - 1:
self.focus_col = 0
self.focus += 1
self._modified()
def get_focus(self):
if self.editing:
return self.editing, self.focus
elif self.lst:
return GridRow(
self.focus_col,
False,
self.editor,
self.lst[self.focus]
), self.focus
else:
return None, None
def set_focus(self, focus):
self.stop_edit()
self.focus = focus
self._modified()
def get_next(self, pos):
if pos + 1 >= len(self.lst):
return None, None
return GridRow(None, False, self.editor, self.lst[pos + 1]), pos + 1
def get_prev(self, pos):
if pos - 1 < 0:
return None, None
return GridRow(None, False, self.editor, self.lst[pos - 1]), pos - 1
class GridListBox(urwid.ListBox):
def __init__(self, lw):
urwid.ListBox.__init__(self, lw)
FIRST_WIDTH_MAX = 40
FIRST_WIDTH_MIN = 20
class GridEditor(urwid.WidgetWrap):
title = None
columns = None
def __init__(self, master, value, callback, *cb_args, **cb_kwargs):
value = self.data_in(copy.deepcopy(value))
self.master, self.value, self.callback = master, value, callback
self.cb_args, self.cb_kwargs = cb_args, cb_kwargs
first_width = 20
if value:
for r in value:
assert len(r) == len(self.columns)
first_width = max(len(r), first_width)
self.first_width = min(first_width, FIRST_WIDTH_MAX)
title = urwid.Text(self.title)
title = urwid.Padding(title, align="left", width=("relative", 100))
title = urwid.AttrWrap(title, "heading")
headings = []
for i, col in enumerate(self.columns):
c = urwid.Text(col.heading)
if i == 0 and len(self.columns) > 1:
headings.append(("fixed", first_width + 2, c))
else:
headings.append(c)
h = urwid.Columns(
headings,
dividechars = 2
)
h = urwid.AttrWrap(h, "heading")
self.walker = GridWalker(self.value, self)
self.lb = GridListBox(self.walker)
self._w = urwid.Frame(
self.lb,
header = urwid.Pile([title, h])
)
self.master.loop.widget.footer.update("")
self.show_empty_msg()
def show_empty_msg(self):
if self.walker.lst:
self._w.set_footer(None)
else:
self._w.set_footer(
urwid.Text(
[
("highlight", "No values. Press "),
("key", "a"),
("highlight", " to add some."),
]
)
)
def encode(self, s):
if not self.encoding:
return s
try:
return s.encode(self.encoding)
except ValueError:
return None
def read_file(self, p, unescaped=False):
if p:
try:
p = os.path.expanduser(p)
d = file(p, "rb").read()
self.walker.set_current_value(d, unescaped)
self.walker._modified()
except IOError as v:
return str(v)
def set_subeditor_value(self, val, focus, focus_col):
self.walker.set_value(val, focus, focus_col)
def keypress(self, size, key):
if self.walker.editing:
if key in ["esc"]:
self.walker.stop_edit()
elif key == "tab":
pf, pfc = self.walker.focus, self.walker.focus_col
self.walker.tab_next()
if self.walker.focus == pf and self.walker.focus_col != pfc:
self.walker.start_edit()
else:
self._w.keypress(size, key)
return None
key = common.shortcuts(key)
column = self.columns[self.walker.focus_col]
if key in ["q", "esc"]:
res = []
for i in self.walker.lst:
if not i[1] and any([x for x in i[0]]):
res.append(i[0])
self.callback(self.data_out(res), *self.cb_args, **self.cb_kwargs)
signals.pop_view_state.send(self)
elif key == "g":
self.walker.set_focus(0)
elif key == "G":
self.walker.set_focus(len(self.walker.lst) - 1)
elif key in ["h", "left"]:
self.walker.left()
elif key in ["l", "right"]:
self.walker.right()
elif key == "tab":
self.walker.tab_next()
elif key == "a":
self.walker.add()
elif key == "A":
self.walker.insert()
elif key == "d":
self.walker.delete_focus()
elif column.keypress(key, self) and not self.handle_key(key):
return self._w.keypress(size, key)
def data_out(self, data):
"""
Called on raw list data, before data is returned through the
callback.
"""
return data
def data_in(self, data):
"""
Called to prepare provided data.
"""
return data
def is_error(self, col, val):
"""
Return False, or a string error message.
"""
return False
def handle_key(self, key):
return False
def make_help(self):
text = []
text.append(urwid.Text([("text", "Editor control:\n")]))
keys = [
("A", "insert row before cursor"),
("a", "add row after cursor"),
("d", "delete row"),
("e", "spawn external editor on current field"),
("q", "save changes and exit editor"),
("r", "read value from file"),
("R", "read unescaped value from file"),
("esc", "save changes and exit editor"),
("tab", "next field"),
("enter", "edit field"),
]
text.extend(
common.format_keyvals(keys, key="key", val="text", indent=4)
)
text.append(
urwid.Text(
[
"\n",
("text", "Values are escaped Python-style strings.\n"),
]
)
)
return text
class QueryEditor(GridEditor):
title = "Editing query"
columns = [
TextColumn("Key"),
TextColumn("Value")
]
class HeaderEditor(GridEditor):
title = "Editing headers"
columns = [
TextColumn("Key"),
TextColumn("Value")
]
def make_help(self):
h = GridEditor.make_help(self)
text = []
text.append(urwid.Text([("text", "Special keys:\n")]))
keys = [
("U", "add User-Agent header"),
]
text.extend(
common.format_keyvals(keys, key="key", val="text", indent=4)
)
text.append(urwid.Text([("text", "\n")]))
text.extend(h)
return text
def set_user_agent(self, k):
ua = user_agents.get_by_shortcut(k)
if ua:
self.walker.add_value(
[
"User-Agent",
ua[2]
]
)
def handle_key(self, key):
if key == "U":
signals.status_prompt_onekey.send(
prompt = "Add User-Agent header:",
keys = [(i[0], i[1]) for i in user_agents.UASTRINGS],
callback = self.set_user_agent,
)
return True
class URLEncodedFormEditor(GridEditor):
title = "Editing URL-encoded form"
columns = [
TextColumn("Key"),
TextColumn("Value")
]
class ReplaceEditor(GridEditor):
title = "Editing replacement patterns"
columns = [
TextColumn("Filter"),
TextColumn("Regex"),
TextColumn("Replacement"),
]
def is_error(self, col, val):
if col == 0:
if not filt.parse(val):
return "Invalid filter specification."
elif col == 1:
try:
re.compile(val)
except re.error:
return "Invalid regular expression."
return False
class SetHeadersEditor(GridEditor):
title = "Editing header set patterns"
columns = [
TextColumn("Filter"),
TextColumn("Header"),
TextColumn("Value"),
]
def is_error(self, col, val):
if col == 0:
if not filt.parse(val):
return "Invalid filter specification"
return False
def make_help(self):
h = GridEditor.make_help(self)
text = []
text.append(urwid.Text([("text", "Special keys:\n")]))
keys = [
("U", "add User-Agent header"),
]
text.extend(
common.format_keyvals(keys, key="key", val="text", indent=4)
)
text.append(urwid.Text([("text", "\n")]))
text.extend(h)
return text
def set_user_agent(self, k):
ua = user_agents.get_by_shortcut(k)
if ua:
self.walker.add_value(
[
".*",
"User-Agent",
ua[2]
]
)
def handle_key(self, key):
if key == "U":
signals.status_prompt_onekey.send(
prompt = "Add User-Agent header:",
keys = [(i[0], i[1]) for i in user_agents.UASTRINGS],
callback = self.set_user_agent,
)
return True
class PathEditor(GridEditor):
title = "Editing URL path components"
columns = [
TextColumn("Component"),
]
def data_in(self, data):
return [[i] for i in data]
def data_out(self, data):
return [i[0] for i in data]
class ScriptEditor(GridEditor):
title = "Editing scripts"
columns = [
TextColumn("Command"),
]
def is_error(self, col, val):
try:
script.Script.parse_command(val)
except script.ScriptException as e:
return str(e)
class HostPatternEditor(GridEditor):
title = "Editing host patterns"
columns = [
TextColumn("Regex (matched on hostname:port / ip:port)")
]
def is_error(self, col, val):
try:
re.compile(val, re.IGNORECASE)
except re.error as e:
return "Invalid regex: %s" % str(e)
def data_in(self, data):
return [[i] for i in data]
def data_out(self, data):
return [i[0] for i in data]
class CookieEditor(GridEditor):
title = "Editing request Cookie header"
columns = [
TextColumn("Name"),
TextColumn("Value"),
]
class CookieAttributeEditor(GridEditor):
title = "Editing Set-Cookie attributes"
columns = [
TextColumn("Name"),
TextColumn("Value"),
]
def data_out(self, data):
ret = []
for i in data:
if not i[1]:
ret.append([i[0], None])
else:
ret.append(i)
return ret
class SetCookieEditor(GridEditor):
title = "Editing response SetCookie header"
columns = [
TextColumn("Name"),
TextColumn("Value"),
SubgridColumn("Attributes", CookieAttributeEditor),
]
def data_in(self, data):
flattened = []
for key, (value, attrs) in data:
flattened.append([key, value, attrs.items(multi=True)])
return flattened
def data_out(self, data):
vals = []
for key, value, attrs in data:
vals.append(
[
key,
(value, attrs)
]
)
return vals
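# --- Illustrative sketch (not part of the original module) ---
# A standalone rendering of the CookieAttributeEditor.data_out rule above:
# attributes entered with an empty value (e.g. "HttpOnly") come back as
# [name, None] pairs so callers can emit a valueless cookie attribute.
def _attrs_data_out_sketch(rows):
    out = []
    for name, value in rows:
        out.append([name, None] if not value else [name, value])
    return out
if __name__ == "__main__":
    assert _attrs_data_out_sketch([["Path", "/"], ["HttpOnly", ""]]) == \
        [["Path", "/"], ["HttpOnly", None]]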
|
tdickers/mitmproxy
|
mitmproxy/console/grideditor.py
|
Python
|
mit
| 19,995
|
#!/usr/bin/env python
# A library to scrape statistics from Arris CM820 and similar cable modems
# Inspired by https://gist.github.com/berg/2651577
import BeautifulSoup
import requests
import time
cm_time_format = '%a %Y-%m-%d %H:%M:%S'
def get_status(baseurl):
# Retrieve and process the page from the modem
url = baseurl + 'status_cgi'
pagedata = requests.get(url).content
timestamp = time.time() # Get the time immediately after retrieval
bs = BeautifulSoup.BeautifulSoup(pagedata)
downstream_table = bs.findAll('table')[1].findAll('tr')[1:]
upstream_table = bs.findAll('table')[3].findAll('tr')[2:]
status_table = bs.findAll('table')[5].findAll('tr')
interface_table = bs.findAll('table')[7].findAll('tr')[1:]
downstream_stats = []
for row in downstream_table:
cols = row.findAll('td')
modem_channel = int(cols[0].string.strip()[-1])
docsis_channel = int(cols[1].string.strip())
frequency = float(cols[2].string.strip().split()[0])
if cols[3].string.strip() == '----':
channel_available = False
power = None
snr = None
modulation = None
octets = None
corrected_errors = None
uncorrectable_errors = None
else:
power = float(cols[3].string.strip().split()[0])
snr = float(cols[4].string.strip().split()[0])
modulation = cols[5].string.strip()
octets = int(cols[6].string.strip())
corrected_errors = int(cols[7].string.strip())
uncorrectable_errors = int(cols[8].string.strip())
channelstats = {'modem_channel': modem_channel,
'dcid': docsis_channel,
'frequency': frequency,
'power': power,
'snr': snr,
'modulation': modulation,
'octets': octets,
'corrected_errors': corrected_errors,
'uncorrectable_errors': uncorrectable_errors}
downstream_stats.append(channelstats)
upstream_stats = []
for row in upstream_table:
cols = row.findAll('td')
modem_channel = int(cols[0].string.strip()[-1])
docsis_channel = int(cols[1].string.strip())
frequency = float(cols[2].string.strip().split()[0])
power = float(cols[3].string.strip().split()[0])
channel_type = cols[4].string.strip()
symbol_rate = int(cols[5].string.strip().split()[0]) * 1000
modulation = cols[6].string.strip()
channelstats = {'modem_channel': modem_channel,
'ucid': docsis_channel,
'frequency': frequency,
'power': power,
'channel_type': channel_type,
'symbol_rate': symbol_rate,
'modulation': modulation}
upstream_stats.append(channelstats)
uptime_split = status_table[0].findAll('td')[1].string.strip().split(':')
uptime_days = int(uptime_split[0].strip().split()[0])
uptime_hours = int(uptime_split[1].strip().split()[0])
uptime_minutes = int(uptime_split[2].strip().split()[0])
uptime = ((((uptime_days * 24) + uptime_hours) * 60) + uptime_minutes) * 60
cpe_split = status_table[1].findAll('td')[1].string.strip().split(',')
cpelist = {}
for entry in cpe_split:
entrystripped = entry.strip()
entrysplit = entrystripped.split('CPE')
cpe_type = entrysplit[0]
cpe_count = int(entrysplit[1].strip('()'))
cpelist[cpe_type] = cpe_count
cm_status = status_table[2].findAll('td')[1].string.strip()
cm_time_string = status_table[3].findAll('td')[1].string.strip()
cm_time = time.mktime(time.strptime(cm_time_string, cm_time_format))
modem_status = {'uptime': uptime,
'cpe': cpelist,
'cm_status': cm_status,
'cm_time': cm_time}
interfaces = []
for row in interface_table:
cols = row.findAll('td')
interface_name = cols[0].string.strip()
provisioning_state = cols[1].string.strip()
interface_state = cols[2].string.strip()
interface_speed = cols[3].string.strip()
mac = cols[4].string.strip()
interface_data = {'name': interface_name,
'provisioned': provisioning_state,
'state': interface_state,
'speed': interface_speed,
'mac': mac}
interfaces.append(interface_data)
status = {'timestamp': timestamp,
'status': modem_status,
'downstream': downstream_stats,
'upstream': upstream_stats,
'interfaces': interfaces}
return status
def get_versions(baseurl):
raise NotImplementedError()
def get_eventlog(baseurl):
raise NotImplementedError()
def get_cmstate(baseurl):
raise NotImplementedError()
def get_productdetails(baseurl):
raise NotImplementedError()
def get_dhcpparams(baseurl):
raise NotImplementedError()
def get_qos(url):
raise NotImplementedError()
def get_config(url):
raise NotImplementedError()
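# --- Illustrative usage sketch (not part of the original file) ---
# Assumes the modem answers on 192.168.100.1, a common default management
# address for DOCSIS modems; adjust the base URL for your network. Power and
# SNR may be None for channels the modem reports as unavailable.
if __name__ == '__main__':
    stats = get_status('http://192.168.100.1/')
    print('uptime: %d seconds' % stats['status']['uptime'])
    for channel in stats['downstream']:
        print('DS %s: power=%s dBmV snr=%s dB' %
              (channel['modem_channel'], channel['power'], channel['snr']))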
|
wolrah/arris_stats
|
arris_scraper.py
|
Python
|
mit
| 5,303
|
import logging
from src.settings import JINJA_ENVIRONMENT
from src.base import BaseHandler
from src.main.models import Torrent, UserTorrent
from google.appengine.ext import ndb
from google.appengine.api import users
import arrow
from time import sleep
class IndexPage(BaseHandler):
def get(self):
# new movies
self.template_values['movies'] = Torrent.query(Torrent.category_code == 207, Torrent.uploader == 'YIFY', Torrent.resolution == 720).order(-Torrent.uploaded_at).fetch(30)
# new series
self.template_values['series_new'] = Torrent.query(Torrent.category_code == 205, Torrent.series_episode == 1).order(-Torrent.uploaded_at).fetch(15)
episodes_new = []
series_watching = []
# watching series
uts = UserTorrent.query(UserTorrent.user == users.get_current_user(), UserTorrent.category_code == 205).fetch()
if uts:
series_watching = set()
for ut in [ut for ut in uts if ut.torrent.get().series_title]:
series_watching.add(ut.torrent.get().series_title)
logging.info('{0} series being watched by user'.format(len(uts)))
# new episodes
if series_watching:
cutoff = arrow.utcnow().replace(days=-14).datetime
episodes_new = Torrent.query(Torrent.series_title.IN(series_watching), Torrent.uploaded_at > cutoff, Torrent.category_code == 205).order(-Torrent.uploaded_at).fetch()
logging.info('{0} episodes fetched for watched series'.format(len(episodes_new)))
self.template_values['series_watching'] = series_watching
self.template_values['episodes_new'] = episodes_new
# logging.info('{0}'.format(self.template_values))
template = JINJA_ENVIRONMENT.get_template('main/templates/index.html')
self.response.write(template.render(self.template_values))
class CategoryPage(BaseHandler):
def get(self, cat):
logging.info('cat {0}'.format(cat))
self.template_values['cat'] = int(cat)
# get torrents
torrents = Torrent.query(Torrent.category_code == int(cat)).order(-Torrent.uploaded_at).fetch()
self.template_values['torrents'] = torrents
logging.info('torrents {0}'.format(len(torrents)))
template = JINJA_ENVIRONMENT.get_template('main/templates/category.html')
self.response.write(template.render(self.template_values))
class DownloadPage(BaseHandler):
def get(self, key):
logging.info('download {0}'.format(key))
logging.info('user {0}'.format(self.user))
torrent = ndb.Key(urlsafe=key).get()
logging.info('torrent {0}'.format(torrent))
ut = UserTorrent.query(UserTorrent.user == self.user, UserTorrent.torrent == torrent.key).get()
if not ut:
ut = UserTorrent(user=self.user, torrent=torrent.key, category_code=torrent.category_code)
ut.put()
logging.info('User Torrent saved')
else:
ut.key.delete()
logging.info('User Torrent deleted')
logging.info('User Torrent {0}'.format(ut))
self.response.status = '200 OK'
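# --- Illustrative wiring sketch (hypothetical; not part of the original file) ---
# The handlers above are webapp2-style request handlers; an application module
# would typically map them to routes along these lines:
#   app = webapp2.WSGIApplication([
#       ('/', IndexPage),
#       (r'/category/(\d+)', CategoryPage),
#       (r'/download/([^/]+)', DownloadPage),
#   ])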
|
Tjorriemorrie/trading
|
18_theoryofruns/app_old/src/main/main.py
|
Python
|
mit
| 3,167
|
from django.shortcuts import render
from rest_framework import viewsets
from waterfall_wall.serializers import ImageSerializer
from waterfall_wall.models import Image
def index(request):
context = {}
return render(request, 'waterfall_wall/index.html', context)
class ImageViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows images to be viewed.
"""
queryset = Image.objects.all()
serializer_class = ImageSerializer
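# --- Illustrative wiring sketch (hypothetical urls.py; not part of this file) ---
# The viewset above is normally exposed through a DRF router, e.g.:
#   from rest_framework import routers
#   router = routers.DefaultRouter()
#   router.register(r'images', ImageViewSet)
# and router.urls is then included in the project's urlpatterns.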
|
carlcarl/rcard
|
waterfall_wall/views.py
|
Python
|
mit
| 455
|
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from photos.models import PhotoSceneCategory
from photos.add import add_photo
#from licenses.models import License
class Command(BaseCommand):
args = '<flickr_dir>'
help = 'Adds photos from flickr'
def handle(self, *args, **options):
admin_user = User.objects.get_or_create(
username='admin')[0].get_profile()
print 'user:', admin_user
name = 'kitchen'
scene_category, _ = PhotoSceneCategory.objects \
.get_or_create(name=name)
path = args[0]
if not path:
print 'No path'
return
try:
photo = add_photo(
path=path,
user=admin_user,
scene_category=scene_category,
flickr_user=None,
flickr_id=None,
license=None,
exif='',
fov=None,
)
except Exception as e:
print '\nNot adding photo:', e
else:
print '\nAdded photo:', path
photo.synthetic = True
photo.save()
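# --- Illustrative usage (hypothetical path; not part of the original file) ---
#   python manage.py add_special /data/photos/kitchen_001.jpg
# The single positional argument is the path handed to add_photo() above; the
# photo is tagged with the 'kitchen' scene category and marked synthetic.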
|
seanbell/opensurfaces
|
server/photos/management/commands/add_special.py
|
Python
|
mit
| 1,191
|
# -*- coding: utf-8 -*-
from __future__ import print_function
# pylint: disable=W0141
import sys
from pandas.core.base import PandasObject
from pandas.core.common import adjoin, notnull
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas import compat
from pandas.compat import(StringIO, lzip, range, map, zip, reduce, u,
OrderedDict)
from pandas.util.terminal import get_terminal_size
from pandas.core.config import get_option, set_option
import pandas.core.common as com
import pandas.lib as lib
from pandas.tslib import iNaT, Timestamp, Timedelta, format_array_from_datetime
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.period import PeriodIndex
import numpy as np
import itertools
import csv
docstring_to_string = """
Parameters
----------
frame : DataFrame
object to render
buf : StringIO-like, optional
buffer to write to
columns : sequence, optional
the subset of columns to write; default None writes all columns
col_space : int, optional
the minimum width of each column
header : bool, optional
whether to print column labels, default True
index : bool, optional
whether to print index (row) labels, default True
na_rep : string, optional
string representation of NAN to use, default 'NaN'
formatters : list or dict of one-parameter functions, optional
formatter functions to apply to columns' elements by position or name,
default None. The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function, optional
formatter function to apply to columns' elements if they are floats,
default None. The result of this function must be a unicode string.
sparsify : bool, optional
Set to False for a DataFrame with a hierarchical index to print every
multiindex key at each row, default True
justify : {'left', 'right'}, default None
Left or right-justify the column labels. If None uses the option from
the print configuration (controlled by set_option), 'right' out
of the box.
index_names : bool, optional
Prints the names of the indexes, default True
force_unicode : bool, default False
Always return a unicode result. Deprecated in v0.10.0 as string
formatting is now rendered to unicode by default.
Returns
-------
formatted : string (or unicode, depending on data and options)"""
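# --- Illustrative sketch (not part of the original module) ---
# The parameters documented above surface through DataFrame.to_string(), e.g.:
#   df = DataFrame({'price': [1.5, 2.25], 'qty': [3, 4]})
#   print(df.to_string(float_format=lambda x: '%.2f' % x, index=False))
# renders the frame without the row index and with two-decimal floats.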
class CategoricalFormatter(object):
def __init__(self, categorical, buf=None, length=True,
na_rep='NaN', name=False, footer=True):
self.categorical = categorical
self.buf = buf if buf is not None else StringIO(u(""))
self.name = name
self.na_rep = na_rep
self.length = length
self.footer = footer
def _get_footer(self):
footer = ''
if self.name:
name = com.pprint_thing(self.categorical.name,
escape_chars=('\t', '\r', '\n'))
footer += ('Name: %s' % name if self.categorical.name is not None
else '')
if self.length:
if footer:
footer += ', '
footer += "Length: %d" % len(self.categorical)
level_info = self.categorical._repr_categories_info()
        # Category levels are appended on a new line
if footer:
footer += '\n'
footer += level_info
return compat.text_type(footer)
def _get_formatted_values(self):
return format_array(self.categorical.get_values(), None,
float_format=None,
na_rep=self.na_rep)
def to_string(self):
categorical = self.categorical
if len(categorical) == 0:
if self.footer:
return self._get_footer()
else:
return u('')
fmt_values = self._get_formatted_values()
result = ['%s' % i for i in fmt_values]
result = [i.strip() for i in result]
result = u(', ').join(result)
result = [u('[')+result+u(']')]
if self.footer:
footer = self._get_footer()
if footer:
result.append(footer)
return compat.text_type(u('\n').join(result))
class SeriesFormatter(object):
def __init__(self, series, buf=None, length=True, header=True,
na_rep='NaN', name=False, float_format=None, dtype=True,
max_rows=None):
self.series = series
self.buf = buf if buf is not None else StringIO()
self.name = name
self.na_rep = na_rep
self.header = header
self.length = length
self.max_rows = max_rows
if float_format is None:
float_format = get_option("display.float_format")
self.float_format = float_format
self.dtype = dtype
self._chk_truncate()
def _chk_truncate(self):
from pandas.tools.merge import concat
max_rows = self.max_rows
truncate_v = max_rows and (len(self.series) > max_rows)
series = self.series
if truncate_v:
if max_rows == 1:
row_num = max_rows
series = series.iloc[:max_rows]
else:
row_num = max_rows // 2
series = concat((series.iloc[:row_num], series.iloc[-row_num:]))
self.tr_row_num = row_num
self.tr_series = series
self.truncate_v = truncate_v
def _get_footer(self):
name = self.series.name
footer = u('')
if getattr(self.series.index, 'freq', None) is not None:
footer += 'Freq: %s' % self.series.index.freqstr
if self.name is not False and name is not None:
if footer:
footer += ', '
series_name = com.pprint_thing(name,
escape_chars=('\t', '\r', '\n'))
footer += ("Name: %s" %
series_name) if name is not None else ""
if self.length:
if footer:
footer += ', '
footer += 'Length: %d' % len(self.series)
if self.dtype is not False and self.dtype is not None:
name = getattr(self.tr_series.dtype, 'name', None)
if name:
if footer:
footer += ', '
footer += 'dtype: %s' % com.pprint_thing(name)
        # level info is added at the end on a new line, as is done for Categoricals
# Only added when we request a name
if name and com.is_categorical_dtype(self.tr_series.dtype):
level_info = self.tr_series.values._repr_categories_info()
if footer:
footer += "\n"
footer += level_info
return compat.text_type(footer)
def _get_formatted_index(self):
index = self.tr_series.index
is_multi = isinstance(index, MultiIndex)
if is_multi:
have_header = any(name for name in index.names)
fmt_index = index.format(names=True)
else:
have_header = index.name is not None
fmt_index = index.format(name=True)
return fmt_index, have_header
def _get_formatted_values(self):
return format_array(self.tr_series.get_values(), None,
float_format=self.float_format,
na_rep=self.na_rep)
def to_string(self):
series = self.tr_series
footer = self._get_footer()
if len(series) == 0:
return 'Series([], ' + footer + ')'
fmt_index, have_header = self._get_formatted_index()
fmt_values = self._get_formatted_values()
maxlen = max(len(x) for x in fmt_index) # max index len
pad_space = min(maxlen, 60)
if self.truncate_v:
n_header_rows = 0
row_num = self.tr_row_num
width = len(fmt_values[row_num-1])
if width > 3:
dot_str = '...'
else:
dot_str = '..'
dot_str = dot_str.center(width)
fmt_values.insert(row_num + n_header_rows, dot_str)
fmt_index.insert(row_num + 1, '')
result = adjoin(3, *[fmt_index[1:], fmt_values])
if self.header and have_header:
result = fmt_index[0] + '\n' + result
if footer:
result += '\n' + footer
return compat.text_type(u('').join(result))
def _strlen_func():
if compat.PY3: # pragma: no cover
_strlen = len
else:
encoding = get_option("display.encoding")
def _strlen(x):
try:
return len(x.decode(encoding))
except UnicodeError:
return len(x)
return _strlen
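# --- Illustrative note (not part of the original module) ---
# The decode step above matters on Python 2, where strings may be byte
# strings: len('\xc3\xa4') == 2 for the UTF-8 bytes of 'ä', while the decoded
# text is a single display character, so widths would otherwise be overcounted.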
class TableFormatter(object):
is_truncated = False
show_dimensions = None
@property
def should_show_dimensions(self):
return self.show_dimensions is True or (self.show_dimensions == 'truncate' and
self.is_truncated)
def _get_formatter(self, i):
if isinstance(self.formatters, (list, tuple)):
if com.is_integer(i):
return self.formatters[i]
else:
return None
else:
if com.is_integer(i) and i not in self.columns:
i = self.columns[i]
return self.formatters.get(i, None)
class DataFrameFormatter(TableFormatter):
"""
Render a DataFrame
self.to_string() : console-friendly tabular output
self.to_html() : html table
self.to_latex() : LaTeX tabular environment table
"""
__doc__ = __doc__ if __doc__ else ''
__doc__ += docstring_to_string
def __init__(self, frame, buf=None, columns=None, col_space=None,
header=True, index=True, na_rep='NaN', formatters=None,
justify=None, float_format=None, sparsify=None,
index_names=True, line_width=None, max_rows=None,
max_cols=None, show_dimensions=False, **kwds):
self.frame = frame
self.buf = buf if buf is not None else StringIO()
self.show_index_names = index_names
if sparsify is None:
sparsify = get_option("display.multi_sparse")
self.sparsify = sparsify
self.float_format = float_format
self.formatters = formatters if formatters is not None else {}
self.na_rep = na_rep
self.col_space = col_space
self.header = header
self.index = index
self.line_width = line_width
self.max_rows = max_rows
self.max_cols = max_cols
self.max_rows_displayed = min(max_rows or len(self.frame),
len(self.frame))
self.show_dimensions = show_dimensions
if justify is None:
self.justify = get_option("display.colheader_justify")
else:
self.justify = justify
self.kwds = kwds
if columns is not None:
self.columns = _ensure_index(columns)
self.frame = self.frame[self.columns]
else:
self.columns = frame.columns
self._chk_truncate()
def _chk_truncate(self):
'''
Checks whether the frame should be truncated. If so, slices
the frame up.
'''
from pandas.tools.merge import concat
        # Column whose first element is used to determine the width of the dot column
self.tr_size_col = -1
# Cut the data to the information actually printed
max_cols = self.max_cols
max_rows = self.max_rows
if max_cols == 0 or max_rows == 0: # assume we are in the terminal (why else = 0)
(w, h) = get_terminal_size()
self.w = w
self.h = h
if self.max_rows == 0:
dot_row = 1
prompt_row = 1
if self.show_dimensions:
show_dimension_rows = 3
n_add_rows = self.header + dot_row + show_dimension_rows + prompt_row
max_rows_adj = self.h - n_add_rows # rows available to fill with actual data
self.max_rows_adj = max_rows_adj
# Format only rows and columns that could potentially fit the screen
if max_cols == 0 and len(self.frame.columns) > w:
max_cols = w
if max_rows == 0 and len(self.frame) > h:
max_rows = h
if not hasattr(self, 'max_rows_adj'):
self.max_rows_adj = max_rows
if not hasattr(self, 'max_cols_adj'):
self.max_cols_adj = max_cols
max_cols_adj = self.max_cols_adj
max_rows_adj = self.max_rows_adj
truncate_h = max_cols_adj and (len(self.columns) > max_cols_adj)
truncate_v = max_rows_adj and (len(self.frame) > max_rows_adj)
frame = self.frame
if truncate_h:
if max_cols_adj == 0:
col_num = len(frame.columns)
elif max_cols_adj == 1:
frame = frame.iloc[:, :max_cols]
col_num = max_cols
else:
col_num = (max_cols_adj // 2)
frame = concat((frame.iloc[:, :col_num], frame.iloc[:, -col_num:]), axis=1)
self.tr_col_num = col_num
if truncate_v:
if max_rows_adj == 0:
row_num = len(frame)
if max_rows_adj == 1:
row_num = max_rows
frame = frame.iloc[:max_rows, :]
else:
row_num = max_rows_adj // 2
frame = concat((frame.iloc[:row_num, :], frame.iloc[-row_num:, :]))
self.tr_row_num = row_num
self.tr_frame = frame
self.truncate_h = truncate_h
self.truncate_v = truncate_v
self.is_truncated = self.truncate_h or self.truncate_v
def _to_str_columns(self):
"""
Render a DataFrame to a list of columns (as lists of strings).
"""
_strlen = _strlen_func()
frame = self.tr_frame
# may include levels names also
str_index = self._get_formatted_index(frame)
str_columns = self._get_formatted_column_labels(frame)
if self.header:
stringified = []
for i, c in enumerate(frame):
cheader = str_columns[i]
max_colwidth = max(self.col_space or 0,
*(_strlen(x) for x in cheader))
fmt_values = self._format_col(i)
fmt_values = _make_fixed_width(fmt_values, self.justify,
minimum=max_colwidth)
max_len = max(np.max([_strlen(x) for x in fmt_values]),
max_colwidth)
if self.justify == 'left':
cheader = [x.ljust(max_len) for x in cheader]
else:
cheader = [x.rjust(max_len) for x in cheader]
stringified.append(cheader + fmt_values)
else:
stringified = []
for i, c in enumerate(frame):
fmt_values = self._format_col(i)
fmt_values = _make_fixed_width(fmt_values, self.justify,
minimum=(self.col_space or 0))
stringified.append(fmt_values)
strcols = stringified
if self.index:
strcols.insert(0, str_index)
# Add ... to signal truncated
truncate_h = self.truncate_h
truncate_v = self.truncate_v
if truncate_h:
col_num = self.tr_col_num
col_width = len(strcols[self.tr_size_col][0]) # infer from column header
strcols.insert(self.tr_col_num + 1, ['...'.center(col_width)] * (len(str_index)))
if truncate_v:
n_header_rows = len(str_index) - len(frame)
row_num = self.tr_row_num
for ix, col in enumerate(strcols):
cwidth = len(strcols[ix][row_num]) # infer from above row
is_dot_col = False
if truncate_h:
is_dot_col = ix == col_num + 1
if cwidth > 3 or is_dot_col:
my_str = '...'
else:
my_str = '..'
if ix == 0:
dot_str = my_str.ljust(cwidth)
elif is_dot_col:
cwidth = len(strcols[self.tr_size_col][0])
dot_str = my_str.center(cwidth)
else:
dot_str = my_str.rjust(cwidth)
strcols[ix].insert(row_num + n_header_rows, dot_str)
return strcols
def to_string(self):
"""
Render a DataFrame to a console-friendly tabular output.
"""
from pandas import Series
frame = self.frame
if len(frame.columns) == 0 or len(frame.index) == 0:
info_line = (u('Empty %s\nColumns: %s\nIndex: %s')
% (type(self.frame).__name__,
com.pprint_thing(frame.columns),
com.pprint_thing(frame.index)))
text = info_line
else:
strcols = self._to_str_columns()
if self.line_width is None: # no need to wrap around just print the whole frame
text = adjoin(1, *strcols)
elif not isinstance(self.max_cols, int) or self.max_cols > 0: # need to wrap around
text = self._join_multiline(*strcols)
else: # max_cols == 0. Try to fit frame to terminal
text = adjoin(1, *strcols).split('\n')
row_lens = Series(text).apply(len)
max_len_col_ix = np.argmax(row_lens)
max_len = row_lens[max_len_col_ix]
headers = [ele[0] for ele in strcols]
                # Size of last col determines dot col size. See `self._to_str_columns`
size_tr_col = len(headers[self.tr_size_col])
max_len += size_tr_col # Need to make space for largest row plus truncate dot col
dif = max_len - self.w
adj_dif = dif
col_lens = Series([Series(ele).apply(len).max() for ele in strcols])
n_cols = len(col_lens)
counter = 0
while adj_dif > 0 and n_cols > 1:
counter += 1
mid = int(round(n_cols / 2.))
mid_ix = col_lens.index[mid]
col_len = col_lens[mid_ix]
adj_dif -= (col_len + 1) # adjoin adds one
col_lens = col_lens.drop(mid_ix)
n_cols = len(col_lens)
max_cols_adj = n_cols - self.index # subtract index column
self.max_cols_adj = max_cols_adj
# Call again _chk_truncate to cut frame appropriately
# and then generate string representation
self._chk_truncate()
strcols = self._to_str_columns()
text = adjoin(1, *strcols)
self.buf.writelines(text)
if self.should_show_dimensions:
self.buf.write("\n\n[%d rows x %d columns]"
% (len(frame), len(frame.columns)))
def _join_multiline(self, *strcols):
lwidth = self.line_width
adjoin_width = 1
strcols = list(strcols)
if self.index:
idx = strcols.pop(0)
lwidth -= np.array([len(x) for x in idx]).max() + adjoin_width
col_widths = [np.array([len(x) for x in col]).max()
if len(col) > 0 else 0
for col in strcols]
col_bins = _binify(col_widths, lwidth)
nbins = len(col_bins)
if self.truncate_v:
nrows = self.max_rows_adj + 1
else:
nrows = len(self.frame)
str_lst = []
st = 0
for i, ed in enumerate(col_bins):
row = strcols[st:ed]
row.insert(0, idx)
if nbins > 1:
if ed <= len(strcols) and i < nbins - 1:
row.append([' \\'] + [' '] * (nrows - 1))
else:
row.append([' '] * nrows)
str_lst.append(adjoin(adjoin_width, *row))
st = ed
return '\n\n'.join(str_lst)
def to_latex(self, column_format=None, longtable=False):
"""
Render a DataFrame to a LaTeX tabular/longtable environment output.
"""
self.escape = self.kwds.get('escape', True)
# TODO: column_format is not settable in df.to_latex
def get_col_type(dtype):
if issubclass(dtype.type, np.number):
return 'r'
else:
return 'l'
frame = self.frame
if len(frame.columns) == 0 or len(frame.index) == 0:
info_line = (u('Empty %s\nColumns: %s\nIndex: %s')
% (type(self.frame).__name__,
frame.columns, frame.index))
strcols = [[info_line]]
else:
strcols = self._to_str_columns()
if self.index and isinstance(self.frame.index, MultiIndex):
clevels = self.frame.columns.nlevels
strcols.pop(0)
name = any(self.frame.columns.names)
for i, lev in enumerate(self.frame.index.levels):
lev2 = lev.format(name=name)
blank = ' ' * len(lev2[0])
lev3 = [blank] * clevels
for level_idx, group in itertools.groupby(
self.frame.index.labels[i]):
count = len(list(group))
lev3.extend([lev2[level_idx]] + [blank] * (count - 1))
strcols.insert(i, lev3)
if column_format is None:
dtypes = self.frame.dtypes.values
column_format = ''.join(map(get_col_type, dtypes))
if self.index:
index_format = 'l' * self.frame.index.nlevels
column_format = index_format + column_format
elif not isinstance(column_format,
compat.string_types): # pragma: no cover
raise AssertionError('column_format must be str or unicode, not %s'
% type(column_format))
def write(buf, frame, column_format, strcols, longtable=False):
if not longtable:
buf.write('\\begin{tabular}{%s}\n' % column_format)
buf.write('\\toprule\n')
else:
buf.write('\\begin{longtable}{%s}\n' % column_format)
buf.write('\\toprule\n')
nlevels = frame.columns.nlevels
for i, row in enumerate(zip(*strcols)):
if i == nlevels:
buf.write('\\midrule\n') # End of header
if longtable:
buf.write('\\endhead\n')
buf.write('\\midrule\n')
buf.write('\\multicolumn{3}{r}{{Continued on next '
'page}} \\\\\n')
                        buf.write('\\midrule\n')
                        buf.write('\\endfoot\n\n')
buf.write('\\bottomrule\n')
buf.write('\\endlastfoot\n')
if self.escape:
crow = [(x.replace('\\', '\\textbackslash') # escape backslashes first
.replace('_', '\\_')
.replace('%', '\\%')
.replace('$', '\\$')
.replace('#', '\\#')
.replace('{', '\\{')
.replace('}', '\\}')
.replace('~', '\\textasciitilde')
.replace('^', '\\textasciicircum')
.replace('&', '\\&') if x else '{}') for x in row]
else:
crow = [x if x else '{}' for x in row]
buf.write(' & '.join(crow))
buf.write(' \\\\\n')
if not longtable:
buf.write('\\bottomrule\n')
buf.write('\\end{tabular}\n')
else:
buf.write('\\end{longtable}\n')
if hasattr(self.buf, 'write'):
write(self.buf, frame, column_format, strcols, longtable)
elif isinstance(self.buf, compat.string_types):
with open(self.buf, 'w') as f:
write(f, frame, column_format, strcols, longtable)
else:
raise TypeError('buf is not a file name and it has no write '
'method')
def _format_col(self, i):
frame = self.tr_frame
formatter = self._get_formatter(i)
return format_array(
(frame.iloc[:, i]).get_values(),
formatter, float_format=self.float_format, na_rep=self.na_rep,
space=self.col_space
)
def to_html(self, classes=None):
"""
Render a DataFrame to a html table.
"""
html_renderer = HTMLFormatter(self, classes=classes,
max_rows=self.max_rows,
max_cols=self.max_cols)
if hasattr(self.buf, 'write'):
html_renderer.write_result(self.buf)
elif isinstance(self.buf, compat.string_types):
with open(self.buf, 'w') as f:
html_renderer.write_result(f)
else:
raise TypeError('buf is not a file name and it has no write '
' method')
def _get_formatted_column_labels(self, frame):
from pandas.core.index import _sparsify
def is_numeric_dtype(dtype):
return issubclass(dtype.type, np.number)
columns = frame.columns
if isinstance(columns, MultiIndex):
fmt_columns = columns.format(sparsify=False, adjoin=False)
fmt_columns = lzip(*fmt_columns)
dtypes = self.frame.dtypes.values
            # if any level holds floats, don't use a leading space at all
restrict_formatting = any([l.is_floating for l in columns.levels])
need_leadsp = dict(zip(fmt_columns, map(is_numeric_dtype, dtypes)))
def space_format(x, y):
if y not in self.formatters and need_leadsp[x] and not restrict_formatting:
return ' ' + y
return y
str_columns = list(zip(*[[space_format(x, y) for y in x] for x in fmt_columns]))
if self.sparsify:
str_columns = _sparsify(str_columns)
str_columns = [list(x) for x in zip(*str_columns)]
else:
fmt_columns = columns.format()
dtypes = self.frame.dtypes
need_leadsp = dict(zip(fmt_columns, map(is_numeric_dtype, dtypes)))
str_columns = [[' ' + x
if not self._get_formatter(i) and need_leadsp[x]
else x]
for i, (col, x) in
enumerate(zip(columns, fmt_columns))]
if self.show_index_names and self.has_index_names:
for x in str_columns:
x.append('')
# self.str_columns = str_columns
return str_columns
@property
def has_index_names(self):
return _has_names(self.frame.index)
@property
def has_column_names(self):
return _has_names(self.frame.columns)
def _get_formatted_index(self, frame):
# Note: this is only used by to_string() and to_latex(), not by to_html().
index = frame.index
columns = frame.columns
show_index_names = self.show_index_names and self.has_index_names
show_col_names = (self.show_index_names and self.has_column_names)
fmt = self._get_formatter('__index__')
if isinstance(index, MultiIndex):
fmt_index = index.format(sparsify=self.sparsify, adjoin=False,
names=show_index_names,
formatter=fmt)
else:
fmt_index = [index.format(name=show_index_names, formatter=fmt)]
fmt_index = [tuple(_make_fixed_width(
list(x), justify='left', minimum=(self.col_space or 0)))
for x in fmt_index]
adjoined = adjoin(1, *fmt_index).split('\n')
# empty space for columns
if show_col_names:
col_header = ['%s' % x for x in self._get_column_name_list()]
else:
col_header = [''] * columns.nlevels
if self.header:
return col_header + adjoined
else:
return adjoined
def _get_column_name_list(self):
names = []
columns = self.frame.columns
if isinstance(columns, MultiIndex):
names.extend('' if name is None else name
for name in columns.names)
else:
names.append('' if columns.name is None else columns.name)
return names
class HTMLFormatter(TableFormatter):
indent_delta = 2
def __init__(self, formatter, classes=None, max_rows=None, max_cols=None):
self.fmt = formatter
self.classes = classes
self.frame = self.fmt.frame
self.columns = self.fmt.tr_frame.columns
self.elements = []
self.bold_rows = self.fmt.kwds.get('bold_rows', False)
self.escape = self.fmt.kwds.get('escape', True)
self.max_rows = max_rows or len(self.fmt.frame)
self.max_cols = max_cols or len(self.fmt.columns)
self.show_dimensions = self.fmt.show_dimensions
self.is_truncated = (self.max_rows < len(self.fmt.frame) or
self.max_cols < len(self.fmt.columns))
def write(self, s, indent=0):
rs = com.pprint_thing(s)
self.elements.append(' ' * indent + rs)
def write_th(self, s, indent=0, tags=None):
if (self.fmt.col_space is not None
and self.fmt.col_space > 0):
tags = (tags or "")
tags += 'style="min-width: %s;"' % self.fmt.col_space
return self._write_cell(s, kind='th', indent=indent, tags=tags)
def write_td(self, s, indent=0, tags=None):
return self._write_cell(s, kind='td', indent=indent, tags=tags)
def _write_cell(self, s, kind='td', indent=0, tags=None):
if tags is not None:
start_tag = '<%s %s>' % (kind, tags)
else:
start_tag = '<%s>' % kind
if self.escape:
# escape & first to prevent double escaping of &
esc = OrderedDict(
[('&', r'&'), ('<', r'<'), ('>', r'>')]
)
else:
esc = {}
rs = com.pprint_thing(s, escape_chars=esc).strip()
self.write(
'%s%s</%s>' % (start_tag, rs, kind), indent)
def write_tr(self, line, indent=0, indent_delta=4, header=False,
align=None, tags=None, nindex_levels=0):
if tags is None:
tags = {}
if align is None:
self.write('<tr>', indent)
else:
self.write('<tr style="text-align: %s;">' % align, indent)
indent += indent_delta
for i, s in enumerate(line):
val_tag = tags.get(i, None)
if header or (self.bold_rows and i < nindex_levels):
self.write_th(s, indent, tags=val_tag)
else:
self.write_td(s, indent, tags=val_tag)
indent -= indent_delta
self.write('</tr>', indent)
def write_result(self, buf):
indent = 0
frame = self.frame
_classes = ['dataframe'] # Default class.
if self.classes is not None:
if isinstance(self.classes, str):
self.classes = self.classes.split()
if not isinstance(self.classes, (list, tuple)):
raise AssertionError(('classes must be list or tuple, '
'not %s') % type(self.classes))
_classes.extend(self.classes)
self.write('<table border="1" class="%s">' % ' '.join(_classes),
indent)
indent += self.indent_delta
indent = self._write_header(indent)
indent = self._write_body(indent)
self.write('</table>', indent)
if self.should_show_dimensions:
by = chr(215) if compat.PY3 else unichr(215) # ×
self.write(u('<p>%d rows %s %d columns</p>') %
(len(frame), by, len(frame.columns)))
_put_lines(buf, self.elements)
def _write_header(self, indent):
truncate_h = self.fmt.truncate_h
row_levels = self.frame.index.nlevels
if not self.fmt.header:
# write nothing
return indent
def _column_header():
if self.fmt.index:
row = [''] * (self.frame.index.nlevels - 1)
else:
row = []
if isinstance(self.columns, MultiIndex):
if self.fmt.has_column_names and self.fmt.index:
row.append(single_column_table(self.columns.names))
else:
row.append('')
style = "text-align: %s;" % self.fmt.justify
row.extend([single_column_table(c, self.fmt.justify, style) for
c in self.columns])
else:
if self.fmt.index:
row.append(self.columns.name or '')
row.extend(self.columns)
return row
self.write('<thead>', indent)
row = []
indent += self.indent_delta
if isinstance(self.columns, MultiIndex):
template = 'colspan="%d" halign="left"'
if self.fmt.sparsify:
# GH3547
sentinel = com.sentinel_factory()
else:
sentinel = None
levels = self.columns.format(sparsify=sentinel,
adjoin=False, names=False)
level_lengths = _get_level_lengths(levels, sentinel)
inner_lvl = len(level_lengths) - 1
for lnum, (records, values) in enumerate(zip(level_lengths,
levels)):
if truncate_h:
# modify the header lines
ins_col = self.fmt.tr_col_num
if self.fmt.sparsify:
recs_new = {}
# Increment tags after ... col.
for tag, span in list(records.items()):
if tag >= ins_col:
recs_new[tag + 1] = span
elif tag + span > ins_col:
recs_new[tag] = span + 1
if lnum == inner_lvl:
values = values[:ins_col] + (u('...'),) + \
values[ins_col:]
else: # sparse col headers do not receive a ...
values = (values[:ins_col] + (values[ins_col - 1],) +
values[ins_col:])
else:
recs_new[tag] = span
# if ins_col lies between tags, all col headers get ...
if tag + span == ins_col:
recs_new[ins_col] = 1
values = values[:ins_col] + (u('...'),) + \
values[ins_col:]
records = recs_new
inner_lvl = len(level_lengths) - 1
if lnum == inner_lvl:
records[ins_col] = 1
else:
recs_new = {}
for tag, span in list(records.items()):
if tag >= ins_col:
recs_new[tag + 1] = span
else:
recs_new[tag] = span
recs_new[ins_col] = 1
records = recs_new
values = values[:ins_col] + [u('...')] + values[ins_col:]
name = self.columns.names[lnum]
row = [''] * (row_levels - 1) + ['' if name is None
else com.pprint_thing(name)]
if row == [""] and self.fmt.index is False:
row = []
tags = {}
j = len(row)
for i, v in enumerate(values):
if i in records:
if records[i] > 1:
tags[j] = template % records[i]
else:
continue
j += 1
row.append(v)
self.write_tr(row, indent, self.indent_delta, tags=tags,
header=True)
else:
col_row = _column_header()
align = self.fmt.justify
if truncate_h:
ins_col = row_levels + self.fmt.tr_col_num
col_row.insert(ins_col, '...')
self.write_tr(col_row, indent, self.indent_delta, header=True,
align=align)
if self.fmt.has_index_names:
row = [
x if x is not None else '' for x in self.frame.index.names
] + [''] * min(len(self.columns), self.max_cols)
if truncate_h:
ins_col = row_levels + self.fmt.tr_col_num
row.insert(ins_col, '')
self.write_tr(row, indent, self.indent_delta, header=True)
indent -= self.indent_delta
self.write('</thead>', indent)
return indent
def _write_body(self, indent):
self.write('<tbody>', indent)
indent += self.indent_delta
fmt_values = {}
for i in range(min(len(self.columns), self.max_cols)):
fmt_values[i] = self.fmt._format_col(i)
# write values
if self.fmt.index:
if isinstance(self.frame.index, MultiIndex):
self._write_hierarchical_rows(fmt_values, indent)
else:
self._write_regular_rows(fmt_values, indent)
else:
for i in range(len(self.frame)):
row = [fmt_values[j][i] for j in range(len(self.columns))]
self.write_tr(row, indent, self.indent_delta, tags=None)
indent -= self.indent_delta
self.write('</tbody>', indent)
indent -= self.indent_delta
return indent
def _write_regular_rows(self, fmt_values, indent):
truncate_h = self.fmt.truncate_h
truncate_v = self.fmt.truncate_v
ncols = len(self.fmt.tr_frame.columns)
nrows = len(self.fmt.tr_frame)
fmt = self.fmt._get_formatter('__index__')
if fmt is not None:
index_values = self.fmt.tr_frame.index.map(fmt)
else:
index_values = self.fmt.tr_frame.index.format()
row = []
for i in range(nrows):
if truncate_v and i == (self.fmt.tr_row_num):
str_sep_row = ['...' for ele in row]
self.write_tr(str_sep_row, indent, self.indent_delta, tags=None,
nindex_levels=1)
row = []
row.append(index_values[i])
row.extend(fmt_values[j][i] for j in range(ncols))
if truncate_h:
dot_col_ix = self.fmt.tr_col_num + 1
row.insert(dot_col_ix, '...')
self.write_tr(row, indent, self.indent_delta, tags=None,
nindex_levels=1)
def _write_hierarchical_rows(self, fmt_values, indent):
template = 'rowspan="%d" valign="top"'
truncate_h = self.fmt.truncate_h
truncate_v = self.fmt.truncate_v
frame = self.fmt.tr_frame
ncols = len(frame.columns)
nrows = len(frame)
row_levels = self.frame.index.nlevels
idx_values = frame.index.format(sparsify=False, adjoin=False, names=False)
idx_values = lzip(*idx_values)
if self.fmt.sparsify:
# GH3547
sentinel = com.sentinel_factory()
levels = frame.index.format(sparsify=sentinel, adjoin=False, names=False)
level_lengths = _get_level_lengths(levels, sentinel)
inner_lvl = len(level_lengths) - 1
if truncate_v:
# Insert ... row and adjust idx_values and
# level_lengths to take this into account.
ins_row = self.fmt.tr_row_num
for lnum, records in enumerate(level_lengths):
rec_new = {}
for tag, span in list(records.items()):
if tag >= ins_row:
rec_new[tag + 1] = span
elif tag + span > ins_row:
rec_new[tag] = span + 1
dot_row = list(idx_values[ins_row - 1])
dot_row[-1] = u('...')
idx_values.insert(ins_row, tuple(dot_row))
else:
rec_new[tag] = span
                        # If ins_row lies between tags, all index columns receive '...'
if tag + span == ins_row:
rec_new[ins_row] = 1
if lnum == 0:
idx_values.insert(ins_row, tuple([u('...')]*len(level_lengths)))
level_lengths[lnum] = rec_new
level_lengths[inner_lvl][ins_row] = 1
for ix_col in range(len(fmt_values)):
fmt_values[ix_col].insert(ins_row, '...')
nrows += 1
for i in range(nrows):
row = []
tags = {}
sparse_offset = 0
j = 0
for records, v in zip(level_lengths, idx_values[i]):
if i in records:
if records[i] > 1:
tags[j] = template % records[i]
else:
sparse_offset += 1
continue
j += 1
row.append(v)
row.extend(fmt_values[j][i] for j in range(ncols))
if truncate_h:
row.insert(row_levels - sparse_offset + self.fmt.tr_col_num, '...')
self.write_tr(row, indent, self.indent_delta, tags=tags,
nindex_levels=len(levels) - sparse_offset)
else:
for i in range(len(frame)):
idx_values = list(zip(*frame.index.format(sparsify=False,
adjoin=False,
names=False)))
row = []
row.extend(idx_values[i])
row.extend(fmt_values[j][i] for j in range(ncols))
if truncate_h:
row.insert(row_levels + self.fmt.tr_col_num, '...')
self.write_tr(row, indent, self.indent_delta, tags=None,
nindex_levels=frame.index.nlevels)
def _get_level_lengths(levels, sentinel=''):
from itertools import groupby
def _make_grouper():
record = {'count': 0}
def grouper(x):
if x != sentinel:
record['count'] += 1
return record['count']
return grouper
result = []
for lev in levels:
i = 0
f = _make_grouper()
recs = {}
for key, gpr in groupby(lev, f):
values = list(gpr)
recs[i] = len(values)
i += len(values)
result.append(recs)
return result
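# --- Illustrative sketch (not part of the original module) ---
# For a sparsified two-level header such as
#   levels = [['a', '', '', 'b', '', ''], ['x', 'y', 'z', 'x', 'y', 'z']]
# _get_level_lengths(levels, sentinel='') returns
#   [{0: 3, 3: 3}, {0: 1, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1}]
# i.e. a start-position -> span mapping per level, which the HTML and Excel
# writers turn into colspan/rowspan attributes and merged-cell ranges.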
class CSVFormatter(object):
def __init__(self, obj, path_or_buf=None, sep=",", na_rep='', float_format=None,
cols=None, header=True, index=True, index_label=None,
mode='w', nanRep=None, encoding=None, quoting=None,
line_terminator='\n', chunksize=None, engine=None,
tupleize_cols=False, quotechar='"', date_format=None,
doublequote=True, escapechar=None, decimal='.'):
self.engine = engine # remove for 0.13
self.obj = obj
if path_or_buf is None:
path_or_buf = StringIO()
self.path_or_buf = path_or_buf
self.sep = sep
self.na_rep = na_rep
self.float_format = float_format
self.decimal = decimal
self.header = header
self.index = index
self.index_label = index_label
self.mode = mode
self.encoding = encoding
if quoting is None:
quoting = csv.QUOTE_MINIMAL
self.quoting = quoting
if quoting == csv.QUOTE_NONE:
# prevents crash in _csv
quotechar = None
self.quotechar = quotechar
self.doublequote = doublequote
self.escapechar = escapechar
self.line_terminator = line_terminator
self.date_format = date_format
# GH3457
if not self.obj.columns.is_unique and engine == 'python':
raise NotImplementedError("columns.is_unique == False not "
"supported with engine='python'")
self.tupleize_cols = tupleize_cols
self.has_mi_columns = isinstance(obj.columns, MultiIndex
) and not self.tupleize_cols
# validate mi options
if self.has_mi_columns:
if cols is not None:
raise TypeError("cannot specify cols with a MultiIndex on the "
"columns")
if cols is not None:
if isinstance(cols, Index):
cols = cols.to_native_types(na_rep=na_rep,
float_format=float_format,
date_format=date_format,
quoting=self.quoting)
else:
cols = np.asarray(list(cols))
self.obj = self.obj.loc[:, cols]
# update columns to include possible multiplicity of dupes
        # and make sure cols is just a list of labels
cols = self.obj.columns
if isinstance(cols, Index):
cols = cols.to_native_types(na_rep=na_rep,
float_format=float_format,
date_format=date_format,
quoting=self.quoting)
else:
cols = np.asarray(list(cols))
# save it
self.cols = cols
# preallocate data 2d list
self.blocks = self.obj._data.blocks
ncols = sum(b.shape[0] for b in self.blocks)
self.data = [None] * ncols
if chunksize is None:
chunksize = (100000 // (len(self.cols) or 1)) or 1
self.chunksize = int(chunksize)
self.data_index = obj.index
if isinstance(obj.index, PeriodIndex):
self.data_index = obj.index.to_timestamp()
if (isinstance(self.data_index, DatetimeIndex) and
date_format is not None):
self.data_index = Index([x.strftime(date_format)
if notnull(x) else ''
for x in self.data_index])
self.nlevels = getattr(self.data_index, 'nlevels', 1)
if not index:
self.nlevels = 0
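    # --- Illustrative note (not part of the original class) ---
    # Example of the default chunk sizing in __init__ above: a frame with 4
    # columns gets chunksize = (100000 // 4) or 1 == 25000, so _save() emits
    # the CSV in 25000-row slices via _save_chunk().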
    # Original pure-Python implementation of df.to_csv,
    # invoked by df.to_csv(engine='python')
def _helper_csv(self, writer, na_rep=None, cols=None,
header=True, index=True,
index_label=None, float_format=None, date_format=None):
if cols is None:
cols = self.columns
has_aliases = isinstance(header, (tuple, list, np.ndarray, Index))
if has_aliases or header:
if index:
# should write something for index label
if index_label is not False:
if index_label is None:
if isinstance(self.obj.index, MultiIndex):
index_label = []
for i, name in enumerate(self.obj.index.names):
if name is None:
name = ''
index_label.append(name)
else:
index_label = self.obj.index.name
if index_label is None:
index_label = ['']
else:
index_label = [index_label]
elif not isinstance(index_label,
(list, tuple, np.ndarray, Index)):
# given a string for a DF with Index
index_label = [index_label]
encoded_labels = list(index_label)
else:
encoded_labels = []
if has_aliases:
if len(header) != len(cols):
raise ValueError(('Writing %d cols but got %d aliases'
% (len(cols), len(header))))
else:
write_cols = header
else:
write_cols = cols
encoded_cols = list(write_cols)
writer.writerow(encoded_labels + encoded_cols)
else:
encoded_cols = list(cols)
writer.writerow(encoded_cols)
if date_format is None:
date_formatter = lambda x: Timestamp(x)._repr_base
else:
def strftime_with_nulls(x):
x = Timestamp(x)
if notnull(x):
return x.strftime(date_format)
date_formatter = lambda x: strftime_with_nulls(x)
data_index = self.obj.index
if isinstance(self.obj.index, PeriodIndex):
data_index = self.obj.index.to_timestamp()
if isinstance(data_index, DatetimeIndex) and date_format is not None:
data_index = Index([date_formatter(x) for x in data_index])
values = self.obj.copy()
values.index = data_index
values.columns = values.columns.to_native_types(
na_rep=na_rep,
float_format=float_format,
date_format=date_format,
quoting=self.quoting)
values = values[cols]
series = {}
for k, v in compat.iteritems(values._series):
series[k] = v.values
nlevels = getattr(data_index, 'nlevels', 1)
for j, idx in enumerate(data_index):
row_fields = []
if index:
if nlevels == 1:
row_fields = [idx]
else: # handle MultiIndex
row_fields = list(idx)
for i, col in enumerate(cols):
val = series[col][j]
if lib.checknull(val):
val = na_rep
if float_format is not None and com.is_float(val):
val = float_format % val
elif isinstance(val, (np.datetime64, Timestamp)):
val = date_formatter(val)
row_fields.append(val)
writer.writerow(row_fields)
def save(self):
# create the writer & save
if hasattr(self.path_or_buf, 'write'):
f = self.path_or_buf
close = False
else:
f = com._get_handle(self.path_or_buf, self.mode,
encoding=self.encoding)
close = True
try:
writer_kwargs = dict(lineterminator=self.line_terminator,
delimiter=self.sep, quoting=self.quoting,
doublequote=self.doublequote,
escapechar=self.escapechar,
quotechar=self.quotechar)
if self.encoding is not None:
writer_kwargs['encoding'] = self.encoding
self.writer = com.UnicodeWriter(f, **writer_kwargs)
else:
self.writer = csv.writer(f, **writer_kwargs)
if self.engine == 'python':
# to be removed in 0.13
self._helper_csv(self.writer, na_rep=self.na_rep,
float_format=self.float_format,
cols=self.cols, header=self.header,
index=self.index,
index_label=self.index_label,
date_format=self.date_format)
else:
self._save()
finally:
if close:
f.close()
def _save_header(self):
writer = self.writer
obj = self.obj
index_label = self.index_label
cols = self.cols
has_mi_columns = self.has_mi_columns
header = self.header
encoded_labels = []
has_aliases = isinstance(header, (tuple, list, np.ndarray, Index))
if not (has_aliases or self.header):
return
if has_aliases:
if len(header) != len(cols):
raise ValueError(('Writing %d cols but got %d aliases'
% (len(cols), len(header))))
else:
write_cols = header
else:
write_cols = cols
if self.index:
# should write something for index label
if index_label is not False:
if index_label is None:
if isinstance(obj.index, MultiIndex):
index_label = []
for i, name in enumerate(obj.index.names):
if name is None:
name = ''
index_label.append(name)
else:
index_label = obj.index.name
if index_label is None:
index_label = ['']
else:
index_label = [index_label]
elif not isinstance(index_label, (list, tuple, np.ndarray, Index)):
# given a string for a DF with Index
index_label = [index_label]
encoded_labels = list(index_label)
else:
encoded_labels = []
if not has_mi_columns:
encoded_labels += list(write_cols)
# write out the mi
if has_mi_columns:
columns = obj.columns
# write out the names for each level, then ALL of the values for
# each level
for i in range(columns.nlevels):
# we need at least 1 index column to write our col names
col_line = []
if self.index:
# name is the first column
col_line.append(columns.names[i])
if isinstance(index_label, list) and len(index_label) > 1:
col_line.extend([''] * (len(index_label) - 1))
col_line.extend(columns.get_level_values(i))
writer.writerow(col_line)
# add blanks for the columns, so that we
# have consistent seps
encoded_labels.extend([''] * len(columns))
# write out the index label line
writer.writerow(encoded_labels)
def _save(self):
self._save_header()
nrows = len(self.data_index)
# write in chunksize bites
chunksize = self.chunksize
chunks = int(nrows / chunksize) + 1
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, nrows)
if start_i >= end_i:
break
self._save_chunk(start_i, end_i)
def _save_chunk(self, start_i, end_i):
data_index = self.data_index
# create the data for a chunk
slicer = slice(start_i, end_i)
for i in range(len(self.blocks)):
b = self.blocks[i]
d = b.to_native_types(slicer=slicer,
na_rep=self.na_rep,
float_format=self.float_format,
decimal=self.decimal,
date_format=self.date_format,
quoting=self.quoting)
for col_loc, col in zip(b.mgr_locs, d):
# self.data is a preallocated list
self.data[col_loc] = col
ix = data_index.to_native_types(slicer=slicer,
na_rep=self.na_rep,
float_format=self.float_format,
date_format=self.date_format,
quoting=self.quoting)
lib.write_csv_rows(self.data, ix, self.nlevels, self.cols, self.writer)
# from collections import namedtuple
# ExcelCell = namedtuple("ExcelCell",
# 'row, col, val, style, mergestart, mergeend')
class ExcelCell(object):
__fields__ = ('row', 'col', 'val', 'style', 'mergestart', 'mergeend')
__slots__ = __fields__
def __init__(self, row, col, val,
style=None, mergestart=None, mergeend=None):
self.row = row
self.col = col
self.val = val
self.style = style
self.mergestart = mergestart
self.mergeend = mergeend
header_style = {"font": {"bold": True},
"borders": {"top": "thin",
"right": "thin",
"bottom": "thin",
"left": "thin"},
"alignment": {"horizontal": "center", "vertical": "top"}}
class ExcelFormatter(object):
"""
Class for formatting a DataFrame to a list of ExcelCells,
Parameters
----------
df : dataframe
na_rep: na representation
float_format : string, default None
Format string for floating point numbers
cols : sequence, optional
Columns to write
header : boolean or list of string, default True
Write out column names. If a list of string is given it is
assumed to be aliases for the column names
index : boolean, default True
output row names (index)
index_label : string or sequence, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
merge_cells : boolean, default False
Format MultiIndex and Hierarchical Rows as merged cells.
inf_rep : string, default `'inf'`
representation for np.inf values (which aren't representable in Excel)
A `'-'` sign will be added in front of -inf.
"""
def __init__(self, df, na_rep='', float_format=None, cols=None,
header=True, index=True, index_label=None, merge_cells=False,
inf_rep='inf'):
self.df = df
self.rowcounter = 0
self.na_rep = na_rep
self.columns = cols
if cols is None:
self.columns = df.columns
self.float_format = float_format
self.index = index
self.index_label = index_label
self.header = header
self.merge_cells = merge_cells
self.inf_rep = inf_rep
def _format_value(self, val):
if lib.checknull(val):
val = self.na_rep
elif com.is_float(val):
if np.isposinf(val):
val = self.inf_rep
elif np.isneginf(val):
val = '-%s' % self.inf_rep
elif self.float_format is not None:
val = float(self.float_format % val)
return val
def _format_header_mi(self):
has_aliases = isinstance(self.header, (tuple, list, np.ndarray, Index))
if not(has_aliases or self.header):
return
columns = self.columns
level_strs = columns.format(sparsify=True, adjoin=False, names=False)
level_lengths = _get_level_lengths(level_strs)
coloffset = 0
lnum = 0
if self.index and isinstance(self.df.index, MultiIndex):
coloffset = len(self.df.index[0]) - 1
if self.merge_cells:
# Format multi-index as a merged cells.
for lnum in range(len(level_lengths)):
name = columns.names[lnum]
yield ExcelCell(lnum, coloffset, name, header_style)
for lnum, (spans, levels, labels) in enumerate(zip(level_lengths,
columns.levels,
columns.labels)
):
values = levels.take(labels)
for i in spans:
if spans[i] > 1:
yield ExcelCell(lnum,
coloffset + i + 1,
values[i],
header_style,
lnum,
coloffset + i + spans[i])
else:
yield ExcelCell(lnum,
coloffset + i + 1,
values[i],
header_style)
else:
# Format in legacy format with dots to indicate levels.
for i, values in enumerate(zip(*level_strs)):
v = ".".join(map(com.pprint_thing, values))
yield ExcelCell(lnum, coloffset + i + 1, v, header_style)
self.rowcounter = lnum
def _format_header_regular(self):
has_aliases = isinstance(self.header, (tuple, list, np.ndarray, Index))
if has_aliases or self.header:
coloffset = 0
if self.index:
coloffset = 1
if isinstance(self.df.index, MultiIndex):
coloffset = len(self.df.index[0])
colnames = self.columns
if has_aliases:
if len(self.header) != len(self.columns):
raise ValueError(('Writing %d cols but got %d aliases'
% (len(self.columns), len(self.header))))
else:
colnames = self.header
for colindex, colname in enumerate(colnames):
yield ExcelCell(self.rowcounter, colindex + coloffset, colname,
header_style)
def _format_header(self):
if isinstance(self.columns, MultiIndex):
gen = self._format_header_mi()
else:
gen = self._format_header_regular()
gen2 = ()
if self.df.index.names:
row = [x if x is not None else ''
for x in self.df.index.names] + [''] * len(self.columns)
if reduce(lambda x, y: x and y, map(lambda x: x != '', row)):
gen2 = (ExcelCell(self.rowcounter, colindex, val, header_style)
for colindex, val in enumerate(row))
self.rowcounter += 1
return itertools.chain(gen, gen2)
def _format_body(self):
if isinstance(self.df.index, MultiIndex):
return self._format_hierarchical_rows()
else:
return self._format_regular_rows()
def _format_regular_rows(self):
has_aliases = isinstance(self.header, (tuple, list, np.ndarray, Index))
if has_aliases or self.header:
self.rowcounter += 1
coloffset = 0
# output index and index_label?
if self.index:
            # check aliases
            # if a list was given, only take the first entry (this is not a MultiIndex)
if self.index_label and isinstance(self.index_label,
(list, tuple, np.ndarray, Index)):
index_label = self.index_label[0]
# if string good to go
elif self.index_label and isinstance(self.index_label, str):
index_label = self.index_label
else:
index_label = self.df.index.names[0]
if index_label and self.header is not False:
if self.merge_cells:
yield ExcelCell(self.rowcounter,
0,
index_label,
header_style)
self.rowcounter += 1
else:
yield ExcelCell(self.rowcounter - 1,
0,
index_label,
header_style)
# write index_values
index_values = self.df.index
if isinstance(self.df.index, PeriodIndex):
index_values = self.df.index.to_timestamp()
coloffset = 1
for idx, idxval in enumerate(index_values):
yield ExcelCell(self.rowcounter + idx, 0, idxval, header_style)
# Get a frame that will account for any duplicates in the column names.
col_mapped_frame = self.df.loc[:, self.columns]
# Write the body of the frame data series by series.
for colidx in range(len(self.columns)):
series = col_mapped_frame.iloc[:, colidx]
for i, val in enumerate(series):
yield ExcelCell(self.rowcounter + i, colidx + coloffset, val)
def _format_hierarchical_rows(self):
has_aliases = isinstance(self.header, (tuple, list, np.ndarray, Index))
if has_aliases or self.header:
self.rowcounter += 1
gcolidx = 0
if self.index:
index_labels = self.df.index.names
# check for aliases
if self.index_label and isinstance(self.index_label,
(list, tuple, np.ndarray, Index)):
index_labels = self.index_label
# if index labels are not empty go ahead and dump
if (any(x is not None for x in index_labels)
and self.header is not False):
if not self.merge_cells:
self.rowcounter -= 1
for cidx, name in enumerate(index_labels):
yield ExcelCell(self.rowcounter,
cidx,
name,
header_style)
self.rowcounter += 1
if self.merge_cells:
# Format hierarchical rows as merged cells.
level_strs = self.df.index.format(sparsify=True, adjoin=False,
names=False)
level_lengths = _get_level_lengths(level_strs)
for spans, levels, labels in zip(level_lengths,
self.df.index.levels,
self.df.index.labels):
values = levels.take(labels)
for i in spans:
if spans[i] > 1:
yield ExcelCell(self.rowcounter + i,
gcolidx,
values[i],
header_style,
self.rowcounter + i + spans[i] - 1,
gcolidx)
else:
yield ExcelCell(self.rowcounter + i,
gcolidx,
values[i],
header_style)
gcolidx += 1
else:
# Format hierarchical rows with non-merged values.
for indexcolvals in zip(*self.df.index):
for idx, indexcolval in enumerate(indexcolvals):
yield ExcelCell(self.rowcounter + idx,
gcolidx,
indexcolval,
header_style)
gcolidx += 1
# Get a frame that will account for any duplicates in the column names.
col_mapped_frame = self.df.loc[:, self.columns]
# Write the body of the frame data series by series.
for colidx in range(len(self.columns)):
series = col_mapped_frame.iloc[:, colidx]
for i, val in enumerate(series):
yield ExcelCell(self.rowcounter + i, gcolidx + colidx, val)
def get_formatted_cells(self):
for cell in itertools.chain(self._format_header(),
self._format_body()):
cell.val = self._format_value(cell.val)
yield cell
# ----------------------------------------------------------------------
# Array formatters
def format_array(values, formatter, float_format=None, na_rep='NaN',
digits=None, space=None, justify='right'):
if com.is_float_dtype(values.dtype):
fmt_klass = FloatArrayFormatter
elif com.is_integer_dtype(values.dtype):
fmt_klass = IntArrayFormatter
elif com.is_datetime64_dtype(values.dtype):
fmt_klass = Datetime64Formatter
elif com.is_timedelta64_dtype(values.dtype):
fmt_klass = Timedelta64Formatter
else:
fmt_klass = GenericArrayFormatter
if space is None:
space = get_option("display.column_space")
if float_format is None:
float_format = get_option("display.float_format")
if digits is None:
digits = get_option("display.precision")
fmt_obj = fmt_klass(values, digits=digits, na_rep=na_rep,
float_format=float_format,
formatter=formatter, space=space,
justify=justify)
return fmt_obj.get_result()
class GenericArrayFormatter(object):
def __init__(self, values, digits=7, formatter=None, na_rep='NaN',
space=12, float_format=None, justify='right'):
self.values = values
self.digits = digits
self.na_rep = na_rep
self.space = space
self.formatter = formatter
self.float_format = float_format
self.justify = justify
def get_result(self):
fmt_values = self._format_strings()
return _make_fixed_width(fmt_values, self.justify)
def _format_strings(self):
if self.float_format is None:
float_format = get_option("display.float_format")
if float_format is None:
fmt_str = '%% .%dg' % get_option("display.precision")
float_format = lambda x: fmt_str % x
else:
float_format = self.float_format
formatter = self.formatter if self.formatter is not None else \
(lambda x: com.pprint_thing(x, escape_chars=('\t', '\r', '\n')))
def _format(x):
if self.na_rep is not None and lib.checknull(x):
if x is None:
return 'None'
return self.na_rep
elif isinstance(x, PandasObject):
return '%s' % x
else:
# object dtype
return '%s' % formatter(x)
vals = self.values
is_float = lib.map_infer(vals, com.is_float) & notnull(vals)
leading_space = is_float.any()
fmt_values = []
for i, v in enumerate(vals):
if not is_float[i] and leading_space:
fmt_values.append(' %s' % _format(v))
elif is_float[i]:
fmt_values.append(float_format(v))
else:
fmt_values.append(' %s' % _format(v))
return fmt_values
class FloatArrayFormatter(GenericArrayFormatter):
"""
"""
def __init__(self, *args, **kwargs):
GenericArrayFormatter.__init__(self, *args, **kwargs)
if self.float_format is not None and self.formatter is None:
self.formatter = self.float_format
def _format_with(self, fmt_str):
def _val(x, threshold):
if notnull(x):
if (threshold is None or
abs(x) > get_option("display.chop_threshold")):
return fmt_str % x
else:
if fmt_str.endswith("e"): # engineering format
return "0"
else:
return fmt_str % 0
else:
return self.na_rep
threshold = get_option("display.chop_threshold")
fmt_values = [_val(x, threshold) for x in self.values]
return _trim_zeros(fmt_values, self.na_rep)
def _format_strings(self):
if self.formatter is not None:
fmt_values = [self.formatter(x) for x in self.values]
else:
fmt_str = '%% .%df' % (self.digits - 1)
fmt_values = self._format_with(fmt_str)
if len(fmt_values) > 0:
maxlen = max(len(x) for x in fmt_values)
else:
maxlen = 0
too_long = maxlen > self.digits + 5
abs_vals = np.abs(self.values)
# this is pretty arbitrary for now
has_large_values = (abs_vals > 1e8).any()
has_small_values = ((abs_vals < 10 ** (-self.digits+1)) &
(abs_vals > 0)).any()
if too_long and has_large_values:
fmt_str = '%% .%de' % (self.digits - 1)
fmt_values = self._format_with(fmt_str)
elif has_small_values:
fmt_str = '%% .%de' % (self.digits - 1)
fmt_values = self._format_with(fmt_str)
return fmt_values
class IntArrayFormatter(GenericArrayFormatter):
def _format_strings(self):
formatter = self.formatter or (lambda x: '% d' % x)
fmt_values = [formatter(x) for x in self.values]
return fmt_values
class Datetime64Formatter(GenericArrayFormatter):
def __init__(self, values, nat_rep='NaT', date_format=None, **kwargs):
super(Datetime64Formatter, self).__init__(values, **kwargs)
self.nat_rep = nat_rep
self.date_format = date_format
def _format_strings(self):
# we may have a tz, if so, then need to process element-by-element
# when DatetimeBlockWithTimezones is a reality this could be fixed
values = self.values
if not isinstance(values, DatetimeIndex):
values = DatetimeIndex(values)
if values.tz is None:
fmt_values = format_array_from_datetime(values.asi8.ravel(),
format=_get_format_datetime64_from_values(values, self.date_format),
na_rep=self.nat_rep).reshape(values.shape)
fmt_values = fmt_values.tolist()
else:
values = values.asobject
is_dates_only = _is_dates_only(values)
formatter = (self.formatter or _get_format_datetime64(is_dates_only, nat_rep=self.nat_rep, date_format=self.date_format))
fmt_values = [ formatter(x) for x in self.values ]
return fmt_values
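# Illustrative sketch, added here for clarity and not part of the original module:
# a direct call into Datetime64Formatter, assuming DatetimeIndex is importable at
# module level as it is used above. Wrapped in a helper so importing the module
# stays side-effect free.
def _example_datetime64_formatter():  # pragma: no cover
    idx = DatetimeIndex(['2014-01-01', '2014-01-02', None])
    fmt = Datetime64Formatter(idx, nat_rep='NaT')
    # With date-only values the "%Y-%m-%d" format is chosen, so the output is
    # expected to be roughly ['2014-01-01', '2014-01-02', 'NaT'].
    return fmt._format_strings()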
def _is_dates_only(values):
# return a boolean if we are only dates (and don't have a timezone)
values = DatetimeIndex(values)
if values.tz is not None:
return False
values_int = values.asi8
consider_values = values_int != iNaT
one_day_nanos = (86400 * 1e9)
even_days = np.logical_and(consider_values, values_int % one_day_nanos != 0).sum() == 0
if even_days:
return True
return False
def _format_datetime64(x, tz=None, nat_rep='NaT'):
if x is None or lib.checknull(x):
return nat_rep
if tz is not None or not isinstance(x, Timestamp):
x = Timestamp(x, tz=tz)
return str(x)
def _format_datetime64_dateonly(x, nat_rep='NaT', date_format=None):
if x is None or lib.checknull(x):
return nat_rep
if not isinstance(x, Timestamp):
x = Timestamp(x)
if date_format:
return x.strftime(date_format)
else:
return x._date_repr
def _get_format_datetime64(is_dates_only, nat_rep='NaT', date_format=None):
if is_dates_only:
return lambda x, tz=None: _format_datetime64_dateonly(x,
nat_rep=nat_rep,
date_format=date_format)
else:
return lambda x, tz=None: _format_datetime64(x, tz=tz, nat_rep=nat_rep)
def _get_format_datetime64_from_values(values, date_format):
""" given values and a date_format, return a string format """
is_dates_only = _is_dates_only(values)
if is_dates_only:
return date_format or "%Y-%m-%d"
return None
class Timedelta64Formatter(GenericArrayFormatter):
def __init__(self, values, nat_rep='NaT', box=False, **kwargs):
super(Timedelta64Formatter, self).__init__(values, **kwargs)
self.nat_rep = nat_rep
self.box = box
def _format_strings(self):
formatter = self.formatter or _get_format_timedelta64(self.values, nat_rep=self.nat_rep,
box=self.box)
fmt_values = [formatter(x) for x in self.values]
return fmt_values
def _get_format_timedelta64(values, nat_rep='NaT', box=False):
"""
Return a formatter function for a range of timedeltas.
These will all have the same format argument
If box, then show the return in quotes
"""
values_int = values.astype(np.int64)
consider_values = values_int != iNaT
one_day_nanos = (86400 * 1e9)
even_days = np.logical_and(consider_values, values_int % one_day_nanos != 0).sum() == 0
all_sub_day = np.logical_and(consider_values, np.abs(values_int) >= one_day_nanos).sum() == 0
if even_days:
format = 'even_day'
elif all_sub_day:
format = 'sub_day'
else:
format = 'long'
def _formatter(x):
if x is None or lib.checknull(x):
return nat_rep
if not isinstance(x, Timedelta):
x = Timedelta(x)
result = x._repr_base(format=format)
if box:
result = "'{0}'".format(result)
return result
return _formatter
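# Illustrative sketch, not present in the original source: _get_format_timedelta64
# inspects the whole array once and returns a single formatter, so every element is
# rendered with the same precision. A rough use, relying only on names already
# imported above (np, Timedelta, iNaT):
def _example_timedelta_formatter():  # pragma: no cover
    one_day = 86400 * 10 ** 9
    vals = np.array([one_day, 2 * one_day], dtype='m8[ns]')
    fmt = _get_format_timedelta64(vals, box=True)
    # Whole-day inputs select the 'even_day' format, so the result is expected to
    # look like ["'1 days'", "'2 days'"].
    return [fmt(x) for x in vals]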
def _make_fixed_width(strings, justify='right', minimum=None):
if len(strings) == 0 or justify == 'all':
return strings
_strlen = _strlen_func()
max_len = np.max([_strlen(x) for x in strings])
if minimum is not None:
max_len = max(minimum, max_len)
conf_max = get_option("display.max_colwidth")
if conf_max is not None and max_len > conf_max:
max_len = conf_max
if justify == 'left':
justfunc = lambda self, x: self.ljust(x)
else:
justfunc = lambda self, x: self.rjust(x)
def just(x):
eff_len = max_len
if conf_max is not None:
if (conf_max > 3) & (_strlen(x) > max_len):
x = x[:eff_len - 3] + '...'
return justfunc(x, eff_len)
result = [just(x) for x in strings]
return result
def _trim_zeros(str_floats, na_rep='NaN'):
"""
Trims zeros and decimal points.
"""
trimmed = str_floats
def _cond(values):
non_na = [x for x in values if x != na_rep]
return (len(non_na) > 0 and all([x.endswith('0') for x in non_na]) and
not(any([('e' in x) or ('E' in x) for x in non_na])))
while _cond(trimmed):
trimmed = [x[:-1] if x != na_rep else x for x in trimmed]
# trim decimal points
return [x[:-1] if x.endswith('.') and x != na_rep else x for x in trimmed]
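# Worked example, added for clarity (not in the original file): trailing zeros are
# stripped one per pass only while every non-NaN entry still ends in '0', after
# which a bare trailing '.' is dropped.
def _example_trim_zeros():  # pragma: no cover
    # ['1.500', '2.000'] -> ['1.50', '2.00'] -> ['1.5', '2.0'], then the loop stops
    # because '1.5' no longer ends in '0'.
    return _trim_zeros(['1.500', '2.000'])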
def single_column_table(column, align=None, style=None):
table = '<table'
if align is not None:
table += (' align="%s"' % align)
if style is not None:
table += (' style="%s"' % style)
table += '><tbody>'
for i in column:
table += ('<tr><td>%s</td></tr>' % str(i))
table += '</tbody></table>'
return table
def single_row_table(row): # pragma: no cover
table = '<table><tbody><tr>'
for i in row:
table += ('<td>%s</td>' % str(i))
table += '</tr></tbody></table>'
return table
def _has_names(index):
if isinstance(index, MultiIndex):
return any([x is not None for x in index.names])
else:
return index.name is not None
# ------------------------------------------------------------------------------
# Global formatting options
_initial_defencoding = None
def detect_console_encoding():
"""
Try to find the most capable encoding supported by the console.
slightly modified from the way IPython handles the same issue.
"""
import locale
global _initial_defencoding
encoding = None
try:
encoding = sys.stdout.encoding or sys.stdin.encoding
except AttributeError:
pass
# try again for something better
if not encoding or 'ascii' in encoding.lower():
try:
encoding = locale.getpreferredencoding()
except Exception:
pass
# when all else fails. this will usually be "ascii"
if not encoding or 'ascii' in encoding.lower():
encoding = sys.getdefaultencoding()
# GH3360, save the reported defencoding at import time
# MPL backends may change it. Make available for debugging.
if not _initial_defencoding:
_initial_defencoding = sys.getdefaultencoding()
return encoding
def get_console_size():
"""Return console size as tuple = (width, height).
Returns (None,None) in non-interactive session.
"""
display_width = get_option('display.width')
# deprecated.
display_height = get_option('display.height', silent=True)
# Consider
# interactive shell terminal, can detect term size
# interactive non-shell terminal (ipnb/ipqtconsole), cannot detect term
# size non-interactive script, should disregard term size
# in addition
# width,height have default values, but setting to 'None' signals
# should use Auto-Detection, But only in interactive shell-terminal.
# Simple. yeah.
if com.in_interactive_session():
if com.in_ipython_frontend():
# sane defaults for interactive non-shell terminal
# match default for width,height in config_init
from pandas.core.config import get_default_val
terminal_width = get_default_val('display.width')
terminal_height = get_default_val('display.height')
else:
# pure terminal
terminal_width, terminal_height = get_terminal_size()
else:
terminal_width, terminal_height = None, None
# Note if the User sets width/Height to None (auto-detection)
# and we're in a script (non-inter), this will return (None,None)
# caller needs to deal.
return (display_width or terminal_width, display_height or terminal_height)
class EngFormatter(object):
"""
Formats float values according to engineering format.
Based on matplotlib.ticker.EngFormatter
"""
# The SI engineering prefixes
ENG_PREFIXES = {
-24: "y",
-21: "z",
-18: "a",
-15: "f",
-12: "p",
-9: "n",
-6: "u",
-3: "m",
0: "",
3: "k",
6: "M",
9: "G",
12: "T",
15: "P",
18: "E",
21: "Z",
24: "Y"
}
def __init__(self, accuracy=None, use_eng_prefix=False):
self.accuracy = accuracy
self.use_eng_prefix = use_eng_prefix
def __call__(self, num):
""" Formats a number in engineering notation, appending a letter
representing the power of 1000 of the original number. Some examples:
>>> format_eng(0) # for self.accuracy = 0
' 0'
>>> format_eng(1000000) # for self.accuracy = 1,
# self.use_eng_prefix = True
' 1.0M'
>>> format_eng("-1e-6") # for self.accuracy = 2
# self.use_eng_prefix = False
'-1.00E-06'
@param num: the value to represent
@type num: either a numeric value or a string that can be converted to
a numeric value (as per decimal.Decimal constructor)
@return: engineering formatted string
"""
import decimal
import math
dnum = decimal.Decimal(str(num))
sign = 1
if dnum < 0: # pragma: no cover
sign = -1
dnum = -dnum
if dnum != 0:
pow10 = decimal.Decimal(int(math.floor(dnum.log10() / 3) * 3))
else:
pow10 = decimal.Decimal(0)
pow10 = pow10.min(max(self.ENG_PREFIXES.keys()))
pow10 = pow10.max(min(self.ENG_PREFIXES.keys()))
int_pow10 = int(pow10)
if self.use_eng_prefix:
prefix = self.ENG_PREFIXES[int_pow10]
else:
if int_pow10 < 0:
prefix = 'E-%02d' % (-int_pow10)
else:
prefix = 'E+%02d' % int_pow10
mant = sign * dnum / (10 ** pow10)
if self.accuracy is None: # pragma: no cover
format_str = u("% g%s")
else:
format_str = (u("%% .%if%%s") % self.accuracy)
formatted = format_str % (mant, prefix)
return formatted # .strip()
def set_eng_float_format(accuracy=3, use_eng_prefix=False):
"""
Alter default behavior on how float is formatted in DataFrame.
Format float in engineering format. By accuracy, we mean the number of
decimal digits after the floating point.
See also EngFormatter.
"""
set_option("display.float_format", EngFormatter(accuracy, use_eng_prefix))
set_option("display.column_space", max(12, accuracy + 9))
def _put_lines(buf, lines):
if any(isinstance(x, compat.text_type) for x in lines):
lines = [compat.text_type(x) for x in lines]
buf.write('\n'.join(lines))
def _binify(cols, line_width):
adjoin_width = 1
bins = []
curr_width = 0
i_last_column = len(cols) - 1
for i, w in enumerate(cols):
w_adjoined = w + adjoin_width
curr_width += w_adjoined
if i_last_column == i:
wrap = curr_width + 1 > line_width and i > 0
else:
wrap = curr_width + 2 > line_width and i > 0
if wrap:
bins.append(i)
curr_width = w_adjoined
bins.append(len(cols))
return bins
if __name__ == '__main__':
arr = np.array([746.03, 0.00, 5620.00, 1592.36])
# arr = np.array([11111111.1, 1.55])
# arr = [314200.0034, 1.4125678]
arr = np.array([327763.3119, 345040.9076, 364460.9915, 398226.8688,
383800.5172, 433442.9262, 539415.0568, 568590.4108,
599502.4276, 620921.8593, 620898.5294, 552427.1093,
555221.2193, 519639.7059, 388175.7, 379199.5854,
614898.25, 504833.3333, 560600., 941214.2857,
1134250., 1219550., 855736.85, 1042615.4286,
722621.3043, 698167.1818, 803750.])
fmt = FloatArrayFormatter(arr, digits=7)
print(fmt.get_result())
|
sauloal/cnidaria
|
scripts/venv/lib/python2.7/site-packages/pandas/core/format.py
|
Python
|
mit
| 88,798
|
"""
Custom made pose attribute for simulation
"""
import math
from vector_3 import Vector3
from polar_vector import PolarVector
class Pose:
def __init__(self):
self.position = Vector3()
self.velocity = PolarVector()
|
comprobo-final-project/comprobo_final_project
|
comprobo_final_project/scripts/simulator/pose.py
|
Python
|
mit
| 241
|
# -*- coding: utf-8 -*-
#
# RADICAL-Pilot documentation build configuration file, created by
# sphinx-quickstart on Mon Dec 3 21:55:42 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import glob
import imp
import sys
import os
import radical.utils as ru
import pprint
import subprocess as sp
script_dir = os.path.dirname(os.path.realpath(__file__))
################################################################################
cmd = "git branch | grep '*' | cut -f 2 -d \ " \
+ " | sed -e 's/readthedocs.tutorial/tutorial/g' " \
+ " | sed -e 's/readthedocs/release/g'"
mytag = sp.Popen(cmd, shell=True, stdout=sp.PIPE).stdout.read().strip()
if 'detached' in mytag :
cmd = "git branch | grep '*' | cut -f 2 -d '/' | cut -f 1 -d ')'" \
+ " | sed -e 's/readthedocs.tutorial/tutorial/g' " \
+ " | sed -e 's/readthedocs/release/g'"
mytag = sp.Popen(cmd, shell=True, stdout=sp.PIPE).stdout.read().strip()
tags.add (mytag)
################################################################################
##
print "* Generating resource configuration docs: resources.rst"
print "* using tag: %s" % mytag
try:
os.remove("{0}/resources.rst".format(script_dir))
except OSError:
pass
with open("{0}/resources.rst".format(script_dir), "w") as resources_rst:
resources_rst.write("""
.. _chapter_resources:
List of Pre-Configured Resources
================================
""")
configs = os.listdir("{0}/../../src/radical/pilot/configs/".format(script_dir))
for config in configs:
if config.endswith(".json") is False:
continue # skip all non-json files
if config.startswith("aliases") is True:
continue # skip alias files
print " * %s" % config
try:
json_data = ru.read_json_str("../../src/radical/pilot/configs/%s" % config)
except Exception, ex:
print " * JSON PARSING ERROR: %s" % str(ex)
continue
resources_rst.write("{0}\n".format(config[:-5].upper()))
resources_rst.write("{0}\n\n".format("="*len(config[:-5])))
for host_key, resource_config in json_data.iteritems():
resource_key = "%s.%s" % (config[:-5], host_key)
print " * %s" % resource_key
try:
default_queue = resource_config["default_queue"]
except Exception, ex:
default_queue = None
try:
working_dir = resource_config["default_remote_workdir"]
except Exception, ex:
working_dir = "$HOME"
try:
python_interpreter = resource_config["python_interpreter"]
except Exception, ex:
python_interpreter = None
try:
access_schemas = resource_config["schemas"]
except Exception, ex:
access_schemas = ['n/a']
resources_rst.write("{0}\n".format(host_key.upper()))
resources_rst.write("{0}\n\n".format("*"*len(host_key)))
resources_rst.write("{0}\n\n".format(resource_config["description"]))
resources_rst.write("* **Resource label** : ``{0}``\n".format(resource_key))
resources_rst.write("* **Raw config** : :download:`{0} <../../src/radical/pilot/configs/{0}>`\n".format(config))
if resource_config["notes"] != "None":
resources_rst.write("* **Note** : {0}\n".format(resource_config["notes"]))
resources_rst.write("* **Default values** for ComputePilotDescription attributes:\n\n")
resources_rst.write(" * ``queue : {0}``\n".format(default_queue))
resources_rst.write(" * ``sandbox : {0}``\n".format(working_dir))
resources_rst.write(" * ``access_schema : {0}``\n\n".format(access_schemas[0]))
resources_rst.write("* **Available schemas** : ``{0}``\n".format(', '.join(access_schemas)))
resources_rst.write("\n")
##
################################################################################
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../src/'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.extlinks']
[extensions]
todo_include_todos=True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
rst_epilog = """
"""
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'RADICAL-Pilot'
copyright = u'2014, The RADICAL Group at Rutgers University'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
try:
import radical.pilot
version = radical.pilot.version
release = radical.pilot.version
except Exception as e:
print 'Could not determine version: %s' % e
version = "UNKNOWN"
release = "UNKNOWN"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_themes"]
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
extlinks = {'issue': ('https://github.com/radical-cybertools/radical.pilot/issues/%s',
'issue ')}
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
html_theme = "armstrong"
html_theme_path = ["_themes", ]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"collapsiblesidebar" : "true",
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = 'images/logo.jpg'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'radical.pilot.doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'RADICAL-Pilot.tex', u'RADICAL-Pilot Documentation',
u'The RADICAL Group at Rutgers University', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = 'images/logo.jpg'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
latex_show_pagerefs = True
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'radical.pilot', u'RADICAL-Pilot Documentation',
[u'The RADICAL Group at Rutgers University'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'RADICAL-Pilot', u'RADICAL-Pilot Documentation',
u'The RADICAL Group at Rutgers University', 'RADICAL-Pilot', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
autodoc_member_order = 'bysource'
autodoc_default_flags = ['members'] #, 'undoc-members', 'show-inheritance']
|
JensTimmerman/radical.pilot
|
docs/source/conf.py
|
Python
|
mit
| 12,807
|
import os
import webapp2
from mako.template import Template
from mako.lookup import TemplateLookup
class MainHandler(webapp2.RequestHandler):
def get(self):
template_values = {
'some_foo': 'foo',
'some_bar': 'bar'
}
# the template file in our GAE app directory
path = os.path.join(os.path.dirname(__file__), 'templates/foobar.tmpl')
# make a new template instance
templ = Template(filename=path)
# unpack the dictionary to become keyword arguments and render
self.response.out.write(templ.render(**template_values))
app = webapp2.WSGIApplication([('/foobar', MainHandler)], debug=True)
|
swangui/ggrid
|
foobar.py
|
Python
|
mit
| 638
|
import time, sys
import numpy as np
import matplotlib.pyplot as plt
sys.path.append('../../')
from py2Periodic.physics import twoLayerQG
from numpy import pi
params = {
'f0' : 1.0e-4,
'Lx' : 1.0e6,
'beta' : 1.5e-11,
'defRadius' : 1.5e4,
'H1' : 500.0,
'H2' : 2000.0,
'U1' : 2.5e-2,
'U2' : 0.0,
'bottomDrag' : 1.0e-7,
'nx' : 128,
'dt' : 1.0e3,
'visc' : 2.0e8,
'viscOrder' : 4.0,
'timeStepper': 'AB3',
'nThreads' : 4,
'useFilter' : False,
}
# Create the two-layer model
qg = twoLayerQG.model(**params)
qg.describe_model()
# Initial condition:
Ro = 1.0e-3
f0 = 1.0e-4
q1 = Ro*f0*np.random.standard_normal(qg.physVarShape)
q2 = Ro*f0*np.random.standard_normal(qg.physVarShape)
qg.set_q1_and_q2(q1, q2)
# Run a loop
nt = 1e3
for ii in np.arange(0, 1e3):
qg.step_nSteps(nSteps=nt, dnLog=nt)
qg.update_state_variables()
fig = plt.figure('Perturbation vorticity', figsize=(8, 8)); plt.clf()
plt.subplot(221); plt.imshow(qg.q1)
plt.subplot(222); plt.imshow(qg.q2)
plt.subplot(223); plt.imshow(np.abs(qg.soln[0:qg.ny//2, :, 0]))
plt.subplot(224); plt.imshow(np.abs(qg.soln[0:qg.ny//2, :, 1]))
plt.pause(0.01), plt.draw()
print("Close the plot to end the program")
plt.show()
|
glwagner/py2Periodic
|
tests/twoLayerQG/testTwoLayerQG.py
|
Python
|
mit
| 1,366
|
from __future__ import unicode_literals
from rbpkg.package_manager.dep_graph import DependencyGraph
from rbpkg.testing.testcases import TestCase
class DependencyGraphTests(TestCase):
"""Unit tests for rbpkg.package_manager.dep_graph.DependencyGraph."""
def test_iter_sorted_simple(self):
"""Testing DependencyGraph.iter_sorted in simple case"""
graph = DependencyGraph()
graph.add(3, [2])
graph.add(2, [1])
graph.add(1, [])
self.assertEqual(list(graph.iter_sorted()), [1, 2, 3])
def test_iter_sorted_complex(self):
"""Testing DependencyGraph.iter_sorted with complex dependencies"""
graph = DependencyGraph()
graph.add(5, [9])
graph.add(12, [9, 6, 15])
graph.add(15, [9, 2])
graph.add(9, [14, 20])
graph.add(6, [14, 2])
self.assertEqual(list(graph.iter_sorted()),
[14, 20, 9, 5, 2, 6, 15, 12])
def test_iter_sorted_circular_ref(self):
"""Testing DependencyGraph.iter_sorted with circular reference"""
graph = DependencyGraph()
graph.add(1, [2])
graph.add(2, [1])
self.assertEqual(list(graph.iter_sorted()), [2, 1])
|
reviewboard/rbpkg
|
rbpkg/package_manager/tests/test_dep_graph.py
|
Python
|
mit
| 1,217
|
import docker
from cargo.container import Container
from cargo.image import Image
# this is a hack to get `__getattribute__` working for a few reserved properties
RESERVED_METHODS = ['containers', '_client', 'images', 'info', 'start', 'stop']
class Dock(object):
"""Wrapper class for `docker-py` Client instances"""
def __init__(self, *args, **kw):
super(Dock, self).__init__()
self._client = docker.Client(*args, **kw)
def __repr__(self):
return '<Dock [%s] (%s)>' % (self.base_url, self.version().get('Version'))
def __getattribute__(self, x):
client = super(Dock, self).__getattribute__('_client')
# return client attribute if not a magic method or reserved attr
legal = not x.startswith('_') and not(x in RESERVED_METHODS)
if hasattr(client, x) and legal:
return client.__getattribute__(x)
return super(Dock, self).__getattribute__(x)
@property
def containers(self, *args, **kw):
return [Container(x) for x in self._client.containers(*args, **kw)]
@property
def _containers(self, *args, **kw):
return [x for x in self._client.containers(*args, **kw)]
@property
def images(self, *args, **kw):
return [Image(x) for x in self._client.images(*args, **kw)]
@property
def _images(self, *args, **kw):
return [x for x in self._client.images(*args, **kw)]
@property
def info(self):
return self._client.info()
@property
def total_num_containers(self):
info = self.info
return int(info.get('Containers'))
@property
def total_num_images(self):
info = self.info
return int(info.get('Images'))
@property
def total_num_goroutines(self):
info = self.info
return int(info.get('NGoroutines'))
@property
def memory_limit(self):
info = self.info
return info.get('MemoryLimit')
@property
def debug(self):
info = self.info
return info.get('Debug')
def running(self, container):
"""Returns True if dock is running container, else False
Accepts container id's and Container objects
"""
container_ids = [x.container_id for x in self.containers]
if isinstance(container, Container):
return container.container_id in container_ids
elif isinstance(container, basestring):
return container in container_ids
raise TypeError('expected container id as string or Container object.')
def start(self, container, *args, **kw):
if isinstance(container, Container):
cid = container.container_id
elif isinstance(container, basestring):
cid = container
return self._client.start(cid, *args, **kw)
def stop(self, container, *args, **kw):
if isinstance(container, Container):
cid = container.container_id
elif isinstance(container, basestring):
cid = container
return self._client.stop(cid, *args, **kw)
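# Rough usage sketch (not part of the original module). Unknown attributes fall
# through __getattribute__ to the underlying docker-py Client, while the properties
# above hand back Container/Image wrappers. The socket URL is a placeholder.
if __name__ == '__main__':  # pragma: no cover
    dock = Dock(base_url='unix://var/run/docker.sock')
    print(dock.total_num_containers)   # pulled from the Docker `info` endpoint
    for container in dock.containers:  # Container wrappers around `docker ps` rows
        print(dock.running(container)) # True while the container id is still listed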
|
mvanveen/cargo
|
cargo/dock.py
|
Python
|
mit
| 2,845
|
"""
Copyright: (c) 2012-2014 Artem Nezvigin <artem@artnez.com>
License: MIT, see LICENSE for details
"""
from functools import wraps
from flask import g, request, session, render_template, url_for, redirect
from faceoff.models.user import find_user
def templated(template_name=None):
"""
Automatically renders a template named after the current endpoint. Will
also render the name provided if given.
"""
def closure(f):
@wraps(f)
def decorator(*args, **kwargs):
template = template_name
response = f(*args, **kwargs)
if response is None:
response = {}
elif not isinstance(response, dict):
return response
if template is None:
template = '%s.html' % request.endpoint
return render_template(template, **response)
return decorator
return closure
def authenticated(f):
"""
Asserts that an existing logged-in user session is active. If not,
redirects to the authenticate gate.
"""
@wraps(f)
def decorator(*args, **kwargs):
user_id = session.get('user_id')
if user_id is None:
return redirect(url_for('gate'))
user = find_user(id=user_id)
if user is None:
return redirect(url_for('gate'))
g.current_user = user
return f(*args, **kwargs)
return decorator
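# Hypothetical usage sketch, added for illustration; the route below is invented and
# not part of faceoff. `templated()` renders `dashboard.html` (named after the
# endpoint) from the returned dict, and `authenticated` bounces anonymous users to
# the gate before the view body runs.
#
# @app.route('/dashboard')
# @templated()
# @authenticated
# def dashboard():
#     return {'user': g.current_user}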
|
artnez/faceoff
|
faceoff/helpers/decorators.py
|
Python
|
mit
| 1,421
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of holmesalf.
# https://github.com/holmes-app/holmes-alf
# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT-license
# Copyright (c) 2014 Pablo Aguiar scorphus@gmail.com
from holmesalf import BaseAuthNZWrapper
from alf.client import Client as AlfSyncClient
from tornadoalf.client import Client as AlfAsyncClient
class AlfAuthNZWrapper(BaseAuthNZWrapper):
"""This class gathers authentication and authorization
for some of the services used by Holmes"""
def __init__(self, config):
self.config = config
self._sync_client = None
self._async_client = None
@property
def sync_client(self):
"""Synchronous OAuth 2.0 Bearer client"""
if not self._sync_client:
self._sync_client = AlfSyncClient(
token_endpoint=self.config.get('OAUTH_TOKEN_ENDPOINT'),
client_id=self.config.get('OAUTH_CLIENT_ID'),
client_secret=self.config.get('OAUTH_CLIENT_SECRET')
)
return self._sync_client
@property
def async_client(self):
"""Asynchronous OAuth 2.0 Bearer client"""
if not self._async_client:
self._async_client = AlfAsyncClient(
token_endpoint=self.config.get('OAUTH_TOKEN_ENDPOINT'),
client_id=self.config.get('OAUTH_CLIENT_ID'),
client_secret=self.config.get('OAUTH_CLIENT_SECRET')
)
return self._async_client
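# Illustrative helper, not part of the original module; the endpoint and credentials
# below are placeholders, and the config dict simply mirrors the keys read above.
def _example_wrapper_usage():  # pragma: no cover
    wrapper = AlfAuthNZWrapper({
        'OAUTH_TOKEN_ENDPOINT': 'https://auth.example.com/token',
        'OAUTH_CLIENT_ID': 'client-id',
        'OAUTH_CLIENT_SECRET': 'client-secret',
    })
    # The synchronous client is expected to behave like a requests session that
    # injects the Bearer token on each call; the async one is its Tornado counterpart.
    return wrapper.sync_client, wrapper.async_client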
|
holmes-app/holmes-alf
|
holmesalf/wrapper.py
|
Python
|
mit
| 1,536
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
read-bookmark.py
~~~~~~~~~~~~~~~~
This module is an example of how to harness the Readability API w/ oAuth.
This module expects the following environment variables to be set:
- READABILITY_CONSUMER_KEY
- READABILITY_CONSUMER_SECRET
- READABILITY_ACCESS_TOKEN
- READABILITY_ACCESS_SECRET
Once you have your consumer keys setup, run the following to get your
access tokens::
$ ./login-xauth.py <username> <password>
"""
import sys
from HTMLParser import HTMLParser
from ext import setup_rdd
class MLStripper(HTMLParser):
"""HTMLParser w/ overrides for stripping text out."""
def __init__(self):
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def get_data(self):
return ' '.join(self.fed)
def strip_tags(html):
"""A super low-tech and debatably irresponsible attempt to turn HTML
into plain text."""
s = MLStripper()
s.feed(html)
data = s.get_data()
for s in ('\n\n\n\n\n', '\n\n\n\n', '\n\n\n', '\n', '\t'):
data = data.replace(s, '')
data = data.replace(' ', '')
return data
def main():
rdd = setup_rdd()
bookmarks = rdd.get_me().bookmarks(limit=10)
print 'Recent Bookmarks'
print '----------------\n'
for i, mark in enumerate(bookmarks):
print '%01d: %s (%s)' % (i, mark.article.title, mark.article.domain)
try:
selection = raw_input('\nRead Article (0-9)? ')
selection = int(selection)
assert (selection < 10) and (selection >= 0)
except (ValueError, AssertionError):
print >> sys.stderr, '\nEnter a number within 0-9, if you don\'t mind.'
except KeyboardInterrupt:
print >> sys.stderr, '\nWell, fine.'
sys.exit()
article = bookmarks[selection].article
article = rdd.get_article(article.id)
print article.title
print '-' * len(article.title) + '\n'
print strip_tags(article.content)
if __name__ == '__main__':
main()
|
alexwaters/python-readability-api
|
examples/read-bookmarks.py
|
Python
|
mit
| 2,034
|
from flask import render_template
from . import main
@main.app_errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
@main.app_errorhandler(500)
def internal_server_error(e):
return render_template('500.html'), 500
|
hubinary/flasky
|
app/main/errors.py
|
Python
|
mit
| 255
|
from unittest import TestCase
from paramiko import SSHException
from pyinfra.api import Config, State
from pyinfra.api.connect import connect_all
from pyinfra.api.exceptions import NoGroupError, NoHostError, PyinfraError
from ..paramiko_util import PatchSSHTestCase
from ..util import make_inventory
class TestInventoryApi(TestCase):
def test_inventory_creation(self):
inventory = make_inventory()
# Check length
assert len(inventory.hosts) == 2
# Get a host
host = inventory.get_host('somehost')
assert host.data.ssh_user == 'vagrant'
# Check our group data
assert inventory.get_group_data('test_group') == {
'group_data': 'hello world',
}
def test_tuple_host_group_inventory_creation(self):
inventory = make_inventory(
hosts=[
('somehost', {'some_data': 'hello'}),
],
tuple_group=([
('somehost', {'another_data': 'world'}),
], {
'tuple_group_data': 'word',
}),
)
# Check host data
host = inventory.get_host('somehost')
assert host.data.some_data == 'hello'
assert host.data.another_data == 'world'
# Check group data
assert host.data.tuple_group_data == 'word'
def test_host_and_group_errors(self):
inventory = make_inventory()
with self.assertRaises(NoHostError):
inventory.get_host('i-dont-exist')
with self.assertRaises(NoGroupError):
inventory.get_group('i-dont-exist')
class TestStateApi(PatchSSHTestCase):
def test_fail_percent(self):
inventory = make_inventory((
'somehost',
('thinghost', {'ssh_hostname': SSHException}),
'anotherhost',
))
state = State(inventory, Config(FAIL_PERCENT=1))
# Ensure we would fail at this point
with self.assertRaises(PyinfraError) as context:
connect_all(state)
assert context.exception.args[0] == 'Over 1% of hosts failed (33%)'
# Ensure the other two did connect
assert len(state.active_hosts) == 2
|
Fizzadar/pyinfra
|
tests/test_api/test_api.py
|
Python
|
mit
| 2,193
|
import click
import mock
import pytest
from click.testing import CliRunner
from sigopt.cli import cli
class TestRunCli(object):
@pytest.mark.parametrize('opt_into_log_collection', [False, True])
@pytest.mark.parametrize('opt_into_cell_tracking', [False, True])
def test_config_command(self, opt_into_log_collection, opt_into_cell_tracking):
runner = CliRunner()
log_collection_arg = '--enable-log-collection' if opt_into_log_collection else '--no-enable-log-collection'
cell_tracking_arg = '--enable-cell-tracking' if opt_into_cell_tracking else '--no-enable-cell-tracking'
with mock.patch('sigopt.cli.commands.config._config.persist_configuration_options') as persist_configuration_options:
result = runner.invoke(cli, [
'config',
'--api-token=some_test_token',
log_collection_arg,
cell_tracking_arg,
])
persist_configuration_options.assert_called_once_with({
'api_token': 'some_test_token',
'code_tracking_enabled': opt_into_cell_tracking,
'log_collection_enabled': opt_into_log_collection,
})
assert result.exit_code == 0
assert result.output == ''
|
sigopt/sigopt-python
|
test/cli/test_cli_config.py
|
Python
|
mit
| 1,165
|
import unittest
import ezgal.zf_grid
import numpy as np
import math
# I put the test data for the zf_grid tests in
# tests.zf_grid instead of in tests because
# there is a lot of data but it is all
# specific for this test.
import tests.zf_grid
class test_get_rest_mags(tests.zf_grid.test_zf_grid):
def test_get_rest_mags(self):
self.assertTrue(np.allclose(
self.zf_grid.get_rest_mags(tests.zf_grid.test_zs),
[0.275, 0.75, 1.25, 1.75, 2.25, 2.778], 1e-4))
def test_get_rest_mags_lower_bound(self):
# if we go lower than our lowest gridded z then
# we should get a nan
vals = self.zf_grid.get_rest_mags([-1])
self.assertTrue(math.isnan(vals[0]))
def test_get_rest_mags_upper_bound(self):
# if we go higher than our highest gridded z then
# we should get a nan
vals = self.zf_grid.get_rest_mags([4])
self.assertTrue(math.isnan(vals[0]))
if __name__ == '__main__':
unittest.main()
|
cmancone/ezgal
|
tests/zf_grid/test_get_rest_mags.py
|
Python
|
mit
| 998
|
from __future__ import absolute_import
import os
import ming
from ming import Session
from ming.odm import ThreadLocalODMSession
from ming import create_datastore
from depot.fields.ming import DepotExtension
mainsession = Session()
DBSession = ThreadLocalODMSession(mainsession, extensions=(DepotExtension, ))
database_setup = False
datastore = None
def setup_database():
global datastore, database_setup
if not database_setup:
datastore = create_datastore(os.environ.get('MONGOURL', 'mim:///depottest'))
mainsession.bind = datastore
ming.odm.Mapper.compile_all()
def clear_database():
global datastore, database_setup
if not database_setup:
setup_database()
try:
# On MIM drop all data
datastore.conn.drop_all()
except TypeError:
# On MongoDB drop database
datastore.conn.drop_database(datastore.db)
|
amol-/depot
|
tests/base_ming.py
|
Python
|
mit
| 896
|
from __future__ import absolute_import, division, print_function
import ast
from jaspyx.ast_util import ast_load, ast_call
from jaspyx.visitor import BaseVisitor
class BinOp(BaseVisitor):
def visit_BinOp(self, node):
attr = getattr(self, 'BinOp_%s' % node.op.__class__.__name__, None)
attr(node.left, node.right)
for key, value in {
'Add': '+',
'Sub': '-',
'Mult': '*',
'Div': '/',
'Mod': '%',
'BitAnd': '&',
'BitOr': '|',
'BitXor': '^',
'LShift': '<<',
'RShift': '>>',
}.items():
def gen_op(op):
def f_op(self, left, right):
self.group([left, op, right])
return f_op
exec('BinOp_%s = gen_op("%s")' % (key, value))
def BinOp_Pow(self, left, right):
pow_func = ast_load('Math.pow')
self.visit(ast_call(pow_func, left, right))
def BinOp_FloorDiv(self, left, right):
floor = ast_load('Math.floor')
self.visit(ast_call(floor, ast.BinOp(left, ast.Div(), right)))
|
ztane/jaspyx
|
jaspyx/visitor/binop.py
|
Python
|
mit
| 1,067
|
#!/usr/bin/python3
import argparse
import collections
import json
import string
import sys
header_template = """
#ifndef ASPARSERATIONS_GENERATED_${class_name}_H_
#define ASPARSERATIONS_GENERATED_${class_name}_H_
#include <array>
#include <map>
#include <memory>
#include <set>
#include <utility>
#include <vector>
$header_front
$begin_namespace
enum class Token
{
$tokens
};
enum class Nonterminal
{
$nonterminals
};
enum class Production
{
$productions
};
struct Lexer_State
{
const char* begin;
const char* end;
unsigned int lines;
const char* last_newline;
};
Lexer_State next(const Lexer_State&);
/**
*/
class Node
{
public:
Node(const $payload&, const Lexer_State&);
Node(const $payload&, std::vector<std::unique_ptr<Node>>);
const $payload& payload() const;
const std::vector<std::unique_ptr<Node>>& children() const;
const Lexer_State& state() const;
virtual ~Node() = default;
private:
$payload m_payload;
std::vector<std::unique_ptr<Node>> m_children;
Lexer_State m_state;
};
class $class_name
{
public:
$class_name();
std::unique_ptr<Node> parse(const std::string&, $lexer&, $callback&);
static std::string nonterminal_to_string(Nonterminal);
static std::string production_to_string(Production);
virtual ~$class_name() = default;
private:
struct Mangled_Production
{
const Nonterminal nonterminal;
const Production production;
unsigned int child_count;
};
struct Productions
{
Productions();
$mangled_productions_header
};
struct State
{
std::map<Token,std::pair<const State*,std::set<const Mangled_Production*>>>
actions;
std::map<Nonterminal,const State*> gotos;
};
std::vector<State> m_states;
std::vector<std::pair<std::unique_ptr<Node>,const State*>> m_stack;
std::unique_ptr<Productions> m_productions;
void m_process(const State&, const Lexer_State&, $lexer&, $callback&, std::unique_ptr<Node>&);
void m_reduce(const Mangled_Production&, $callback&, std::unique_ptr<Node>&);
};
$end_namespace
#endif
"""
src_template = """
#include <algorithm>
#include <stdexcept>
#include <utility>
#include "../include/$class_name.hpp"
$src_front
$namespace::Lexer_State $namespace::next(const $namespace::Lexer_State& ls)
{
$namespace::Lexer_State ls_prime = {
ls.end,
ls.end,
ls.lines,
ls.last_newline
};
return ls_prime;
}
$namespace::Node::Node(const $payload& payload,
const $namespace::Lexer_State& state)
: m_payload(payload), m_state(state) {}
$namespace::Node::Node(const $payload& payload,
std::vector<std::unique_ptr<Node>> children)
{
if(children.empty())
throw std::runtime_error("Zero children,"
"call Node(const char*, const char*) instead");
m_payload = payload;
m_children = std::move(children);
m_state = $namespace::Lexer_State {
m_children.front()->state().begin,
m_children.back()->state().end,
m_children.back()->state().lines,
m_children.back()->state().last_newline
};
}
const $payload& $namespace::Node::payload() const
{
return m_payload;
}
const std::vector<std::unique_ptr<$namespace::Node>>&
$namespace::Node::children() const
{
return m_children;
}
const $namespace::Lexer_State& $namespace::Node::state() const
{
return m_state;
}
$namespace::$class_name::Productions::Productions()
: $mangled_productions_src
{
}
$namespace::$class_name::$class_name()
: m_productions(new Productions()), m_states($state_count)
{
$states
}
std::unique_ptr<$namespace::Node>
$namespace::$class_name::parse(const std::string& input,
$lexer& lexer,
$callback& callback)
{
std::unique_ptr<Node> root;
m_process(m_states.front(),
$namespace::Lexer_State{input.data(), input.data(),
1, input.data() - 1},
lexer, callback, root);
while(!m_stack.empty()) {
m_process(*m_stack.back().second,
$namespace::next(m_stack.back().first->state()),
lexer, callback, root);
}
return root;
}
std::string
$namespace::$class_name::nonterminal_to_string($namespace::Nonterminal nt)
{
switch(nt) {
$nonterminals_to_strings
}
throw std::runtime_error("Unknown nonterminal");
}
std::string
$namespace::$class_name::production_to_string($namespace::Production p)
{
switch(p) {
$productions_to_strings
}
throw std::runtime_error("Unknown production");
}
void $namespace::$class_name::m_process(
const $namespace::$class_name::State& state,
const $namespace::Lexer_State& lex_state,
$lexer& lexer,
$callback& callback,
std::unique_ptr<$namespace::Node>& root)
{
$namespace::Lexer_State err;
for(auto& action : state.actions) {
auto result = lexer.expect(action.first, lex_state);
err = result.first;
if(result.second) {
if(action.second.first != nullptr) {
try {
m_stack.emplace_back(
std::unique_ptr<$namespace::Node>(new Node(callback.call(action.first,
std::string(result.first.begin,
result.first.end)),
result.first)),
action.second.first
);
} catch(std::runtime_error& e) {
throw std::runtime_error(std::to_string(err.lines) + ":"
+ std::to_string(err.end - 1 - err.last_newline) + ": " + e.what());
}
return;
}
if(!action.second.second.empty()) {
m_reduce(**action.second.second.begin(), callback, root);
return;
}
}
}
throw std::runtime_error("Failed parse: " + std::to_string(err.lines)
+ ":" + std::to_string(err.end - err.last_newline));
}
void $namespace::$class_name::m_reduce(
const $namespace::$class_name::Mangled_Production& production,
$callback& callback,
std::unique_ptr<$namespace::Node>& root)
{
if(m_stack.empty()) throw std::runtime_error("Can't reduce empty stack");
std::unique_ptr<$namespace::Node> node = nullptr;
if(production.child_count == 0) {
node = std::unique_ptr<$namespace::Node>(new Node(callback.call(production.nonterminal,
production.production,
{}),
$namespace::next(m_stack.back().first->state())));
} else {
std::vector<std::unique_ptr<Node>> popped;
for(int i = 0; i < production.child_count; ++i) {
if(m_stack.empty()) throw std::runtime_error("Stack underflow");
popped.push_back(std::move(m_stack.back().first));
m_stack.pop_back();
}
std::reverse(popped.begin(), popped.end());
try {
auto temp = callback.call(production.nonterminal, production.production, popped);
node = std::unique_ptr<$namespace::Node>(new Node(temp, std::move(popped)));
} catch(std::runtime_error& e) {
throw std::runtime_error(std::string("Error: ") + e.what());
}
}
if(production.nonterminal == Nonterminal::accept_) {
root = std::move(node);
return;
}
const State* state;
if(m_stack.empty()) {
state = &m_states[0];
} else {
state = m_stack.back().second;
}
auto iter = state->gotos.find(production.nonterminal);
if(iter == m_stack.back().second->gotos.end()) {
throw std::runtime_error("Unknown nonterminal");
}
m_stack.emplace_back(std::move(node), iter->second);
}
"""
def gen_namespace_decls(namespaces):
begin = ""
end = ""
for namespace in namespaces:
begin += "namespace " + namespace + " {\n"
end = "} // " + namespace + "\n" + end
return {"begin_namespace" : begin, "end_namespace" : end}
def gen_production_list(grammar):
names = set()
for name,productions in grammar["nonterminals"].items():
for prodname,wildcard in productions.items():
names.add(prodname)
lines = ",\n ".join(names)
return lines
def gen_mangled_production_list_header(grammar):
lines = ""
for name,productions in grammar["nonterminals"].items():
for prodname,symbols in productions.items():
lines += "Mangled_Production " + name + "_" + prodname + ";\n "
return lines
def gen_header(template, table, config):
tokens = ",\n ".join(table["grammar"]["tokens"])
nonterminal_list = []
for name, wildcard in table["grammar"]["nonterminals"].items():
nonterminal_list.append(name)
nonterminals = ",\n ".join(nonterminal_list)
mangled_productions = gen_mangled_production_list_header(table["grammar"])
productions = gen_production_list(table["grammar"])
# Lost in stupid parentheses
return string.Template( \
string.Template( \
string.Template(template) \
.safe_substitute(config)) \
.safe_substitute(tokens=tokens, nonterminals=nonterminals, \
mangled_productions_header=mangled_productions, \
productions=productions,
state_count=str(len(table["table"])))) \
.substitute(gen_namespace_decls(config["namespace"]))
def gen_namespace_prefix(namespaces):
return "::".join(namespaces)
def gen_mangled_productions_src(grammar):
lines = []
for name,productions in grammar["nonterminals"].items():
for prodname,symbols in productions.items():
lines.append(name + "_" + prodname + " {Nonterminal::"\
+ name + ", " + "Production::" + prodname + ", " \
+ str(len(symbols)) + "}")
return ",\n ".join(lines)
def gen_state(template, state, config):
actions = []
gotos = []
for token, action in state["actions"].items():
action_str = "{\n Token::" + token + ", {"
if action["shift"] is None:
action_str += "nullptr, {\n "
else:
action_str += "&m_states["+str(action["shift"])+"], {\n "
reduce_strs = map(lambda x :
"&m_productions->" + x["nonterminal"]
+ "_" + x["production"],\
action["reductions"])
reduce_str = ",\n ".join(reduce_strs)
action_str += reduce_str + "\n }}\n }"
actions.append(action_str)
for nonterminal, index in state["gotos"].items():
goto_str = "{Nonterminal::" + nonterminal \
+ ", &m_states[" + str(index) + "]}"
gotos.append(goto_str)
actions_str = ",\n ".join(actions)
gotos_str = ",\n ".join(gotos)
return "m_states[" + str(state["index"]) \
+ "] = State {\n { // actions\n " + actions_str + "\n }" \
+ ",\n { // gotos \n " + gotos_str + "\n }\n };"
def gen_nonterminal_to_strings(nonterminal):
name, wildcard = nonterminal
return "case Nonterminal::" + name + ": return \"" + name + "\";"
def gen_productions_to_strings(grammar):
names = set()
for name,productions in grammar["nonterminals"].items():
for prodname,wildcard in productions.items():
names.add(prodname)
lines = map(lambda p: "case Production::" + p + ": return \"" + p \
+ "\";",
names)
return "\n ".join(lines)
def gen_src(template, table, config):
namespace_prefix = gen_namespace_prefix(config["namespace"])
states = map(lambda x : gen_state(template, x, config), table["table"])
states_text = "\n ".join(states)
nonterminals_to_strings = "\n ".join(map(gen_nonterminal_to_strings,\
table["grammar"]["nonterminals"]\
.items()))
return string.Template(string.Template(template) \
.safe_substitute(namespace=namespace_prefix, states=states_text, \
state_count=len(table["table"]),\
nonterminals_to_strings=nonterminals_to_strings,\
productions_to_strings\
=gen_productions_to_strings(table["grammar"]),\
mangled_productions_src=\
gen_mangled_productions_src(table["grammar"]))) \
.safe_substitute(config)
def main():
argparser = argparse.ArgumentParser()
argparser.add_argument("json")
argparser.add_argument("config")
argparser.add_argument("dest")
args = argparser.parse_args()
table = json.load(open(args.json, "r"),\
object_pairs_hook=collections.OrderedDict)
config = json.load(open(args.config, "r"))
dest = args.dest
header_file = open(dest + "/include/" + config["class_name"] + ".hpp", "w+")
src_file = open(dest + "/src/" + config["class_name"] + ".cpp", "w+")
header_file.write(gen_header(header_template, table, config))
src_file.write(gen_src(src_template, table, config))
header_file.close()
src_file.close()
if __name__ == '__main__':
main()
|
TheAspiringHacker/Asparserations
|
bootstrap/parser_gen.py
|
Python
|
mit
| 13,102
|
#!/usr/bin/python
# coding: utf-8
class Solution(object):
def convertToTitle(self, n):
"""
:type n: int
:rtype: str
"""
return "" if n == 0 else self.convertToTitle((n - 1) / 26) + chr((n - 1) % 26 + ord('A'))
|
Lanceolata/code-problems
|
python/leetcode/Question_168_Excel_Sheet_Column_Title.py
|
Python
|
mit
| 255
|
import requests
import json
import time
import subprocess
import re
import os
from collections import OrderedDict
from test_framework.test_framework import OpenBazaarTestFramework, TestFailure
from test_framework.smtp_server import SMTP_DUMPFILE
class SMTPTest(OpenBazaarTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 3
def run_test(self):
alice = self.nodes[1]
bob = self.nodes[2]
# post profile for alice
with open('testdata/'+ self.vendor_version +'/profile.json') as profile_file:
profile_json = json.load(profile_file, object_pairs_hook=OrderedDict)
api_url = alice["gateway_url"] + "ob/profile"
requests.post(api_url, data=json.dumps(profile_json, indent=4))
# configure SMTP notifications
time.sleep(4)
api_url = alice["gateway_url"] + "ob/settings"
smtp = {
"smtpSettings" : {
"notifications": True,
"serverAddress": "0.0.0.0:1024",
"username": "usr",
"password": "passwd",
"senderEmail": "openbazaar@test.org",
"recipientEmail": "user.openbazaar@test.org"
}
}
r = requests.post(api_url, data=json.dumps(smtp, indent=4))
if r.status_code == 404:
raise TestFailure("SMTPTest - FAIL: Settings POST endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("SMTPTest - FAIL: Settings POST failed. Reason: %s", resp["reason"])
time.sleep(4)
# check SMTP settings
api_url = alice["gateway_url"] + "ob/settings"
r = requests.get(api_url)
if r.status_code == 404:
raise TestFailure("SMTPTest - FAIL: Settings GET endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("SMTPTest - FAIL: Settings GET failed. Reason: %s", resp["reason"])
# check notifications
addr = "0.0.0.0:1024"
class_name = "test_framework.smtp_server.SMTPTestServer"
proc = subprocess.Popen(["python", "-m", "smtpd", "-n", "-c", class_name, addr])
# generate some coins and send them to bob
time.sleep(4)
api_url = bob["gateway_url"] + "wallet/address/" + self.cointype
r = requests.get(api_url)
if r.status_code == 200:
resp = json.loads(r.text)
address = resp["address"]
elif r.status_code == 404:
raise TestFailure("SMTPTest - FAIL: Address endpoint not found")
else:
raise TestFailure("SMTPTest - FAIL: Unknown response")
self.send_bitcoin_cmd("sendtoaddress", address, 10)
time.sleep(20)
# post listing to alice
with open('testdata/'+ self.vendor_version +'/listing.json') as listing_file:
listing_json = json.load(listing_file, object_pairs_hook=OrderedDict)
if self.vendor_version == "v4":
listing_json["metadata"]["priceCurrency"] = "t" + self.cointype
else:
listing_json["item"]["priceCurrency"]["code"] = "t" + self.cointype
listing_json["metadata"]["acceptedCurrencies"] = ["t" + self.cointype]
api_url = alice["gateway_url"] + "ob/listing"
r = requests.post(api_url, data=json.dumps(listing_json, indent=4))
if r.status_code == 404:
raise TestFailure("SMTPTest - FAIL: Listing post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("SMTPTest - FAIL: Listing POST failed. Reason: %s", resp["reason"])
time.sleep(4)
# get listing hash
api_url = alice["gateway_url"] + "ipns/" + alice["peerId"] + "/listings.json"
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("SMTPTest - FAIL: Couldn't get listing index")
resp = json.loads(r.text)
listingId = resp[0]["hash"]
# bob send order
with open('testdata/'+ self.buyer_version +'/order_direct.json') as order_file:
order_json = json.load(order_file, object_pairs_hook=OrderedDict)
order_json["items"][0]["listingHash"] = listingId
order_json["paymentCoin"] = "t" + self.cointype
api_url = bob["gateway_url"] + "ob/purchase"
r = requests.post(api_url, data=json.dumps(order_json, indent=4))
if r.status_code == 404:
raise TestFailure("SMTPTest - FAIL: Purchase post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("SMTPTest - FAIL: Purchase POST failed. Reason: %s", resp["reason"])
resp = json.loads(r.text)
orderId = resp["orderId"]
payment_address = resp["paymentAddress"]
payment_amount = resp["amount"]
# fund order
spend = {
"currencyCode": "T" + self.cointype,
"address": payment_address,
"amount": payment_amount["amount"],
"feeLevel": "NORMAL",
"requireAssociateOrder": False
}
if self.buyer_version == "v4":
spend["amount"] = payment_amount
spend["wallet"] = "T" + self.cointype
api_url = bob["gateway_url"] + "wallet/spend"
r = requests.post(api_url, data=json.dumps(spend, indent=4))
if r.status_code == 404:
raise TestFailure("SMTPTest - FAIL: Spend post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("SMTPTest - FAIL: Spend POST failed. Reason: %s", resp["reason"])
time.sleep(20)
proc.terminate()
# check notification
expected = '''From: openbazaar@test.org
To: user.openbazaar@test.org
MIME-Version: 1.0
Content-Type: text/html; charset=UTF-8
Subject: [OpenBazaar] Order received
You received an order "Ron Swanson Tshirt".
Order ID: QmNiPgKNq27qQE8fRxMbtDfRcFDEYMH5wDRgdqtqoWBpGg
Buyer: Qmd5qDpcYkHCmkj9pMXU9TKBqEDWgEmtoHD5xjdJgumaHg
Thumbnail: QmXSEqXLCzpCByJU4wqbJ37TcBEj77FKMUWUP1qLh56847
Timestamp: 1487699826
'''
expected_lines = [e for e in expected.splitlines() if not e.startswith('Timestamp:') and not e.startswith('Order ID:')]
with open(SMTP_DUMPFILE, 'r') as f:
res_lines = [l.strip() for l in f.readlines() if not l.startswith('Timestamp') and not l.startswith('Order ID:')]
if res_lines != expected_lines:
os.remove(SMTP_DUMPFILE)
raise TestFailure("SMTPTest - FAIL: Incorrect mail data received")
os.remove(SMTP_DUMPFILE)
print("SMTPTest - PASS")
if __name__ == '__main__':
print("Running SMTPTest")
SMTPTest().main(["--regtest", "--disableexchangerates"])
|
OpenBazaar/openbazaar-go
|
qa/smtp_notification.py
|
Python
|
mit
| 6,891
|
__author__ = 'djw'
class FieldNodeItem(object):
"""
An item built on a player's field board, held within a node/cell on that board
"""
def __init__(self):
self.cattle = 0
self.boars = 0
self.sheep = 0
self.grain = 0
self.vegetables = 0
def update_animals(self, animal, count):
if animal == 'sheep':
self.sheep += count
if animal == 'boar':
self.boars += count
if animal == 'cattle':
self.cattle += count
def update_crop(self, crop):
if crop == 'grain':
self.grain += 3
if crop == 'vegetables':
self.vegetables += 2
@property
def has_resources(self):
num_resources = self.cattle + self.boars + self.sheep + self.grain + self.vegetables
return num_resources > 0
def update(self):
""" Called every turn """
raise NotImplementedError()
def harvest(self, player):
""" Initiates and controls the harvest process for this item """
return
def score(self):
"""
Calculates and returns the current score of this item.
"""
raise NotImplementedError()
def describe(self):
"""
Return the current state of this item as a string that will be drawn
"""
raise NotImplementedError()
class RoomItem(FieldNodeItem):
"""
A single room on a player's field board
"""
MATERIAL_CHOICES = frozenset(['wood', 'stone', 'clay'])
def __init__(self, material='wood'):
super(RoomItem, self).__init__()
if material not in self.MATERIAL_CHOICES:
raise ValueError('Invalid material %s not in %s' % (material, self.MATERIAL_CHOICES))
self.material = material
def set_material(self, material):
if material not in self.MATERIAL_CHOICES:
raise ValueError('Invalid material %s not in %s' % (material, self.MATERIAL_CHOICES))
self.material = material
def update(self):
pass # rooms do not update from turn to turn
def score(self):
if self.material == 'wood':
return 0
elif self.material == 'clay':
return 1
elif self.material == 'stone':
return 2
def describe(self):
return u"\u25A1" # white square (unicode)
class StableItem(FieldNodeItem):
"""
A stable on the player's field board
"""
def update(self):
pass
def score(self):
return 1
def describe(self):
return u"\u039E" # Greek letter Xi (unicode)
class PlowedFieldItem(FieldNodeItem):
"""
A space on the player's field board that has been plowed and can be sowed upon
"""
def update(self):
pass
def score(self):
return 1
def harvest(self, player):
# harvest crops
if self.grain > 0:
self.grain -= 1
player.grain += 1
elif self.vegetables > 0:
self.vegetables -= 1
player.vegetable += 1
def describe(self):
return u"\u25A7" # Square with upper left to lower right fill
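
# Hedged usage sketch (illustration only; ``player`` is a stand-in object that
# is assumed to expose ``grain`` and ``vegetable`` counters, mirroring how
# harvest() uses it above):
#
#     field = PlowedFieldItem()
#     field.update_crop('grain')   # sowing puts 3 grain on this node
#     field.harvest(player)        # moves one grain from the node to the player
#     field.describe()             # -> u"\u25A7", the plowed-field glyph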
|
sourlows/pyagricola
|
src/field/node_item.py
|
Python
|
mit
| 3,158
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def reverseList(self, head: ListNode) -> ListNode:
current = head
previous = None
while current is not None:
temp = current.next
current.next = previous
previous = current
current = temp
return previous
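
# Hedged walk-through (comment only, since ListNode is just sketched above):
# for the list 1 -> 2 -> 3 the loop re-points ``next`` one node at a time:
#     current=1: 1.next -> None, previous = node 1
#     current=2: 2.next -> 1,    previous = node 2
#     current=3: 3.next -> 2,    previous = node 3
# and the returned ``previous`` is the new head of 3 -> 2 -> 1
# (O(n) time, O(1) extra space).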
|
danielgarm/Public-Algorithms
|
LeetCode/0206 - Reverse Linked List.py
|
Python
|
mit
| 472
|
import warnings
class Yaku:
yaku_id = None
tenhou_id = None
name = None
han_open = None
han_closed = None
is_yakuman = None
def __init__(self, yaku_id=None):
self.tenhou_id = None
self.yaku_id = yaku_id
self.set_attributes()
def __str__(self):
return self.name
def __repr__(self):
# for calls in array
return self.__str__()
def is_condition_met(self, hand, *args):
"""
        Does this yaku exist in the hand?
        :param hand: the hand being checked
        :param args: some yaku require additional attributes
:return: boolean
"""
raise NotImplementedError
def set_attributes(self):
"""
Set id, name, han related to the yaku
"""
raise NotImplementedError
@property
def english(self):
warnings.warn("Use .name attribute instead of .english attribute", DeprecationWarning)
return self.name
@property
def japanese(self):
warnings.warn("Use .name attribute instead of .japanese attribute", DeprecationWarning)
return self.name
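
# Minimal illustration (a hypothetical subclass, not part of this module):
# a concrete yaku only needs to fill in set_attributes() and
# is_condition_met(), e.g.
#
#     class ExampleYaku(Yaku):
#         def set_attributes(self):
#             self.name = 'Example'
#             self.han_open = 1
#             self.han_closed = 1
#             self.is_yakuman = False
#
#         def is_condition_met(self, hand, *args):
#             return True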
|
MahjongRepository/mahjong
|
mahjong/hand_calculating/yaku.py
|
Python
|
mit
| 1,121
|
# Copyright (C) 2016 Fan Long, Martin Rinard and MIT CSAIL
# Prophet
#
# This file is part of Prophet.
#
# Prophet is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Prophet is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Prophet. If not, see <http://www.gnu.org/licenses/>.
#!/usr/bin/env python
from os import system, chdir, getcwd
from sys import argv
import subprocess
build_cmd = argv[1];
dep_dir = argv[2];
src_dir = argv[3];
test_dir = argv[4];
rev = argv[5];
if (len(argv) < 7):
out_dir = test_dir + "-" + rev;
else:
out_dir = argv[6];
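# Hedged usage note (argument roles inferred from the positional parsing above,
# not from project documentation):
#     python libtiff-prepare-test.py <build_cmd> <dep_dir> <src_dir> <test_dir> <rev> [out_dir]
# A hypothetical run checks out <rev> in a scratch copy of <src_dir>, builds it
# against <dep_dir>, runs "make check" with expected-output generation enabled,
# and copies the resulting .exp/.tol files into <out_dir> (default: <test_dir>-<rev>).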
work_dir = "__tmp" + rev;
system("cp -rf " + src_dir + " " + work_dir);
ori_dir = getcwd();
chdir(work_dir);
system("git checkout -f " + rev);
system("git clean -f -d");
chdir(ori_dir);
system(build_cmd + " -p " + dep_dir + " " + work_dir);
system("mv " + work_dir + "/test " + work_dir+"/ori_test");
system("cp -rf " + test_dir + " " + work_dir + "/test");
chdir(work_dir + "/test");
system("GENEXPOUT=1 CMPEXPOUT=0 make check");
chdir(ori_dir);
print "Goint to generate testdir for revision " + rev + " case: " + out_dir;
system("cp -rf " + test_dir + " " + out_dir);
system("cp -rf " + work_dir + "/test/*.exp " + work_dir + "/test/*.tol " + out_dir+"/");
system("rm -rf " + work_dir);
|
jyi/ITSP
|
prophet-gpl/tools/libtiff-prepare-test.py
|
Python
|
mit
| 1,700
|