text stringlengths 4 1.02M | meta dict |
|---|---|
from django.forms import ModelForm, PasswordInput, TextInput, EmailInput, DateInput, Select, NumberInput, Textarea, SelectDateWidget, ValidationError, CharField
from django.conf import settings
from django.contrib.auth.models import User
import datetime
from .models import Userprofile
class UserForm(ModelForm):
    """Registration form for Django's ``User`` with password confirmation.

    ``password``/``confirm_password`` are declared explicitly so they render
    as masked inputs; everything gets Bootstrap's ``form-control`` class.
    """
    password = CharField(widget=PasswordInput(attrs={'class': 'form-control'}))
    confirm_password = CharField(widget=PasswordInput(attrs={'class': 'form-control'}))

    class Meta:
        model = User
        fields = ['username',
                  'password',
                  'confirm_password',
                  'first_name',
                  'last_name',
                  'email'
                  ]
        # Meta.widgets only applies to auto-generated model fields; the old
        # entries for the explicitly declared 'password'/'confirm_password'
        # fields were dead configuration and have been removed.
        widgets = {
            'username': TextInput(attrs={'class': 'form-control'}),
            'first_name': TextInput(attrs={'class': 'form-control'}),
            'last_name': TextInput(attrs={'class': 'form-control'}),
            'email': EmailInput(attrs={'class': 'form-control'}),
        }

    def clean(self):
        """Ensure both password fields match.

        Raises:
            ValidationError: when the two passwords differ.
        """
        cleaned_data = super(UserForm, self).clean()
        password = cleaned_data.get("password")
        confirm_password = cleaned_data.get("confirm_password")
        if password != confirm_password:
            raise ValidationError(
                "'Password' and 'Confirm Password' fields do not match!"
            )
        # BUG FIX: clean() previously returned None implicitly; per Django's
        # form API it should return the cleaned data dict.
        return cleaned_data
class UserprofileForm(ModelForm):
    """ModelForm for the Userprofile model (DOB, gender, height, weight, notes)."""
    class Meta:
        # Evaluated once at class-creation (import) time, so the year picker's
        # upper bound is the year the server process started.
        year = datetime.datetime.now().year
        model = Userprofile
        fields = ['dateofbirth',
                  'gender',
                  'height',
                  'weight',
                  'notes'
                  ]
        widgets = {
            # Three-dropdown date picker covering 1900..current year.
            'dateofbirth': SelectDateWidget(empty_label=("Choose Year", "Choose Month", "Choose Day"), years=range(1900, year+1), attrs={'class': 'form-control'}),
            # NOTE(review): 'masked' is not a Select option; it is emitted as a
            # literal HTML attribute — presumably copy-pasted from the password
            # widgets elsewhere in this file. Confirm before removing.
            'gender': Select(attrs={'masked': True, 'class': 'form-control'}),
            'height': NumberInput(attrs={'class': 'form-control'}),
            'weight': NumberInput(attrs={'class': 'form-control'}),
            'notes': Textarea(attrs={'class': 'form-control'}),
        }
class ChangepasswordForm(ModelForm):
    """Password-change form: new password entered twice, must match."""
    password = CharField(widget=PasswordInput(attrs={'class': 'form-control'}))
    confirm_password = CharField(widget=PasswordInput(attrs={'class': 'form-control'}))

    class Meta:
        model = User
        fields = ['password',
                  'confirm_password',
                  ]
        # The old Meta.widgets entries were dead configuration: Django ignores
        # Meta.widgets for explicitly declared form fields, and both fields
        # here are declared above.

    def clean(self):
        """Ensure both password fields match.

        Raises:
            ValidationError: when the two passwords differ.
        """
        cleaned_data = super(ChangepasswordForm, self).clean()
        password = cleaned_data.get("password")
        confirm_password = cleaned_data.get("confirm_password")
        if password != confirm_password:
            raise ValidationError(
                "'Password' and 'Confirm Password' fields do not match!"
            )
        # BUG FIX: return the cleaned data as Django's form API expects.
        return cleaned_data
| {
"content_hash": "f8cde137d4bf4ddfa918836e55361880",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 163,
"avg_line_length": 35.819148936170215,
"alnum_prop": 0.5521235521235521,
"repo_name": "TalatCikikci/Fall2016Swe573_HealthTracker",
"id": "5f4db899df97c0e09b1ec607731e7b84af24981c",
"size": "3367",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fitster/healthtracker/forms.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "368"
},
{
"name": "HTML",
"bytes": "27814"
},
{
"name": "Python",
"bytes": "56138"
}
],
"symlink_target": ""
} |
from ..config import Config
from kao_command.args import Arg
class SetDir:
    """Command that sets the devlog parent directory in the config."""
    description = "Set the path to the devlog parent directory"
    # Single positional CLI argument: the directory where logs will be stored.
    args = [Arg('path', action='store', help="The Path to the directory where logs will be stored")]

    def run(self, *, path):
        """Persist *path* as the log directory via the global Config."""
        Config.setLogDir(path)
"content_hash": "ca0725f60238b39fa52dc01e37e5c445",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 100,
"avg_line_length": 36.18181818181818,
"alnum_prop": 0.6381909547738693,
"repo_name": "cloew/DevLog",
"id": "059fd7dc5d5fe51d5bacfe04c0ea27b419a3ad0f",
"size": "398",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "devlog/commands/set_dir.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27161"
}
],
"symlink_target": ""
} |
# Demo script for "Machine Learning in Action" ch. 2.3: kNN handwriting
# recognition. BUG FIX: the file mixed print() calls with Python 2 print
# statements, which is a SyntaxError under Python 3; single-argument print()
# behaves identically on both, so all output now uses the function form.
print("*************************************\n Machine Learning In Action - Code\n")
print(" Part : 1 - Classification")
print(" Chapter : 2.3 - Example: a Handwriting Recognition System")
print(" Index : 4\n")
print(" Page : 28\n")
print(" By : Troy Lewis\n*************************************\n\n")

print("=====================================\nStep 01: Import kNN.py and test img2vector\n")
import kNN

# img2vector flattens a 32x32 text digit into a 1x1024 vector; show two rows.
test_vector = kNN.img2vector('digits/test_digits/0_13.txt')
print("array1 = ")
print(test_vector[0, 0:32])
print("array2 = ")
print(test_vector[0, 32:64])

print("=====================================\nStep 02: Test the classifier\n")
print("run the classifier: ")
kNN.handwriting_test()
| {
"content_hash": "e3e1a798efaeda2a0f859b14ec780f46",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 92,
"avg_line_length": 39.77777777777778,
"alnum_prop": 0.5251396648044693,
"repo_name": "metesa/MachineLearningInAction-Code",
"id": "efd99be21d75152389b81d7198c4559890006fb7",
"size": "835",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Ch2/2.3_Example_a_Handwriting_Recognition_System.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "22511"
}
],
"symlink_target": ""
} |
from abc import ABC, abstractmethod


class ADBService(ABC):
    """Abstract persistence interface for users and clubs.

    BUG FIX: @abstractmethod only takes effect on classes whose metaclass is
    ABCMeta. The original plain class could be instantiated directly, with
    every "abstract" method a silent no-op. Deriving from ABC makes an
    incomplete implementation fail fast at construction time.
    """

    @abstractmethod
    def save_user(self, user):
        """Persist a new user."""

    @abstractmethod
    def update_user(self, user):
        """Update an existing user."""

    @abstractmethod
    def delete_user(self, user):
        """Remove a user."""

    @abstractmethod
    def find_user(self, **kwargs):
        """Look up a user by arbitrary attribute filters."""

    @abstractmethod
    def create_club(self, club):
        """Persist a new club."""

    @abstractmethod
    def delete_club(self, club):
        """Remove a club."""
| {
"content_hash": "2dee4c001de164546ea4a2e0cbdedf9c",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 34,
"avg_line_length": 16.178571428571427,
"alnum_prop": 0.6048565121412803,
"repo_name": "HSEFamily/komorebi-service",
"id": "d1a9e156e66348d8e139865ee280a6446b25f471",
"size": "453",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "komorebi/domain/adbservice.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1080"
}
],
"symlink_target": ""
} |
import sys
import os
import shutil
import re
import argparse
import mimetypes
def checkFiles(picList):
    """Filter *picList* down to image files, in place.

    A file is kept when mimetypes can guess its type and the guess is in the
    'image' major type (.NEF raw files are registered explicitly). The list is
    mutated in place (callers may rely on that) and also returned.
    """
    mimetypes.init()
    mimetypes.add_type('image/x-nikon-nef', '.NEF')
    # Build the surviving list in one pass instead of the original
    # remove-while-indexing loop (which also compared with `== None`).
    kept = []
    for pic in picList:
        guessed = mimetypes.guess_type(pic)[0]
        if guessed is not None and guessed.split('/')[0] == 'image':
            kept.append(pic)
    # Slice-assign to preserve the original in-place mutation semantics.
    picList[:] = kept
    return picList
def main(srcPath, destPath, min = None, max = None):
    """Move image files from srcPath into destPath.

    DSC_NNNN files are renumbered to fill the unused numbers already present
    in the destination; other names are kept, de-duplicated with a -N suffix.
    min/max (inclusive) restrict which source DSC numbers are moved.
    RawTherapee sidecars (.pp3 / .out.pp3) travel with each image.

    NOTE: the parameters `min`/`max` shadow the builtins; kept for interface
    compatibility with existing positional callers.
    """
    if srcPath[-1] != '/':
        srcPath += '/'
    if destPath[-1] != '/':
        destPath += '/'
    if (not os.path.exists(srcPath)):
        print('Source directory does not exist')
        exit(1)
    if (not os.path.isdir(srcPath)):
        print('Source is not a directory')
        exit(1)
    if (not os.path.exists(destPath)):
        print(destPath + ' does not exist; check path')
        exit(2)
    if (not os.path.isdir(destPath)):
        # BUG FIX: previously only printed "' is not a director'" (typo) and
        # fell through, treating a plain file as the destination directory.
        print(destPath + ' is not a directory')
        exit(2)
    newPics = sorted(checkFiles(os.listdir(srcPath)))
    oldPics = sorted(checkFiles(os.listdir(destPath)))
    # find the highest picture number in the destination directory
    maxPic = 0
    for picture in oldPics:
        try:
            picNum = int(picture[4:8])
            if (picNum > maxPic):
                maxPic = picNum
        except ValueError:
            print('Unusual file name: ' + picture)
            continue
    # find all the used numbers in the destination dir
    used = [False for unused in range(maxPic + 1)]
    for picture in oldPics:
        try:
            picNum = int(picture[4:8])
            used[picNum] = True
        except ValueError:
            print('Unusual file name: ' + picture)
            continue
    i = 0
    for picture in newPics:
        if min and max:
            try:
                picNum = int(picture[4:8])
                if picNum < min:
                    continue
                elif picNum > max:
                    break
            except ValueError:
                print('Invalid file name: ' + picture)
                continue
        # advance to the next unused destination number
        while (i < len(used) and used[i]):
            i = i + 1
        match = re.match(r'DSC_\d\d\d\d\.', picture)
        if match is None:
            # Non-standard name: keep it, de-duplicating with a -N suffix.
            parts = picture.rsplit('.', 1)
            extension = parts[-1]
            base = parts[0]
            name = base
            # BUG FIX: this loop previously reset and reused the slot counter
            # `i` (corrupting the DSC numbering for later files) and never
            # incremented its own counter, so collisions grew as name-1-1-1...
            suffix = 1
            while os.path.exists(destPath + '.'.join([name, extension])):
                name = '%s-%d' % (base, suffix)
                suffix += 1
            newName = '.'.join([name, extension])
        else:
            extension = picture.rsplit('.', 1)[-1]
            num = "{0:04d}".format(i + 1)
            newName = "DSC_%s.%s" %(num, extension)
        shutil.move(srcPath + picture, destPath + newName)
        # try to move RawTherapee profile with the image
        try:
            shutil.move(srcPath + picture + '.pp3',
                        destPath + newName + '.pp3')
        except IOError:
            pass
        try:
            shutil.move(srcPath + picture + '.out.pp3',
                        destPath + newName + '.out.pp3')
        except IOError:
            pass
        i = i + 1
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description = 'Move pictures sensibly')
    parser.add_argument('src', type = str)
    parser.add_argument('dest', type = str)
    # e.g. -r 100-200 : only move DSC_0100..DSC_0200 from the source
    parser.add_argument('-r', '--range', type = str)
    args = parser.parse_args()
    if args.range:
        # BUG FIX: the old pattern '(\d+).(\d+)' let '.' match a digit, so an
        # argument like '123' silently parsed as min=1, max=3. Require a
        # non-digit separator and anchor the end of the string.
        match = re.match(r'(\d+)\D+(\d+)$', args.range)
        if not match:
            raise ValueError('Bad range: %s' %args.range)
        min = int(match.group(1))
        max = int(match.group(2))
    else:
        min = None
        max = None
    main(args.src, args.dest, min, max)
| {
"content_hash": "db500d559defd0bc7899eac619ec123e",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 76,
"avg_line_length": 30.862903225806452,
"alnum_prop": 0.5079696890514763,
"repo_name": "ndhuang/python-lib",
"id": "aa28e5dd346e6d73701d2e6ae2fdcbbb1c99ca56",
"size": "3845",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "picture-management/mvpics.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46833"
}
],
"symlink_target": ""
} |
import time # sleep
import sys # exit
import argparse # argparse
__author__ = 'Jack B. Du'
__copyright__ = 'Copyright (c) 2017, Jack B. Du'
__license__ = 'MIT'
__email__ = 'jackbdu@nyu.edu'
# parsing arguments
parser = argparse.ArgumentParser()
parser.add_argument('filename', metavar='filename', type=str, help="path to file")
# -fps overrides the framerate stored in the file's meta line
parser.add_argument('-fps', "--framerate", type=int, help="specify the frames per second")
args = parser.parse_args()
# buffer that stores all the frames of texts
# (this initial value is immediately replaced by load_frames() below)
framesBuffer = []
# load frames from the file
def load_frames(filename):
    """Load a 'manvid' text-video file.

    The first line is CSV meta data: filetype,width,height,framerate,depth.
    Each subsequent group of `height` lines is one frame.

    Returns:
        (frames, width, height, framerate, depth) where frames is a list of
        newline-joined strings.

    Exits the process on a missing file, unreadable meta line, or an
    unsupported file type.
    """
    # initialize empty list for frames to return
    frames = []
    try:
        # open file for reading
        manvidfile = open(filename, 'r')
    # error when opening file
    except IOError:
        print('no such file or directory: ' + filename)
        sys.exit()
    try:
        # read first line in file
        line = manvidfile.readline()
        # get meta data from the file (first line in the file, seperated by ','
        filetype, frameWidthStr, frameHeightStr, framerateStr, frameDepthStr = line.split(',')
    except ValueError:
        print('error reading meta data: ' + filename)
        sys.exit()
    if (filetype != 'manvid'):
        print('file type not supported: ' + filename)
        # BUG FIX: previously this only warned and then kept parsing an
        # unknown format; bail out instead.
        sys.exit()
    frameHeight = int(frameHeightStr)
    while True:
        # accumulate one frame of frameHeight lines
        frame = ''
        for _ in range(frameHeight):
            frame += manvidfile.readline()
        if frame == '':
            # BUG FIX: the old loop condition appended one spurious empty
            # frame at end of file; stop cleanly instead.
            break
        frames.append(frame)
    # close the file
    manvidfile.close()
    return frames, int(frameWidthStr), frameHeight, int(framerateStr), int(frameDepthStr)
# Parse the input file once; everything below is the playback loop (Python 2).
framesBuffer, frameWidth, frameHeight, framerate, frameDepth = load_frames(args.filename)
# change framerate if specified
if args.framerate != None:
    framerate = args.framerate
# get the length (number) of frames
framesLength = len(framesBuffer)
try:
    # repeat frames
    while True:
        # play frames
        for i in range(framesLength):
            # clear the terminal window (ANSI escape ESC[2J)
            print(chr(27) + '[2J')
            # print the frame
            print framesBuffer[i]
            # progress bar: filled/unfilled cells scaled to frame width
            # (integer division under Python 2, so it advances in steps)
            print frameWidth*i/framesLength*'田'+(frameWidth-frameWidth*i/framesLength)*'囗'
            # time for one frame
            time.sleep(1.0/framerate)
# handle KeyboardInterrupt, typically Ctrl + C
except KeyboardInterrupt:
    sys.exit()
| {
"content_hash": "6f7bd81bb99f7a1f0f1a5861f21776c5",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 97,
"avg_line_length": 30.55952380952381,
"alnum_prop": 0.6330346708219712,
"repo_name": "JackBDu/mandarinizer",
"id": "9f9d7c35128b54e4e189c6e070768488e78b4dfe",
"size": "2611",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "player.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10105"
}
],
"symlink_target": ""
} |
import signal
import time
import sys
import subprocess
import json
import ibmiotf.device
# device credentials to connect to watson
# NOTE(review): these are hard-coded placeholder credentials committed to
# source. Real values should come from the environment or a config file,
# never from the repository.
tf_watson_org = 'abcdef'
tf_watson_device_type = 'tf-pi'
tf_watson_device_id = 'tf-pi-1'
tf_watson_auth_token = 'ABCDEFGHIJKLMNOPQR'
def wifi_scan():
    """Run `wpa_cli scan_results` and return access points.

    Each entry is a 3-item list: [bssid with colons stripped, signal column,
    ssid (empty string when the row has no ssid column)]. Rows without a ':'
    (the header line) are skipped.
    """
    proc = subprocess.Popen(["/sbin/wpa_cli", "scan_results"], stdout=subprocess.PIPE, universal_newlines=True)
    out, _err = proc.communicate()
    access_points = []
    for row in out.split("\n"):
        if ':' not in row:
            continue
        fields = row.split()
        ssid = fields[4] if len(fields) > 4 else ''
        # mac, signal, ssid
        access_points.append([fields[0].replace(':', ''), fields[2], ssid])
    return access_points
def interruptHandler(signal, frame):
    """Signal handler: disconnect the Watson IoT client cleanly, then exit.

    NOTE(review): relies on the module-level `client` created below, and this
    handler is never registered via signal.signal() anywhere in this file —
    confirm whether registration was intended.
    """
    client.disconnect()
    sys.exit(0)
# Connection options consumed by ibmiotf.device.Client.
options = {
    'org' : tf_watson_org,
    'type' : tf_watson_device_type,
    'id' : tf_watson_device_id,
    'auth-token' : tf_watson_auth_token,
    'auth-method' : 'token'
}
try:
    client = ibmiotf.device.Client(options)
    client.connect()
    # Publish the wifi scan as a JSON 'status' event once a minute, forever.
    while True:
        aps = wifi_scan();
        data={'wifiscan' : str(aps) }
        client.publishEvent("status", "json", data)
        time.sleep(60)
except Exception as e:
    # Broad catch: any connect/publish failure prints the error and exits.
    print(str(e))
    sys.exit()
| {
"content_hash": "a39b45c5cf94227e532d0e59d4e0fa67",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 111,
"avg_line_length": 23.09090909090909,
"alnum_prop": 0.5866141732283464,
"repo_name": "truepositiontruefix/truefix-watson",
"id": "8f3b9b14179a6787a73b452a159b638ade918117",
"size": "1292",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "truefix-device/truefix-device.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6142"
}
],
"symlink_target": ""
} |
from multiprocessing import Pool
import os,time,random
def long_time_task(name):
    """Pool worker (Python 2 demo): sleep 0-3 s at random, printing pid and elapsed time."""
    print 'Run task %s (%s)...' %(name,os.getpid())
    start=time.time()
    # random.random() is in [0, 1), so the sleep is under 3 seconds
    time.sleep(random.random()*3)
    end=time.time()
    print 'Task %s runs %0.2f seconds.' %(name,end-start)
if __name__=='__main__':
    # Demo 1: fan five tasks out to a Pool (defaults to cpu_count workers).
    print 'Parent process %s.' %os.getpid()
    p=Pool()
    for i in range(5):
        p.apply_async(long_time_task,args=(i,))
    print 'Wating for all subprocesses done...'
    # close() stops new submissions; join() blocks until all workers finish.
    p.close()
    p.join()
    print 'All sbuprocesses done'
from multiprocessing import Process,Queue
def write(q):
    """Producer (Python 2 demo): put 'A', 'B', 'C' on the queue with random pauses."""
    for value in ['A','B','C']:
        print 'Put %s to queue...' %value
        q.put(value)
        time.sleep(random.random())
def read(q):
    """Consumer (Python 2 demo): print every value arriving on the queue.

    Loops forever — q.get(True) blocks — so the parent must terminate this
    process externally (see pr.terminate() in the demo below).
    """
    while True:
        value=q.get(True)
        print 'Get %s from queue.' %value
if __name__=='__main__':
    # Demo 2: producer/consumer over a multiprocessing Queue.
    q=Queue()
    pw=Process(target=write,args=(q,))
    pr=Process(target=read,args=(q,))
    pw.start()
    pr.start()
    pw.join()
    # read() never returns on its own, so it is killed rather than joined.
    pr.terminate()
"content_hash": "93922e3c64178639e323241f2f2c724f",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 54,
"avg_line_length": 22.525,
"alnum_prop": 0.6470588235294118,
"repo_name": "zengboming/python",
"id": "285784ca2a0b1554efea86f8f1a24e7debdd85fb",
"size": "909",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pool1.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "965"
},
{
"name": "Python",
"bytes": "41498"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.core.mail import EmailMultiAlternatives, EmailMessage
from django.template import TemplateDoesNotExist
from django.template.loader import render_to_string
from app import utils
# Default From: header ("<name> Team <contact-address>"), built from Django
# settings once at import time.
FROM_EMAIL = settings.HACKATHON_NAME + ' Team <' + settings.HACKATHON_CONTACT_EMAIL + '>'
def render_mail(template_prefix, recipient_email, substitutions,
                from_email=FROM_EMAIL, action_required=False):
    """
    Renders an e-mail to `email`. `template_prefix` identifies the
    e-mail that is to be sent, e.g. "account/email/email_confirmation"

    Renders `<template_prefix>_subject.txt` for the subject and
    `<template_prefix>_message.txt` / `_message.html` for the body; the txt
    body template is required when the html one is missing. Returns the
    message object WITHOUT sending it.

    NOTE(review): mutates the caller's `substitutions` dict in place (merges
    the global template substitutions and stores the computed 'subject').
    """
    substitutions.update(utils.get_substitutions_templates())
    subject = render_to_string('{0}_subject.txt'.format(template_prefix),
                               context=substitutions)
    # remove superfluous line breaks
    subject = " ".join(subject.splitlines()).strip()
    # Subject prefix: '[ACTION REQUIRED]' replaces (not augments) the
    # '[<hackathon>]' tag when action_required is set.
    prefix = '[' + settings.HACKATHON_NAME + ']'
    if action_required:
        prefix = '[ACTION REQUIRED]'
    subject = prefix + ' ' + subject
    substitutions.update({'subject': subject})
    bodies = {}
    for ext in ['html', 'txt']:
        try:
            template_name = '{0}_message.{1}'.format(template_prefix, ext)
            bodies[ext] = render_to_string(template_name,
                                           substitutions).strip()
        except TemplateDoesNotExist:
            if ext == 'txt' and not bodies:
                # We need at least one body
                raise
    if 'txt' in bodies:
        # Multipart: txt body with the html version as an alternative part.
        msg = EmailMultiAlternatives(subject,
                                     bodies['txt'],
                                     from_email,
                                     [recipient_email])
        if 'html' in bodies:
            msg.attach_alternative(bodies['html'], 'text/html')
    else:
        # html-only message.
        msg = EmailMessage(subject,
                           bodies['html'],
                           from_email,
                           [recipient_email])
        msg.content_subtype = 'html'  # Main content is now text/html
    return msg
def send_email(template_prefix, recipient_email, substitutions,
               from_email=FROM_EMAIL, action_required=False):
    """Render and immediately send the e-mail described by template_prefix.

    Backward-compatible generalization: `action_required` (default False) is
    now forwarded to render_mail, which prefixes the subject with
    '[ACTION REQUIRED]'. Previously this helper offered no way to request
    that behavior even though render_mail supports it.
    """
    msg = render_mail(template_prefix, recipient_email, substitutions,
                      from_email, action_required=action_required)
    msg.send()
| {
"content_hash": "6a59d26055b7666c14771ec17c79d40c",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 89,
"avg_line_length": 39.39655172413793,
"alnum_prop": 0.5759299781181619,
"repo_name": "hackupc/backend",
"id": "d3b989c54d02b36160d63ae4162b12055047d5b0",
"size": "2285",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/emails.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3625"
},
{
"name": "HTML",
"bytes": "81735"
},
{
"name": "JavaScript",
"bytes": "4029"
},
{
"name": "Python",
"bytes": "118984"
},
{
"name": "Shell",
"bytes": "1659"
}
],
"symlink_target": ""
} |
import socket
import struct
from Trafficker.layer.layer import layer
class IEEE802dot3(layer):
    # 802.3
    # Todo
    # Skeleton for IEEE 802.3 frame handling; only the type table exists so far.
    # NOTE(review): 0x4242 looks like the LLC DSAP/SSAP byte pair (0x42, 0x42)
    # used by STP over 802.2, not an EtherType — confirm the intended
    # semantics of `etypes` before extending this table.
    etypes = {
        'STP': 0x4242
    }
| {
"content_hash": "f7622ab615192b56408871bf32f0a9de",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 40,
"avg_line_length": 12.692307692307692,
"alnum_prop": 0.6242424242424243,
"repo_name": "LyleMi/Trafficker",
"id": "f3c789cc25a62131616687897987fe84e3368f79",
"size": "165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Trafficker/layer/ieee802.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "58112"
}
],
"symlink_target": ""
} |
try:
import dbm.ndbm
except ImportError:
import dbm as dbm
import logging
# Project imports
from classified import checksum
class Incremental(object):
default_algorithm = 'sha1'
default_blocksize = 16384
def __init__(self, config):
self.config = config
# Local cache
self.cache = {}
# Configuration bits
self.algorithm = self.config.getdefault('incremental', 'algorithm',
self.default_algorithm)
self.database = self.config.get('incremental', 'database')
try:
self.blocksize = self.config.getint('incremental', 'blocksize')
except self.config.NoOptionError:
self.blocksize = self.default_blocksize
# Open the database in secure mode
self.db = dbm.ndbm.open(self.database, 'c', 0o600)
logging.info('only checking incremental changes')
logging.debug('tracking incremental changes in %s' % self.database)
def __contains__(self, item):
if str(item) in self.db:
old_value = self.db[str(item)]
new_value = self.cache.get(item, self.checksum(item))
return old_value == new_value
else:
return False
def add(self, item):
self.db[str(item)] = self.cache.get(item, self.checksum(item))
def checksum(self, item):
if self.algorithm == 'mtime':
return str(int(item.mtime))
else:
method = checksum.new(self.algorithm)
handle = item.open()
while True:
chunk = handle.read(self.blocksize)
if not chunk:
break
else:
method.update(chunk)
handle.close()
self.cache[item] = method.hexdigest()
return self.cache[item]
| {
"content_hash": "be5ba7f038c27921a190fa9ff6d751ea",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 75,
"avg_line_length": 29.095238095238095,
"alnum_prop": 0.574468085106383,
"repo_name": "tehmaze/classified",
"id": "a0f69356f54aadea15755bcb821cfca7c3ebc305",
"size": "1850",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "classified/incremental.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4110"
},
{
"name": "CSS",
"bytes": "55422"
},
{
"name": "Pan",
"bytes": "2896"
},
{
"name": "Python",
"bytes": "86003"
},
{
"name": "Shell",
"bytes": "5171"
}
],
"symlink_target": ""
} |
# Re-export the model classes so callers can write `from api.models import X`.
from api.models.modelrun import ModelRun
from api.models.kubepod import KubePod
from api.models.kubemetric import KubeMetric

# Public API of this package.
__all__ = ["KubePod", "KubeMetric", "ModelRun"]
| {
"content_hash": "62c49a57c751d2dc254088b9eb379074",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 47,
"avg_line_length": 34.8,
"alnum_prop": 0.7758620689655172,
"repo_name": "mlbench/mlbench",
"id": "50ca51b23897a28f14c443c19fb71a06d6dc6ed2",
"size": "175",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "mlbench/master/api/models/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2829"
},
{
"name": "Dockerfile",
"bytes": "4848"
},
{
"name": "HTML",
"bytes": "26229"
},
{
"name": "JavaScript",
"bytes": "217157"
},
{
"name": "Makefile",
"bytes": "2646"
},
{
"name": "Python",
"bytes": "163751"
},
{
"name": "Ruby",
"bytes": "5299"
},
{
"name": "Shell",
"bytes": "10874"
},
{
"name": "Smarty",
"bytes": "1998"
}
],
"symlink_target": ""
} |
import re
import string
from collections import namedtuple, Counter
from operator import itemgetter
from common import BaseInputParser, DataClass
class InputParser(BaseInputParser):
    """Parses puzzle input lines into Room objects."""

    @property
    def rooms(self):
        """Yield a Room for every input line (name-parts, sector id, checksum)."""
        for raw in self.lines:
            # Split "abc-def-123[chks]" into the dash section and "[chks]".
            pieces = re.split("(\[.+\])", raw.strip())
            name_and_sector = pieces[0].split("-")
            yield Room(
                encrypted_name=name_and_sector[:-1],
                sector_id=name_and_sector[-1],
                checksum=pieces[1][1:-1],
            )

    @property
    def valid_rooms(self):
        """Yield only the rooms whose checksum validates."""
        return (room for room in self.rooms if RoomValidator.is_valid(room))
class Room(DataClass):
    """Value object for one room: name fragments, numeric sector id, checksum."""
    def __init__(self, encrypted_name, sector_id, checksum):
        # sector_id arrives as a string slice from the parser; store it as int.
        super().__init__(
            encrypted_name=encrypted_name,
            sector_id=int(sector_id),
            checksum=checksum
        )
class RoomValidator:
    """Checksum validation for rooms."""

    @staticmethod
    def is_valid(room: "Room"):
        """Return True when the stored checksum equals the computed one.

        The checksum is the five most frequent letters of the joined encrypted
        name, ties broken alphabetically.
        """
        letter_counts = Counter(''.join(room.data.encrypted_name))
        # One sort with a composite key replaces the original stable two-pass
        # sort (alphabetical, then by descending count) — same ordering.
        ranked = sorted(letter_counts.items(), key=lambda kv: (-kv[1], kv[0]))
        expected = "".join(letter for letter, _count in ranked[:5])
        return expected == room.data.checksum
class RoomDecrypter:
    """Caesar-shift decryption of room names, keyed by the sector id."""

    @staticmethod
    def _get_next_letter(character, rotation_number):
        """Rotate one lowercase letter forward by rotation_number, wrapping at 'z'."""
        alphabet = string.ascii_lowercase
        position = alphabet.index(character)
        return alphabet[(position + rotation_number) % len(alphabet)]

    @staticmethod
    def decrypt(room: "Room"):
        """Decrypt the joined encrypted-name fragments using the sector id as shift."""
        cipher_text = "".join(room.data.encrypted_name)
        shift = room.data.sector_id
        return "".join(
            RoomDecrypter._get_next_letter(symbol, shift) for symbol in cipher_text
        )
def main():
    """Solve both parts: sum valid sector ids, then locate the north-pole room."""
    parser = InputParser()
    id_sum = sum(
        room.data.sector_id
        for room in parser.rooms
        if RoomValidator.is_valid(room)
    )
    print("Id sum is %s" % id_sum)
    for room in parser.valid_rooms:
        decrypted = RoomDecrypter.decrypt(room)
        if "north" in decrypted:
            print("Sector ID of North Pole Objects are in room '%s' in sector %s" %
                  (decrypted, room.data.sector_id))


if __name__ == '__main__':
    main()
| {
"content_hash": "48aa54018f9baf1acde37a43beca8684",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 110,
"avg_line_length": 28.949367088607595,
"alnum_prop": 0.6090948841276782,
"repo_name": "zunin/Advent-of-Code-2016",
"id": "703ae800104ef69330d048a725fc3fe11e0fc2c7",
"size": "2287",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "day4/day_4.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23456"
}
],
"symlink_target": ""
} |
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1beta1_network_policy_list import V1beta1NetworkPolicyList
class TestV1beta1NetworkPolicyList(unittest.TestCase):
    """ V1beta1NetworkPolicyList unit test stubs """

    def setUp(self):
        # No fixtures needed for this generated constructor smoke test.
        pass

    def tearDown(self):
        pass

    def testV1beta1NetworkPolicyList(self):
        """
        Test V1beta1NetworkPolicyList
        """
        # Smoke test: constructing the generated model must not raise.
        # NOTE(review): the instance is neither kept nor asserted on — this is
        # the standard swagger-codegen stub shape.
        model = kubernetes.client.models.v1beta1_network_policy_list.V1beta1NetworkPolicyList()


if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "2e723c225f3bdc5017167fc15afa02ae",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 105,
"avg_line_length": 22.9,
"alnum_prop": 0.7139737991266376,
"repo_name": "skuda/client-python",
"id": "51a107ff17ea0a5787148284d944ba5e80a740f0",
"size": "933",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kubernetes/test/test_v1beta1_network_policy_list.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5907789"
},
{
"name": "Shell",
"bytes": "8195"
}
],
"symlink_target": ""
} |
import os;
import sys;
import shutil;
import re;
"""
https://snipplr.com/view/70884/convert-epub-to-pdf-in-linux-command-line
Readme:
# Requires
* https://wkhtmltopdf.org/downloads.html
"""
class EpubConvertor:
    """Converts an unpacked EPUB (OEBPS layout) to a single PDF via wkhtmltopdf.

    NOTE(review): paths and the book title are interpolated unescaped into
    os.system() shell commands below — a title or path containing quotes or
    shell metacharacters will break, or worse, execute arbitrary commands.
    Also, `assert` is stripped under `python -O`, so the path validation
    silently disappears in optimized runs. Left as-is here; flagged for a
    follow-up rewrite with subprocess.run([...], shell=False).
    """
    # Class-level defaults, overwritten per instance in __init__ and by the
    # CLI argument loop at the bottom of the file.
    source_path = None; target_path = None;
    def __init__(self):
        self.source_path = "";
        self.target_path = "";
        pass;
    def invoke(self):
        """Render each OEBPS .xhtml to PDF, then concatenate into <title>.pdf."""
        self.source_path = os.path.realpath(self.source_path);
        print(self.source_path);
        assert os.path.exists(self.source_path), "Source path doesn't exist...";
        assert os.path.exists(self.target_path), "Target base path doesn't exist...";
        ## Find title in package
        raw_file = "";
        with open("{0}/OEBPS/package.opf".format(self.source_path), 'r') as handler:
            raw_file = handler.read();
        # Grab the book title from the OPF manifest; raises AttributeError if
        # no <dc:title> element is present (m is None).
        m = re.search('(\<dc:title\>)(.*)(\</dc:title\>)', raw_file);
        title = m.group(2);
        os.system('rm -fR "{0}/{1}"'.format(self.target_path, title));
        os.mkdir("{0}/{1}".format(self.target_path, title));
        ## Convert HTML files to PDF
        for file in os.listdir("{0}/OEBPS".format(self.source_path)):
            if ".xhtml" in file:
                print("file://{0}/OEBPS/{3}".format(self.source_path, self.target_path, title, file));
                os.system('wkhtmltopdf -q --title "{1}" "file://{0}/OEBPS/{3}" "{1}/{2}/{3}".pdf'.format(self.source_path, self.target_path, title, file));
        ## Merge PDF files to single
        # NOTE(review): `cat *.pdf` concatenation relies on shell glob order
        # and produces a crude merge; presumably acceptable for this tool.
        os.system('cd "{0}/{1}" && cat *.pdf > "{0}/{1}.pdf"'.format(os.path.realpath(self.target_path), title));
        ## Cleanup
        os.system('rm -fR "{0}/{1}"'.format(self.target_path, title));
        pass;
    pass;
if __name__ == "__main__":
    session = EpubConvertor()
    # Walk argv looking for "--epub <path>" and "--tar <path>" pairs.
    for param_i in range(0, len(sys.argv)):
        # BUG FIX: guard the look-ahead — previously a flag given as the last
        # argument made sys.argv[param_i + 1] raise IndexError.
        if sys.argv[param_i] == "--epub" and param_i + 1 < len(sys.argv):
            session.source_path = sys.argv[param_i + 1]
        elif sys.argv[param_i] == "--tar" and param_i + 1 < len(sys.argv):
            session.target_path = sys.argv[param_i + 1]
    session.invoke()
| {
"content_hash": "393a89d764e6b79b1cf36704c63498a0",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 143,
"avg_line_length": 36.11764705882353,
"alnum_prop": 0.6324647122692725,
"repo_name": "agancsos/python",
"id": "77bcd9c7f00a0dab075c3f1f2be1d6209af56fb0",
"size": "1865",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "epub_to_pdf/epub_to_pdf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1882241"
},
{
"name": "Shell",
"bytes": "1865"
}
],
"symlink_target": ""
} |
"""
Copyright 2015 Samuel Curley
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from kazoo.client import KazooClient
from kazoo.handlers.threading import KazooTimeoutError
from kazoo.exceptions import NoNodeError
from ..pb.ZooKeeper_pb2 import MetaRegionServer
from ..exceptions import *
from struct import unpack
from time import sleep
import logging
# Module logger, namespaced under the package-wide 'pybase.' prefix.
logger = logging.getLogger('pybase.' + __name__)
logger.setLevel(logging.DEBUG)
# Root znode under which HBase publishes cluster state in ZooKeeper.
znode = "/hbase"
# LocateMaster takes a string representing the location of the ZooKeeper
# quorum. It then asks ZK for the location of the MetaRegionServer,
# returning a tuple containing (host_name, port).
def LocateMaster(zkquorum, establish_connection_timeout=5, missing_znode_retries=5, zk=None):
    """Ask the ZooKeeper quorum where the MetaRegionServer lives.

    Args:
        zkquorum: comma-separated host:port string for the ZooKeeper ensemble.
        establish_connection_timeout: seconds to wait for the ZK session.
        missing_znode_retries: retries (2 s apart) while the znode is absent.
        zk: an already-started KazooClient; used internally by the retry path.

    Returns:
        (host_name, port) tuple of the server behind /hbase/meta-region-server.

    Raises:
        ZookeeperConnectionException, ZookeeperZNodeException,
        ZookeeperResponseException
    """
    if zk is None:
        # Using Kazoo for interfacing with ZK
        zk = KazooClient(hosts=zkquorum)
        try:
            zk.start(timeout=establish_connection_timeout)
        except KazooTimeoutError:
            raise ZookeeperConnectionException(
                "Cannot connect to ZooKeeper at {}".format(zkquorum))
    # MetaRegionServer information is located at /hbase/meta-region-server
    try:
        rsp, znodestat = zk.get(znode + "/meta-region-server")
    except NoNodeError:
        if missing_znode_retries == 0:
            raise ZookeeperZNodeException(
                "ZooKeeper does not contain meta-region-server node.")
        logger.warn(
            "ZooKeeper does not contain meta-region-server node. Retrying in 2 seconds. (%s retries remaining)", missing_znode_retries)
        sleep(2.0)
        # BUG FIX: the retry previously called LocateMeta, a name that does not
        # exist in this module (the function is named LocateMaster), so every
        # retry raised NameError instead of retrying.
        return LocateMaster(zkquorum, establish_connection_timeout=establish_connection_timeout, missing_znode_retries=missing_znode_retries - 1, zk=zk)
    # We don't need to maintain a connection to ZK. If we need it again we'll
    # recreate the connection. A possible future implementation can subscribe
    # to ZK and listen for when RegionServers go down, then pre-emptively
    # reestablish those regions instead of waiting for a failed rpc to come
    # back. Only issue is that if too many clients subscribe ZK may become
    # overloaded.
    zk.stop()
    if len(rsp) == 0:
        # Empty response is bad.
        raise ZookeeperResponseException(
            "ZooKeeper returned an empty response")
    # The first byte must be \xff and the next four bytes are a big-endian
    # uint32 containing the length of the meta (">" in the struct format is
    # network/big-endian — the old comment said little-endian).
    first_byte, meta_length = unpack(">cI", rsp[:5])
    if first_byte != '\xff':
        # Malformed response.
        # NOTE(review): this comparison assumes Python 2 (rsp is a str); under
        # Python 3 `rsp` would be bytes and first_byte b'\xff'. Confirm the
        # supported interpreter before porting.
        raise ZookeeperResponseException(
            "ZooKeeper returned an invalid response")
    if meta_length < 1 or meta_length > 65000:
        # Sanity bound on the advertised meta length.
        raise ZookeeperResponseException(
            "ZooKeeper returned too much meta information")
    # ZNode data in HBase are serialized protobufs with a four byte magic
    # 'PBUF' prefix.
    magic = unpack(">I", rsp[meta_length + 5:meta_length + 9])[0]
    if magic != 1346524486:
        # 1346524486 == 0x50425546 == the four ASCII bytes "PBUF"
        raise ZookeeperResponseException(
            "ZooKeeper returned an invalid response (are you running a version of HBase supporting Protobufs?)")
    rsp = rsp[meta_length + 9:]
    meta = MetaRegionServer()
    meta.ParseFromString(rsp)
    logger.info('Discovered Master at %s:%s',
                meta.server.host_name, meta.server.port)
    return meta.server.host_name, meta.server.port
| {
"content_hash": "b6778f4ce62d70826a85e2acb514cd5d",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 150,
"avg_line_length": 43.853932584269664,
"alnum_prop": 0.7022802972072765,
"repo_name": "CurleySamuel/PyBase",
"id": "5c396d828dfbd8a96f865a7107ff9e134c5e5f51",
"size": "3903",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zk/client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Protocol Buffer",
"bytes": "72330"
},
{
"name": "Python",
"bytes": "103140"
},
{
"name": "Shell",
"bytes": "4058"
}
],
"symlink_target": ""
} |
__author__ = 'Ge Yang'
from instrumenttypes import * | {
"content_hash": "69205f6971e7aa6d38af463e85d663cc",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 29,
"avg_line_length": 17.666666666666668,
"alnum_prop": 0.6981132075471698,
"repo_name": "episodeyang/instruments",
"id": "61d94a1ded3bd6a2cb07b861e6e737b081d8e50c",
"size": "53",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "instruments/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30487"
}
],
"symlink_target": ""
} |
from marshmallow import Schema, fields, pre_dump
from horse.web.schemas.movie import MovieSchema
class UserSchema(Schema):
    """Serialization schema for a user, including follow/like relationships."""
    pk = fields.Str(dump_only=True)
    name = fields.Str(required=True)
    # Self-referential nested schema; only pk/name of followed users are
    # exposed to avoid recursing into their own relationship lists.
    followed_users = fields.Nested(
        'self', only=('pk', 'name'), dump_only=True, many=True
    )
    liked_movies = fields.Nested(MovieSchema, dump_only=True, many=True)
    @pre_dump
    def load_movies_and_users(self, obj):
        # Materialize the relationships as plain attributes so the Nested
        # fields declared above can find them during dumping.
        obj.followed_users = obj.get_followed_users()
        obj.liked_movies = obj.get_liked_movies()
        return obj
class UserActionSchema(Schema):
    """Input schema for actions that reference a user by primary key only."""
    pk = fields.Str(required=True)
# Shared, reusable schema instances.
user_schema = UserSchema()
users_schema = UserSchema(many=True)
user_action_schema = UserActionSchema()
| {
"content_hash": "78323d84e58177d0ccd9bd08b104ea68",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 72,
"avg_line_length": 27.11111111111111,
"alnum_prop": 0.6885245901639344,
"repo_name": "pragmaticcoders/horse",
"id": "dec5a915030adfda34fa2ea24e0f6e88e28ea0d7",
"size": "732",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "horse/web/schemas/user.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31460"
}
],
"symlink_target": ""
} |
"""
# @package coursera2012
# @author Rolf Hemmerling <hemmerling@gmx.net>
# @version 1.00
# @date 2015-01-01
# @copyright Apache License, Version 2.0
#
# Implementation of the game
# "Asteroids"
# for the Coursera course
# "An Introduction to Interactive Programming in Python"
#
# My game displays the highscore of last game,
# as long as no new game is started :-).
#
# As I used PyUnit for external testing, some functions
#          may contain extra code to accomplish that,
# e.g. "apparently unnecessary extra" return values,
# "apparently unnecessary extra" split of functions,
# "apparently unnecessary" complicated implementation of code,
# some few print commands for traditional "print on console"
# testing are in comments.
#
# Copyright 2012-2015 Rolf Hemmerling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
# 1 Import modules
import simplegui
import math
import random
# 2 Initialize global variables
WIDTH = 800          # canvas width in pixels
HEIGHT = 600         # canvas height in pixels
NUMBER_OF_LIVES = 3  # lives granted at the start of each game
score = 0
lives = NUMBER_OF_LIVES
time = 0             # frame counter driving the background animation
started = False      # True while a game round is running
# Initialisation of the random generator
# on my computer, by this, the result of
# random.randrange(self.randomStartValue,self.randomStopValue,1)
# - is always "1" on Python 2.7.3, at first run.
# - is always "2" on CodeSkulptor, at first run.
myRandomSeed = 3
# Global objects
ship_image = None
# Global class objects
my_ship = None
#a_rock = None
a_missile = None
# Collection of rocks and missiles
rock_group = None
missile_group = None
# define classes
class ImageInfo:
    """Static metadata describing one game image (geometry and lifetime)."""

    def __init__(self, center, size, radius = 0, lifespan = None, animated = False):
        self.center = center
        self.size = size
        self.radius = radius
        # A falsy lifespan (None or 0) means the image never expires.
        self.lifespan = lifespan if lifespan else float('inf')
        self.animated = animated

    def get_center(self):
        """Return the [x, y] center of the image within its file."""
        return self.center

    def get_size(self):
        """Return the [width, height] of the image in pixels."""
        return self.size

    def get_radius(self):
        """Return the collision radius associated with the image."""
        return self.radius

    def get_lifespan(self):
        """Return the lifespan in frames (float('inf') when unlimited)."""
        return self.lifespan

    def get_animated(self):
        """Return True when the image is a multi-frame animation strip."""
        return self.animated
# Ship class
class Ship:
    """Player ship: drawing (with thrust frame), motion physics and shooting."""
    # The timing for the delay the thrust sound plays
    # after keyup of the thrust key
    # is suitable for Dell VOSTRO 1000 notebooks
    THRUST_SOUND_DELAY = 20
    # Acceleration by pressing the thrust key, for one period
    THRUST = 1
    # Friction
    FRICTION = 0.1
    # Speed of the missile
    MISSILE_SPEED = 20
    def __init__(self, pos, vel, angle, image, info):
        # Copy position/velocity so the caller's lists are not aliased.
        self.pos = [pos[0],pos[1]]
        self.vel = [vel[0],vel[1]]
        self.thrust = False
        self.angle = angle
        self.angle_vel = 0
        self.image = image
        self.image_center = info.get_center()
        self.image_size = info.get_size()
        self.radius = info.get_radius()
        # get lifespan of object
        self.lifespan = info.get_lifespan()
        self.thrust_lifespan = 0 # might alternatively be an image property...
    def draw(self,canvas):
        """Draw the ship and keep the thrust sound playing slightly past keyup."""
        global ship_thrust_sound, started
        #canvas.draw_circle(self.pos, self.radius, 1, "White", "White")
        image_center_draw = self.image_center
        if (self.thrust):
            # The thrusting frame sits one image-width to the right of the
            # idle frame in the double-ship sprite sheet.
            image_center_draw = [self.image_center[0] + self.image_size[0], self.image_center[1]]
        else:
            image_center_draw = [self.image_center[0], self.image_center[1]]
        canvas.draw_image(self.image, image_center_draw, self.image_size, self.pos, self.image_size, self.angle)
        # Just for sound: re-arm the countdown while thrusting, then let the
        # sound run on for THRUST_SOUND_DELAY frames after thrust ends.
        if (self.thrust):
            self.thrust_lifespan = self.THRUST_SOUND_DELAY
        if (self.thrust_lifespan>0):
            self.thrust_lifespan -=1
            ship_thrust_sound.play()
        else:
            ship_thrust_sound.rewind()
        return
    # helper functions to handle transformations
    def angle_to_vector(self, ang):
        """Return the unit direction vector [cos, sin] for angle *ang* (radians)."""
        return [math.cos(ang), math.sin(ang)]
    def update(self):
        """Advance one frame: rotate, apply friction/thrust, wrap around screen."""
        self.angle += self.angle_vel
        # Speed update
        # Slowdown due to friction ( in Space :-) )!
        if (self.thrust):
            self.vel = [self.vel[0]*(1-self.FRICTION)+self.THRUST*self.angle_to_vector(self.angle)[0], \
                        self.vel[1]*(1-self.FRICTION)+self.THRUST*self.angle_to_vector(self.angle)[1]]
        else:
            self.vel = [self.vel[0]*(1-self.FRICTION), self.vel[1]*(1-self.FRICTION)]
        # Position update (toroidal wrap at the screen edges)
        self.pos = [(self.pos[0]+self.vel[0])%WIDTH, (self.pos[1]+self.vel[1])%HEIGHT]
        return
    def shoot(self):
        """Spawn a missile sprite and add it to the global missile group."""
        global missile_group
        #global a_missile
        #a_missile = self.generate_dynamic_missile()
        missile_group.append(self.generate_dynamic_missile())
        return
    # Generation of a missile with dynamic parameters
    def generate_dynamic_missile(self):
        """Build a missile Sprite launched from the ship's nose along its heading."""
        global missile_image1, missile_image2, missile_image3, \
               missile_info, missile_sound
        # Start at the tip of the ship (half the image size along the heading).
        newmissile_pos = [self.pos[0]+self.angle_to_vector(self.angle)[0]*self.image_size[0]/2, \
                          self.pos[1]+self.angle_to_vector(self.angle)[1]*self.image_size[1]/2]
        # Missile velocity is ship velocity plus a fixed muzzle speed.
        newmissile_vel = [self.vel[0]+self.MISSILE_SPEED*self.angle_to_vector(self.angle)[0], \
                          self.vel[1]+self.MISSILE_SPEED*self.angle_to_vector(self.angle)[1]]
        newmissile_ang = self.angle
        newmissile_ang_vel = 0
        # Random selection of the kind of missile
        #newmissile_image = missile_image1
        newmissile_image_dic = { 0: missile_image1, 1:missile_image2, 2:missile_image3}
        newmissile_image = newmissile_image_dic[random.randrange(0, 3, 1)]
        newmissile_info = missile_info
        newmissile_sound = missile_sound
        return Sprite( newmissile_pos, newmissile_vel, newmissile_ang, newmissile_ang_vel, \
                       newmissile_image, newmissile_info, newmissile_sound)
    def get_position(self):
        """Return the ship's [x, y] position."""
        return self.pos
    def get_radius(self):
        """Return the ship's collision radius."""
        return self.radius
# Sprite class
class Sprite:
    """A moving, drawable game object (rock or missile) with a bounded lifespan."""

    def __init__(self, pos, vel, ang, ang_vel, image, info, sound = None):
        # Copy the coordinate pairs so the caller's lists are not aliased.
        self.pos = [pos[0], pos[1]]
        self.vel = [vel[0], vel[1]]
        self.angle = ang
        self.angle_vel = ang_vel
        self.image = image
        # Pull geometry/lifetime metadata out of the ImageInfo descriptor.
        self.image_center = info.get_center()
        self.image_size = info.get_size()
        self.radius = info.get_radius()
        self.animated = info.get_animated()
        self.lifespan = info.get_lifespan()
        self.age = 0
        if sound:
            # Restart the sound from the beginning on every spawn.
            sound.rewind()
            sound.play()

    def draw(self, canvas):
        """Render the sprite at its current position and rotation."""
        #canvas.draw_circle(self.pos, self.radius, 1, "Red", "Red")
        canvas.draw_image(self.image, self.image_center, self.image_size, self.pos, self.image_size, self.angle)
        return

    def update(self):
        """Advance one frame; return True while the sprite is still alive."""
        self.age += 1
        self.angle += self.angle_vel
        x, y = self.pos
        dx, dy = self.vel
        # Wrap toroidally at the screen edges.
        self.pos = [(x + dx) % WIDTH, (y + dy) % HEIGHT]
        return self.age < self.lifespan

    # helper functions to handle transformations
    def dist(self, p, q):
        """Return the Euclidean distance between points p and q."""
        dx = p[0] - q[0]
        dy = p[1] - q[1]
        return math.sqrt(dx * dx + dy * dy)

    def collide(self, other_object):
        """Check if 2 objects collide"""
        gap = self.dist(self.get_position(), other_object.get_position())
        return gap < (self.get_radius() + other_object.get_radius())

    def get_position(self):
        """Return the sprite's [x, y] position."""
        return self.pos

    def get_radius(self):
        """Return the sprite's collision radius."""
        return self.radius
class AsteroidsGame():
    """ Implementation of the game Asteroids """
    # 3 initialize class globals
    SHIP_ANGLE_VELOCITY = 0.1
    ROCK_MINSPEED = -5
    ROCK_MAXSPEED = 5
    ROCK_MIN_ANGVEL = -3
    ROCK_MAX_ANGVEL = 3
    # 4 Helper functions to initialize game
    def init(self):
        """Load all image/sound assets once, then delegate to init2() for state."""
        global debris_info, nebula_info, splash_info, \
               ship_info, missile_info, asteroid_info, \
               explosion_info
        # NOTE(review): debris_image8 is listed twice and explosion_image is
        # declared global although only explosion_image1..4 are assigned below.
        global debris_image1, debris_image2, debris_image3, \
               debris_image4, debris_image5, debris_image6, \
               debris_image7, debris_image8, debris_image8, \
               debris_image9, \
               nebula_image1, nebula_image2, \
               splash_image, ship_image, \
               missile_image1, missile_image2, missile_image3, \
               asteroid_image1, asteroid_image2, asteroid_image3, \
               explosion_image
        global soundtrack, missile_sound, ship_thrust_sound, \
               explosion_sound
        global my_ship, a_rock, a_missile
        global rock_group, missile_group
        global myRandomSeed
        global started, score, lives
        # Fixed seed for reproducible rock spawning (see module comment).
        random.seed(myRandomSeed)
        # art assets created by Kim Lathrop, may be freely re-used in non-commercial projects, please credit Kim
        # debris images - debris1_brown.png, debris2_brown.png, debris3_brown.png, debris4_brown.png
        #                 debris1_blue.png, debris2_blue.png, debris3_blue.png, debris4_blue.png, debris_blend.png
        debris_info = ImageInfo([320, 240], [640, 480])
        debris_image1 = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/debris1_brown.png")
        debris_image2 = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/debris2_brown.png")
        debris_image3 = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/debris3_brown.png")
        debris_image4 = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/debris4_brown.png")
        debris_image5 = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/debris1_blue.png")
        debris_image6 = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/debris2_blue.png")
        debris_image7 = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/debris3_blue.png")
        debris_image8 = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/debris4_blue.png")
        debris_image9 = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/debris_blend.png")
        # nebula images - nebula_brown.png, nebula_blue.png
        nebula_info = ImageInfo([400, 300], [800, 600])
        nebula_image1 = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/nebula_blue.png")
        nebula_image2 = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/nebula_brown.png")
        # splash image
        splash_info = ImageInfo([200, 150], [400, 300])
        splash_image = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/splash.png")
        # ship image
        ship_info = ImageInfo([45, 45], [90, 90], 35)
        ship_image = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/double_ship.png")
        # missile image - shot1.png, shot2.png, shot3.png
        # lifespan reduced from 50 ( = given value ) to 10
        missile_info = ImageInfo([5,5], [10, 10], 3, 10)
        missile_image1 = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/shot1.png")
        missile_image2 = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/shot2.png")
        missile_image3 = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/shot3.png")
        # asteroid images - asteroid_blue.png, asteroid_brown.png, asteroid_blend.png
        asteroid_info = ImageInfo([45, 45], [90, 90], 40)
        asteroid_image1 = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/asteroid_blue.png")
        asteroid_image2 = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/asteroid_brown.png")
        asteroid_image3 = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/asteroid_blend.png")
        # animated explosion - explosion_orange.png, explosion_blue.png, explosion_blue2.png, explosion_alpha.png
        explosion_info = ImageInfo([64, 64], [128, 128], 17, 24, True)
        explosion_image1 = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/explosion_orange.png")
        explosion_image2 = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/explosion_blue.png")
        explosion_image3 = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/explosion_blue2.png")
        explosion_image4 = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/explosion_alpha.png")
        # sound assets purchased from sounddogs.com, please do not redistribute
        soundtrack = simplegui.load_sound("http://commondatastorage.googleapis.com/codeskulptor-assets/sounddogs/soundtrack.mp3")
        missile_sound = simplegui.load_sound("http://commondatastorage.googleapis.com/codeskulptor-assets/sounddogs/missile.mp3")
        missile_sound.set_volume(.5)
        ship_thrust_sound = simplegui.load_sound("http://commondatastorage.googleapis.com/codeskulptor-assets/sounddogs/thrust.mp3")
        explosion_sound = simplegui.load_sound("http://commondatastorage.googleapis.com/codeskulptor-assets/sounddogs/explosion.mp3")
        self.init2()
        return
    def init2(self):
        """(Re)initialize game state without reloading assets (also the
        game-over path: reloading sounds here would restart them mid-play)."""
        global debris_info, nebula_info, splash_info, \
               ship_info, missile_info, asteroid_info, \
               explosion_info
        global debris_image1, debris_image2, debris_image3, \
               debris_image4, debris_image5, debris_image6, \
               debris_image7, debris_image8, debris_image8, \
               debris_image9, \
               nebula_image1, nebula_image2, \
               splash_image, ship_image, \
               missile_image1, missile_image2, missile_image3, \
               asteroid_image1, asteroid_image2, asteroid_image3, \
               explosion_image
        global soundtrack, missile_sound, ship_thrust_sound, \
               explosion_sound
        global my_ship, a_rock, a_missile
        global rock_group, missile_group
        global myRandomSeed
        global started, score, lives
        # Stop Sound
        soundtrack.rewind()
        missile_sound.rewind()
        ship_thrust_sound.rewind()
        explosion_sound.rewind()
        started = False
        lives = NUMBER_OF_LIVES
        # initialize ship
        my_ship = Ship([WIDTH / 2, HEIGHT / 2], [0, 0], 0, ship_image, ship_info)
        # create a static Rock
        #a_rock = Sprite([WIDTH / 3, HEIGHT / 3], [1, 1], 0, 0.1, asteroid_image, asteroid_info)
        #self.generate_static_rock()
        rock_group = []
        self.rock_spawner()
        # create a static Missile
        #a_missile = Sprite([2 * WIDTH / 3, 2 * HEIGHT / 3], [-1,1], 0, 0, missile_image1, missile_info, missile_sound)
        missile_group = []
        #a_missile = self.generate_static_missile()
        return
    # 5 Define event handlers
    def group_collide(self, sprite_group, other_object, number_of_lives = 0):
        """Check for collisions of object with group"""
        # Returns number_of_lives decremented once per colliding sprite;
        # colliding sprites are removed from sprite_group in place.
        # NOTE(review): removing from sprite_group while iterating over it can
        # skip the element following each removal; iterate a copy to be safe.
        for sprite in sprite_group:
            if sprite.collide(other_object):
                sprite_group.remove(sprite)
                number_of_lives -=1
        return number_of_lives
    def group_group_collide(self, group1, group2):
        """Check for collision of 2 groups"""
        # group_collide returns -(collision count) when started from 0,
        # so the leading minus turns it back into a positive count.
        # NOTE(review): group1 is also mutated while being iterated (same
        # caveat as group_collide).
        number_of_collisions = 0
        sum_of_collisions = 0
        for group1_member in group1:
            number_of_collisions = -self.group_collide(group2, group1_member)
            sum_of_collisions += number_of_collisions
            if number_of_collisions > 0:
                group1.remove(group1_member)
        return sum_of_collisions
    def process_sprite_group(self, canvas, sprite_group):
        """Visualize the sprites on the screen"""
        # Draws each sprite and drops the ones whose lifespan has expired.
        # NOTE(review): removes from sprite_group while iterating (see above).
        for sprite in sprite_group:
            sprite.draw(canvas)
            if not sprite.update():
                sprite_group.remove(sprite)
        return
    # mouseclick handlers that reset UI and conditions whether splash image is drawn
    def click(self, pos):
        """Start a new game when the splash screen is clicked inside its bounds."""
        global started, score
        center = [WIDTH / 2, HEIGHT / 2]
        size = splash_info.get_size()
        inwidth = (center[0] - size[0] / 2) < pos[0] < (center[0] + size[0] / 2)
        inheight = (center[1] - size[1] / 2) < pos[1] < (center[1] + size[1] / 2)
        if (not started) and inwidth and inheight:
            started = True
            # Reset score just when new game starts
            score = 0
    def draw(self, canvas):
        """ Draw Handler """
        global time
        global debris_info, nebula_info, splash_info, \
               ship_info, missile_info, asteroid_info, \
               explosion_info
        global debris_image1, debris_image2, debris_image3, \
               debris_image4, debris_image5, debris_image6, \
               debris_image7, debris_image8, debris_image8, \
               debris_image9, \
               nebula_image1, nebula_image2, \
               splash_image, ship_image, \
               missile_image1, missile_image2, missile_image3, \
               asteroid_image1, asteroid_image2, asteroid_image3, \
               explosion_image
        global soundtrack, missile_sound, ship_thrust_sound, \
               explosion_sound
        global my_ship, a_rock, a_missile
        global lives, score
        global started
        # animate background
        time += 1
        center = debris_info.get_center()
        size = debris_info.get_size()
        wtime = (time / 8) % center[0]
        canvas.draw_image(nebula_image2, nebula_info.get_center(), nebula_info.get_size(), [WIDTH/2, HEIGHT/2], [WIDTH, HEIGHT])
        canvas.draw_image(debris_image5, [center[0]-wtime, center[1]], [size[0]-2*wtime, size[1]],
                                [WIDTH/2+1.25*wtime, HEIGHT/2], [WIDTH-2.5*wtime, HEIGHT])
        canvas.draw_image(debris_image5, [size[0]-wtime, center[1]], [2*wtime, size[1]],
                                [1.25*wtime, HEIGHT/2], [2.5*wtime, HEIGHT])
        # check for collisions: ship vs rocks costs lives,
        # missiles vs rocks earns score.
        lives = self.group_collide(rock_group, my_ship, lives)
        score += self.group_group_collide(missile_group, rock_group)
        if (lives <= 0):
            started = False
            # it is important NOT to load the sounds again,
            # but just to stop it!
            self.init2()
        if started:
            # draw ship and update ship
            my_ship.draw(canvas)
            my_ship.update()
            # draw sprites and update sprites
            #a_rock.draw(canvas)
            #a_rock.update()
            self.process_sprite_group(canvas, rock_group)
            # draw missiles and update missiles
            #a_missile.draw(canvas)
            #a_missile.update()
            self.process_sprite_group(canvas, missile_group)
            # update score and lifes
            canvas.draw_text("Lives "+ str(lives), (WIDTH/8, 50), 24, "White")
        else:
            # draw splash screen if not started
            canvas.draw_image(splash_image, splash_info.get_center(),
                              splash_info.get_size(), [WIDTH/2, HEIGHT/2],
                              splash_info.get_size())
        # Display score of current game
        # and display highscore of last game, while waiting for new game
        canvas.draw_text("Score "+ str(score), (WIDTH-WIDTH/4, 50), 24, "White")
        return
    # timer handler that spawns a rock
    def rock_spawner(self):
        """Periodically add a rock while a game is running (capped at 12)."""
        if started:
            #global rock_group
            #global a_rock
            #a_rock = self.generate_dynamic_rock()
            #a_rock = self.generate_static_rock()
            # just generate a rock if there are less than 12
            # rocks in space
            if (len(rock_group) < 12):
                rock_group.append(self.generate_dynamic_rock())
        return
    # Generation of a rock with static parameters
    def generate_static_rock(self):
        """Build a rock Sprite with fixed position/velocity (testing helper)."""
        global asteroid_image1, asteroid_info
        rock_pos = [WIDTH / 3, HEIGHT / 3]
        rock_vel = [1, 1]
        rock_ang = 0
        rock_ang_vel = 0
        rock_image = asteroid_image1
        rock_info = asteroid_info
        return Sprite(rock_pos, rock_vel, rock_ang, rock_ang_vel, rock_image, rock_info)
    # Generation of a rock with dynamic parameters
    def generate_dynamic_rock(self):
        """Build a randomized rock Sprite spawned away from the ship."""
        global asteroid_image1, asteroid_image2, asteroid_image3, \
               asteroid_info
        global my_ship
        # rock position must be in some distance of ship
        # to make it easy, the distance is always positive,
        # but of course by modulo arithmetics the rock is kept on screen
        #rock_pos = [random.randrange(0,WIDTH,1), random.randrange(0,HEIGHT,1)]
        rock_pos = [(my_ship.pos[0]+random.randrange(-WIDTH/4,WIDTH/4,1) + WIDTH/2) % WIDTH,
                    (my_ship.pos[1]+random.randrange(-HEIGHT/4,HEIGHT/4,1) + HEIGHT/2) % HEIGHT]
        rock_vel = [random.randrange(self.ROCK_MINSPEED, self.ROCK_MAXSPEED,1), \
                    random.randrange(self.ROCK_MIN_ANGVEL, self.ROCK_MAX_ANGVEL, 1)*self.SHIP_ANGLE_VELOCITY][0:1] and \
                   [random.randrange(self.ROCK_MINSPEED, self.ROCK_MAXSPEED,1), \
                    random.randrange(self.ROCK_MINSPEED, self.ROCK_MAXSPEED,1)]
        rock_ang = random.randrange(0, 360, 1)/(2*math.pi)
        rock_ang_vel = random.randrange(self.ROCK_MIN_ANGVEL, self.ROCK_MAX_ANGVEL, 1)*self.SHIP_ANGLE_VELOCITY
        # Random asteroid selection
        #rock_image = asteroid_image1
        # initialize asteroid dic
        asteroid_image_dic = { 0: asteroid_image1, 1:asteroid_image2, 2:asteroid_image3}
        rock_image = asteroid_image_dic[random.randrange(0, 3, 1)]
        rock_info = asteroid_info
        return Sprite(rock_pos, rock_vel, rock_ang, rock_ang_vel, rock_image, rock_info)
    # Generation of a missile with static parameters
    def generate_static_missile(self):
        """Build a missile Sprite with fixed parameters (testing helper)."""
        global missile_image1, missile_image2, missile_image3, \
               missile_info, missile_sound
        newmissile_pos = [2 * WIDTH / 3, 2 * HEIGHT / 3]
        newmissile_vel = [-1,1]
        newmissile_ang = 0
        newmissile_ang_vel = 0
        newmissile_image = missile_image2
        newmissile_info = missile_info
        newmissile_sound = missile_sound
        return Sprite( newmissile_pos, newmissile_vel, newmissile_ang, newmissile_ang_vel, \
                       newmissile_image, newmissile_info, newmissile_sound)
    def keydown(self, key):
        """Handle key presses: rotate, thrust and shoot while a game runs."""
        if started:
            #print "keydown = ", key, int(key)
            if (key == 37): # key "arrow left"
                my_ship.angle_vel = -self.SHIP_ANGLE_VELOCITY
            if (key == 39): # key "arrow right"
                my_ship.angle_vel = self.SHIP_ANGLE_VELOCITY
            if (key == 38): # key "arrow up"
                my_ship.thrust = True
            if (key == 32): # key "space"
                my_ship.shoot()
        return
    def keyup(self, key):
        """ Key released """
        if started:
            if (key == 37): # key "arrow left"
                my_ship.angle_vel = 0
            if (key == 39): # key "arrow right"
                my_ship.angle_vel = 0
            if (key == 38): # key "arrow up"
                my_ship.thrust = False
            if (key == 32): # key "space"
                # Also shoot if you rise the key = double frequency
                #my_ship.shoot()
                pass
        return
    # This function is executed to start the application
    def main(self):
        """ Class start function """
        self.init()
        # 6 Initialize frame
        global frame
        frame = simplegui.create_frame("Asteroids", WIDTH, HEIGHT)
        # 7 register handlers
        frame.set_draw_handler(self.draw)
        timer = simplegui.create_timer(1000.0, self.rock_spawner)
        frame.set_keydown_handler(self.keydown)
        frame.set_keyup_handler(self.keyup)
        frame.set_mouseclick_handler(self.click)
        # 8 # get things rolling
        timer.start()
        frame.start()
        return
# always remember to check your completed program against the grading rubric
| {
"content_hash": "9dd5e1bfb843d5e8272b41624224b0ab",
"timestamp": "",
"source": "github",
"line_count": 617,
"max_line_length": 139,
"avg_line_length": 42.705024311183145,
"alnum_prop": 0.5925082545827166,
"repo_name": "hemmerling/python-coursera2012",
"id": "883c08d659efa7d493a2aabf898d0eaddacdec3d",
"size": "26397",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/week8/asteroids.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "316418"
}
],
"symlink_target": ""
} |
import os
# Directory for tweet content (resolved relative to the project root
# by get_filepath below).
content_dir = 'content'
# File containing simple tweet content, one tweet per line.
source_filename = 'simple.txt'
# File for already-tweeted content.
used_content_filename = 'used.txt'
def new_tweet():
    """Return the next tweet from the simple content source.

    Takes the last line of the source file, archives it to the used-content
    file, removes it from the source, and returns it without the trailing
    newline.

    Raises:
        IndexError: if the source file is empty.
    """
    filename = get_filepath(source_filename)
    # Read-only access is sufficient here; the original 'r+' mode requested
    # write access it never used. Also avoid shadowing the builtin 'file'.
    with open(filename, 'r') as source:
        lines = source.readlines()
    tweet_content = lines[-1]
    copy_to_used_file(tweet_content)
    delete_content(tweet_content)
    return tweet_content.rstrip('\n')
def get_filepath(filename):
    """Return the path of *filename* inside the content directory.

    The content directory is located two directory levels above this module.
    """
    this_file = os.path.realpath(__file__)
    base_dir = os.path.dirname(os.path.dirname(this_file))
    return os.path.join(base_dir, content_dir, filename)
def delete_content(content):
    """Remove every line equal to *content* from the simple content source."""
    filename = get_filepath(source_filename)
    with open(filename, 'r+') as f:
        lines = f.readlines()
        # Rewrite the file in place, keeping only the non-matching lines,
        # then truncate away any leftover tail.
        f.seek(0)
        for line in lines:
            if line != content:
                f.write(line)
        f.truncate()
def copy_to_used_file(content):
""" Appends the given content to the used-tweets file. """
filepath = get_filepath(used_content_filename)
with open(filepath, 'a') as file:
file.write(content) | {
"content_hash": "55aaf8062345ec0ca03eb574a5d02e2f",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 85,
"avg_line_length": 31.02127659574468,
"alnum_prop": 0.6556927297668038,
"repo_name": "SheldonSandbekkhaug/water-bot",
"id": "dda856400569cfcf28657b8ebcb075bd8ba54db3",
"size": "1458",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/simple_tweet_source.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "48565"
}
],
"symlink_target": ""
} |
from datetime import datetime
from django.conf import settings
from django.test import TestCase
from package.tests import data, initial_data
from searchv2.utils import remove_prefix, clean_title
class UtilFunctionTest(TestCase):
    """Tests for the searchv2 prefix-stripping helpers."""

    def setUp(self):
        # Build prefixed inputs covering every separator the helpers accept.
        prefix = settings.PACKAGINATOR_SEARCH_PREFIX.lower()
        self.values = [prefix + suffix for suffix in ("-me", ".me", "/me", "_me")]

    def test_remove_prefix(self):
        for value in self.values:
            self.assertEqual(remove_prefix(value), "me")

    def test_clean_title(self):
        prefix = settings.PACKAGINATOR_SEARCH_PREFIX.lower()
        expected = prefix + "me"
        for value in self.values:
            self.assertEqual(clean_title(value), expected)
| {
"content_hash": "d1be90a66822ccf195d17ea02b65ce61",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 87,
"avg_line_length": 31.653846153846153,
"alnum_prop": 0.6330498177399757,
"repo_name": "benracine/opencomparison",
"id": "494e5f6149f601c18978ee533a2033ccb7e411d7",
"size": "823",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/searchv2/tests/test_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "245807"
},
{
"name": "Python",
"bytes": "570176"
}
],
"symlink_target": ""
} |
import canvas_manager
from canvas_manager import CanvasManager
import os
from PIL import Image, ImageTk
from tag_editor_popup import TagEditorPopup
from target_pickler import TargetPickler
import Tkinter, tkFileDialog, tkMessageBox, ttk
# Drawing-tool selection codes, compared against the value of the
# editor's radio-button group (self._radio_selection).
CURSOR = 0
IMAGE = 1
RECTANGLE = 2
OVAL = 3
TRIANGLE = 4
FREEFORM_POLYGON = 5
D_SILHOUETTE_3 = 6
D_SILHOUETTE_4 = 7
D_SILHOUETTE_5 = 8
# Item-id tuple returned by the canvas's find_closest() when the click
# hit only the background -- presumably canvas item 1 is the backdrop;
# verify against the canvas setup (not visible in this chunk).
CANVAS_BACKGROUND = (1,)
class TargetEditor():
def save_target(self):
is_animated = self._canvas_manager.is_animated(self._regions)
if is_animated:
initdir = "animated_targets/"
else:
initdir = "targets/"
target_file = tkFileDialog.asksaveasfilename(
defaultextension=".target",
filetypes=[("ShootOFF Target", ".target")],
initialdir=initdir,
title="Save ShootOFF Target",
parent=self._window)
is_new_target = target_file and not os.path.isfile(target_file)
if target_file:
target_pickler = TargetPickler()
target_pickler.save(target_file, self._regions,
self._target_canvas)
if (is_new_target):
self._notify_new_target(target_file, is_animated)
def color_selected(self, event):
self._target_canvas.focus_set()
if (self._selected_region is not None and
self._selected_region != CANVAS_BACKGROUND):
self._target_canvas.itemconfig(self._selected_region,
fill=self._fill_color_combo.get())
def bring_forward(self):
if (self._selected_region is not None and
self._selected_region != CANVAS_BACKGROUND):
below = self._target_canvas.find_above(self._selected_region)
if len(below) > 0:
self._target_canvas.tag_raise(self._selected_region,
below)
# we have to change the order in the regions list
# as well so the z order is maintained during pickling
self.reverse_regions(below, self._selected_region)
def send_backward(self):
if (self._selected_region is not None and
self._selected_region != CANVAS_BACKGROUND):
above = self._target_canvas.find_below(self._selected_region)
if len(above) > 0 and above != CANVAS_BACKGROUND:
self._target_canvas.tag_lower(self._selected_region,
above)
# we have to change the order in the regions list
# as well so the z order is maintained during pickling
self.reverse_regions(above, self._selected_region)
def reverse_regions(self, region1, region2):
r1 = self._regions.index(region1[0])
r2 = self._regions.index(region2[0])
self._regions[r2], self._regions[r1] = self._regions[r1], self._regions[r2]
def undo_vertex(self, event):
if self._radio_selection.get() == FREEFORM_POLYGON:
# Remove the last vertex (if there is
if len(self._freeform_vertices_ids) > 0:
self._target_canvas.delete(self._freeform_vertices_ids[-1])
del self._freeform_vertices_points[-1]
del self._freeform_vertices_ids[-1]
# Remove the last edge (if there is one)
if len(self._freeform_edges_ids) > 0:
self._target_canvas.delete(self._freeform_edges_ids[-1])
del self._freeform_edges_ids[-1]
if self._freeform_temp_line_id is not None:
self._target_canvas.delete(self._freeform_temp_line_id)
self._freeform_temp_line_id = None
def _reset_freeform_polygon(self):
self._target_canvas.delete("_shape:vertex")
self._target_canvas.delete("_shape:freeform_edge")
self._freeform_vertices_points = []
self._freeform_vertices_ids = []
self._freeform_edges_ids = []
self._freeform_temp_line_id = None
def radio_button_click(self):
if self._radio_selection.get() != FREEFORM_POLYGON:
self._reset_freeform_polygon()
if self._radio_selection.get() == IMAGE:
image_file = tkFileDialog.askopenfilename(defaultextension=".*",
filetypes=[("Graphics Interchange Format", ".gif"),
("Portable Network Graphic", ".png")],
initialdir="animated_targets/",
title="Open Target Image",
parent=self._window)
if image_file == None or len(image_file) == 0:
self._radio_selection.set(CURSOR)
return
self._image_path = os.path.relpath(image_file)
def canvas_right_click(self, event):
if self._radio_selection.get() == FREEFORM_POLYGON:
if len(self._freeform_vertices_points) < 4:
tkMessageBox.showerror("Invalid Regular Polygon",
"A freeform polygon must have at least 3 vertices and should be " +
"closed.",
parent=self._frame)
return
# Make the last region the same as the first region, otherwise
# they might not line up
self._freeform_vertices_points[-1] = self._freeform_vertices_points[0]
# Create the new region
self._freeform_region = self._target_canvas.create_polygon(
self._freeform_vertices_points,
fill="black", outline="black", stipple="gray25",
tags=("_shape:freeform_polygon"))
self._regions.append(self._freeform_region)
self._create_cursor_shape(event)
# Delete all temporary data and shapes
self._reset_freeform_polygon()
def canvas_click(self, event):
if self._radio_selection.get() == FREEFORM_POLYGON:
self._freeform_vertices_points.append((event.x, event.y))
self._freeform_vertices_ids.append(self._cursor_shape)
if self._freeform_temp_line_id is not None:
self._freeform_edges_ids.append(self._freeform_temp_line_id)
self._create_cursor_shape(event)
elif self._radio_selection.get() == IMAGE:
# Make image a part of the target
image = self._canvas_manager.cache_image_frames(self._cursor_shape, self._image_path)
self._target_canvas.itemconfig(self._cursor_shape, image=image)
self._canvas_manager.animate(self._cursor_shape, image)
self._regions.append(self._cursor_shape)
self._create_cursor_shape(event)
elif self._radio_selection.get() != CURSOR:
# This will make it so that mouse move event
# won't delete the current cursor shape and will
# make a new one, thus leaving the current shape
# as a region
self._regions.append(self._cursor_shape)
self._create_cursor_shape(event)
else:
old_region = self._selected_region
self._selected_region = event.widget.find_closest(
event.x, event.y)
self._canvas_manager.selection_update_listener(old_region,
self._selected_region)
if self._selected_region != CANVAS_BACKGROUND:
tags = self._target_canvas.gettags(self._selected_region)
if not "_shape:image" in tags:
self._fill_color_combo.configure(state="readonly")
self._fill_color_combo.set(
event.widget.itemcget(self._selected_region, "fill"))
else:
self._fill_color_combo.configure(state=Tkinter.DISABLED)
self._tags_button.configure(state=Tkinter.NORMAL)
if self._tag_popup_state.get()==True:
self.toggle_tag_editor()
else:
self._fill_color_combo.configure(state=Tkinter.DISABLED)
self._tags_button.configure(state=Tkinter.DISABLED)
if self._tag_popup_state.get()==True:
self._tag_popup_state.set(False)
self.toggle_tag_editor()
def canvas_mouse_move(self, event):
    """Follow the pointer: redraw the preview (cursor) shape at its position."""
    canvas = self._target_canvas
    # Drop the previous preview shape and any dangling freeform edge.
    for item_id in (self._cursor_shape, self._freeform_temp_line_id):
        if item_id is not None:
            canvas.delete(item_id)
    if self._radio_selection.get() == CURSOR:
        self._cursor_shape = None
    self._create_cursor_shape(event)
def _create_cursor_shape(self, event):
    """Create the preview shape for the active tool at the mouse position.

    The created canvas item id is stored in ``self._cursor_shape``.  Each
    shape carries a ``_shape:<kind>`` tag so other code can identify it.
    The silhouette outlines are fixed vertex tables scaled by
    ``aqt_scale`` around the mouse position.
    """
    initial_size = 30    # half-extent in pixels for the simple shapes
    aqt_scale = 2.5      # scale factor for the silhouette vertex tables
    if self._radio_selection.get() == IMAGE:
        # Keep a reference to the PhotoImage so it isn't garbage collected.
        image = Image.open(self._image_path)
        self._cursor_photoimage = ImageTk.PhotoImage(image)
        self._cursor_shape = self._target_canvas.create_image(
            event.x, event.y, image=self._cursor_photoimage,
            tags=("_shape:image", "_path:" + self._image_path))
    elif self._radio_selection.get() == RECTANGLE:
        self._cursor_shape = self._target_canvas.create_rectangle(
            event.x - initial_size,
            event.y - initial_size,
            event.x + initial_size,
            event.y + initial_size,
            fill="black", stipple="gray25", tags=("_shape:rectangle"))
    elif self._radio_selection.get() == OVAL:
        self._cursor_shape = self._target_canvas.create_oval(
            event.x - initial_size,
            event.y - initial_size,
            event.x + initial_size,
            event.y + initial_size,
            fill="black", stipple="gray25", tags=("_shape:oval"))
    elif self._radio_selection.get() == TRIANGLE:
        # Closed triangle: the first vertex is repeated at the end.
        self._cursor_shape = self._target_canvas.create_polygon(
            event.x,
            event.y - initial_size,
            event.x + initial_size,
            event.y + initial_size,
            event.x - initial_size,
            event.y + initial_size,
            event.x,
            event.y - initial_size,
            fill="black", outline="black", stipple="gray25",
            tags=("_shape:triangle"))
    elif self._radio_selection.get() == D_SILHOUETTE_3:
        self._cursor_shape = self._target_canvas.create_polygon(
            event.x+15.083*aqt_scale,event.y+13.12*aqt_scale,
            event.x+15.083*aqt_scale,event.y+-0.147*aqt_scale,
            event.x+14.277*aqt_scale,event.y+-2.508*aqt_scale,
            event.x+13.149*aqt_scale,event.y+-4.115*aqt_scale,
            event.x+11.841*aqt_scale,event.y+-5.257*aqt_scale,
            event.x+10.557*aqt_scale,event.y+-6.064*aqt_scale,
            event.x+8.689*aqt_scale,event.y+-6.811*aqt_scale,
            event.x+7.539*aqt_scale,event.y+-8.439*aqt_scale,
            event.x+7.076*aqt_scale,event.y+-9.978*aqt_scale,
            event.x+6.104*aqt_scale,event.y+-11.577*aqt_scale,
            event.x+4.82*aqt_scale,event.y+-12.829*aqt_scale,
            event.x+3.43*aqt_scale,event.y+-13.788*aqt_scale,
            event.x+1.757*aqt_scale,event.y+-14.386*aqt_scale,
            event.x+0.083*aqt_scale,event.y+-14.55*aqt_scale,
            event.x+-1.59*aqt_scale,event.y+-14.386*aqt_scale,
            event.x+-3.263*aqt_scale,event.y+-13.788*aqt_scale,
            event.x+-4.653*aqt_scale,event.y+-12.829*aqt_scale,
            event.x+-5.938*aqt_scale,event.y+-11.577*aqt_scale,
            event.x+-6.909*aqt_scale,event.y+-9.978*aqt_scale,
            event.x+-7.372*aqt_scale,event.y+-8.439*aqt_scale,
            event.x+-8.522*aqt_scale,event.y+-6.811*aqt_scale,
            event.x+-10.39*aqt_scale,event.y+-6.064*aqt_scale,
            event.x+-11.674*aqt_scale,event.y+-5.257*aqt_scale,
            event.x+-12.982*aqt_scale,event.y+-4.115*aqt_scale,
            event.x+-14.11*aqt_scale,event.y+-2.508*aqt_scale,
            event.x+-14.917*aqt_scale,event.y+-0.147*aqt_scale,
            event.x+-14.917*aqt_scale,event.y+13.12*aqt_scale,
            fill="black", outline="black", stipple="gray25",
            tags=("_shape:aqt3"))
    elif self._radio_selection.get() == D_SILHOUETTE_4:
        self._cursor_shape = self._target_canvas.create_polygon(
            event.x+11.66*aqt_scale,event.y+5.51*aqt_scale,
            event.x+11.595*aqt_scale,event.y+0.689*aqt_scale,
            event.x+11.1*aqt_scale,event.y+-1.084*aqt_scale,
            event.x+9.832*aqt_scale,event.y+-2.441*aqt_scale,
            event.x+7.677*aqt_scale,event.y+-3.322*aqt_scale,
            event.x+5.821*aqt_scale,event.y+-4.709*aqt_scale,
            event.x+4.715*aqt_scale,event.y+-6.497*aqt_scale,
            event.x+4.267*aqt_scale,event.y+-8.135*aqt_scale,
            event.x+3.669*aqt_scale,event.y+-9.41*aqt_scale,
            event.x+2.534*aqt_scale,event.y+-10.553*aqt_scale,
            event.x+1.436*aqt_scale,event.y+-11.091*aqt_scale,
            event.x+0.083*aqt_scale,event.y+-11.323*aqt_scale,
            event.x+-1.269*aqt_scale,event.y+-11.091*aqt_scale,
            event.x+-2.367*aqt_scale,event.y+-10.553*aqt_scale,
            event.x+-3.502*aqt_scale,event.y+-9.41*aqt_scale,
            event.x+-4.1*aqt_scale,event.y+-8.135*aqt_scale,
            event.x+-4.548*aqt_scale,event.y+-6.497*aqt_scale,
            event.x+-5.654*aqt_scale,event.y+-4.709*aqt_scale,
            event.x+-7.51*aqt_scale,event.y+-3.322*aqt_scale,
            event.x+-9.665*aqt_scale,event.y+-2.441*aqt_scale,
            event.x+-10.933*aqt_scale,event.y+-1.084*aqt_scale,
            event.x+-11.428*aqt_scale,event.y+0.689*aqt_scale,
            event.x+-11.493*aqt_scale,event.y+5.51*aqt_scale,
            fill="black", outline="black", stipple="gray25",
            tags=("_shape:aqt4"))
    elif self._radio_selection.get() == D_SILHOUETTE_5:
        self._cursor_shape = self._target_canvas.create_polygon(
            event.x+7.893*aqt_scale,event.y+3.418*aqt_scale,
            event.x+7.893*aqt_scale,event.y+1.147*aqt_scale,
            event.x+7.255*aqt_scale,event.y+0.331*aqt_scale,
            event.x+5.622*aqt_scale,event.y+-0.247*aqt_scale,
            event.x+4.187*aqt_scale,event.y+-1.124*aqt_scale,
            event.x+2.833*aqt_scale,event.y+-2.339*aqt_scale,
            event.x+1.917*aqt_scale,event.y+-3.594*aqt_scale,
            event.x+1.219*aqt_scale,event.y+-5.048*aqt_scale,
            event.x+0.9*aqt_scale,event.y+-6.223*aqt_scale,
            event.x+0.801*aqt_scale,event.y+-7.1*aqt_scale,
            event.x+0.521*aqt_scale,event.y+-7.558*aqt_scale,
            event.x+0.083*aqt_scale,event.y+-7.617*aqt_scale,
            event.x+-0.354*aqt_scale,event.y+-7.558*aqt_scale,
            event.x+-0.634*aqt_scale,event.y+-7.1*aqt_scale,
            event.x+-0.733*aqt_scale,event.y+-6.223*aqt_scale,
            event.x+-1.052*aqt_scale,event.y+-5.048*aqt_scale,
            event.x+-1.75*aqt_scale,event.y+-3.594*aqt_scale,
            event.x+-2.666*aqt_scale,event.y+-2.339*aqt_scale,
            event.x+-4.02*aqt_scale,event.y+-1.124*aqt_scale,
            event.x+-5.455*aqt_scale,event.y+-0.247*aqt_scale,
            event.x+-7.088*aqt_scale,event.y+0.331*aqt_scale,
            event.x+-7.726*aqt_scale,event.y+1.147*aqt_scale,
            event.x+-7.726*aqt_scale,event.y+3.418*aqt_scale,
            fill="black", outline="black", stipple="gray25",
            tags=("_shape:aqt5"))
    elif self._radio_selection.get() == FREEFORM_POLYGON:
        # draw a vertex for the polygon
        vertex_size = 2
        self._cursor_shape = self._target_canvas.create_oval(
            event.x - vertex_size,
            event.y - vertex_size,
            event.x + vertex_size,
            event.y + vertex_size,
            fill="black", tags=("_shape:vertex"))
        # draw a dashed line between this vertex and the last
        # vertex drawn
        if len(self._freeform_vertices_points) > 0:
            last_point = self._freeform_vertices_points[-1]
            self._freeform_temp_line_id = self._target_canvas.create_line(
                last_point,
                event.x, event.y,
                dash=(4,4), tags="_shape:freeform_edge")
def canvas_delete_region(self, event):
    """Remove the currently selected region from the canvas and region list."""
    region = self._selected_region
    if region is None or region == CANVAS_BACKGROUND:
        return
    # find_closest() returns a tuple of item ids; drop each from our list.
    for shape in region:
        self._regions.remove(shape)
    event.widget.delete(region)
    self._selected_region = None
def toggle_tag_editor(self):
    """Show the tag editor below the tags button, or hide it, based on
    the checkbutton state."""
    if self._tag_popup_state.get() != True:
        self._tag_editor.hide()
        return
    button = self._tags_button
    x = button.winfo_x() + (button.winfo_width() / 2)
    y = button.winfo_y() + (button.winfo_height() * 1.5)
    self._tag_editor.show(
        self._target_canvas.gettags(self._selected_region), x, y)
def update_tags(self, new_tag_list):
    """Replace the selected region's user-visible tags with *new_tag_list*.

    Internal tags (those starting with "_") are kept; every other tag is
    removed before the new tags are added.
    """
    # delete all of the non-internal tags
    for tag in self._target_canvas.gettags(self._selected_region):
        if not tag.startswith("_"):
            self._target_canvas.dtag(self._selected_region,
                tag)
    # add all tags in the new tag list.  gettags() returns a tuple, so
    # coerce new_tag_list: the original `tags + new_tag_list` raised
    # TypeError whenever the editor passed a plain list.
    tags = self._target_canvas.gettags(self._selected_region)
    self._target_canvas.itemconfig(self._selected_region,
        tags=tags + tuple(new_tag_list))
def build_gui(self, parent, webcam_image):
    """Build the Target Editor window: toolbar, tag editor popup, and the
    drawing canvas showing *webcam_image* as the background.

    Also binds mouse/keyboard events for drawing, selection, deletion,
    undo, and right-click, and positions the window near *parent*.
    """
    # Create the main window
    self._window = Tkinter.Toplevel(parent)
    self._window.transient(parent)
    self._window.title("Target Editor")
    self._frame = ttk.Frame(self._window)
    self._frame.pack(padx=15, pady=15)
    self.create_toolbar(self._frame)
    # Create tags popup frame
    self._tag_editor = TagEditorPopup(self._window, self.update_tags)
    # Create the canvas the target will be drawn on
    # and show the webcam frame in it
    self._webcam_image = webcam_image
    self._target_canvas = Tkinter.Canvas(self._frame,
        width=webcam_image.width(), height=webcam_image.height())
    self._target_canvas.create_image(0, 0, image=self._webcam_image,
        anchor=Tkinter.NW, tags=("background"))
    self._target_canvas.pack()
    self._target_canvas.bind('<ButtonPress-1>', self.canvas_click)
    self._target_canvas.bind('<Motion>', self.canvas_mouse_move)
    self._target_canvas.bind('<Delete>', self.canvas_delete_region)
    self._target_canvas.bind('<Control-z>', self.undo_vertex)
    self._target_canvas.bind('<ButtonPress-3>', self.canvas_right_click)
    self._canvas_manager = CanvasManager(self._target_canvas, self._image_regions_images)
    # Align this window with it's parent otherwise it ends up all kinds of
    # crazy places when multiple monitors are used
    parent_x = parent.winfo_rootx()
    parent_y = parent.winfo_rooty()
    self._window.geometry("+%d+%d" % (parent_x+20, parent_y+20))
def create_toolbar(self, parent):
    """Build the editor toolbar: save button, tool radio buttons, z-order
    buttons, the tag-editor toggle, and the fill-color combo box.

    The selected tool is tracked in ``self._radio_selection`` (an IntVar
    holding one of the tool constants, defaulting to CURSOR).
    """
    # Create the toolbar
    toolbar = Tkinter.Frame(parent, bd=1, relief=Tkinter.RAISED)
    self._radio_selection = Tkinter.IntVar()
    self._radio_selection.set(CURSOR)
    # Save button
    self._save_icon = Image.open("images/gnome_media_floppy.png")
    self.create_toolbar_button(toolbar, self._save_icon,
        self.save_target, "Save Target")
    # cursor button
    self._cursor_icon = Image.open("images/cursor.png")
    self.create_radio_button(toolbar, self._cursor_icon, "Select Region", CURSOR)
    # image button
    self._image_icon = Image.open("images/gnome_image_x_generic.png")
    self.create_radio_button(toolbar, self._image_icon, "Draw Image", IMAGE)
    # rectangle button
    self._rectangle_icon = Image.open("images/rectangle.png")
    self.create_radio_button(toolbar, self._rectangle_icon, "Draw Rectangle", RECTANGLE)
    # oval button
    self._oval_icon = Image.open("images/oval.png")
    self.create_radio_button(toolbar, self._oval_icon, "Draw Oval", OVAL)
    # triangle button
    self._triangle_icon = Image.open("images/triangle.png")
    self.create_radio_button(toolbar, self._triangle_icon, "Draw Triangle", TRIANGLE)
    # Appleseed D Silhouette 3 button
    self._d_silhouette_3_icon = Image.open("images/appleseed_d_silhouette_3.png")
    self.create_radio_button(toolbar, self._d_silhouette_3_icon, "Draw D Silhouette 3", D_SILHOUETTE_3)
    # Appleseed D Silhouette 4 button
    self._d_silhouette_4_icon = Image.open("images/appleseed_d_silhouette_4.png")
    self.create_radio_button(toolbar, self._d_silhouette_4_icon, "Draw D Silhouette 4", D_SILHOUETTE_4)
    # Appleseed D Silhouette 5 button
    self._d_silhouette_5_icon = Image.open("images/appleseed_d_silhouette_5.png")
    self.create_radio_button(toolbar, self._d_silhouette_5_icon, "Draw D Silhouette 5", D_SILHOUETTE_5)
    # freeform polygon button
    self._freeform_polygon_icon = Image.open("images/freeform_polygon.png")
    self.create_radio_button(toolbar, self._freeform_polygon_icon, "Draw Freeform Polygon", FREEFORM_POLYGON)
    # bring forward button
    self._bring_forward_icon = Image.open("images/bring_forward.png")
    self.create_toolbar_button(toolbar, self._bring_forward_icon,
        self.bring_forward, "Bring Forward")
    # send backward button
    self._send_backward_icon = Image.open("images/send_backward.png")
    self.create_toolbar_button(toolbar, self._send_backward_icon,
        self.send_backward, "Send Backward")
    # show tags button (disabled until a region is selected)
    tags_icon = ImageTk.PhotoImage(Image.open("images/tags.png"))
    self._tag_popup_state = Tkinter.IntVar()
    self._tags_button = Tkinter.Checkbutton(toolbar,
        image=tags_icon, indicatoron=False, variable=self._tag_popup_state,
        command=self.toggle_tag_editor, state=Tkinter.DISABLED)
    # keep a reference so the PhotoImage isn't garbage collected
    self._tags_button.image = tags_icon
    self._tags_button.pack(side=Tkinter.LEFT, padx=2, pady=2)
    self.create_tooltip(self._tags_button, "Edit Selected Region's Tags")
    # color chooser (disabled until a non-image region is selected)
    self._fill_color_combo = ttk.Combobox(toolbar,
        values=["black", "blue", "green", "orange", "red", "white"],
        state="readonly")
    self._fill_color_combo.set("black")
    self._fill_color_combo.bind("<<ComboboxSelected>>", self.color_selected)
    self._fill_color_combo.configure(state=Tkinter.DISABLED)
    self._fill_color_combo.pack(side=Tkinter.LEFT, padx=2, pady=2)
    self.create_tooltip(self._fill_color_combo, "Set Selected Region's Fill Color")
    toolbar.pack(fill=Tkinter.X)
def create_radio_button(self, parent, image, tooltip, selected_value):
    """Add a toolbar radio button that selects the tool *selected_value*."""
    photo = ImageTk.PhotoImage(image)
    button = Tkinter.Radiobutton(
        parent, image=photo, indicatoron=False,
        variable=self._radio_selection, value=selected_value,
        command=self.radio_button_click)
    # Keep a reference so the PhotoImage is not garbage collected.
    button.image = photo
    button.pack(side=Tkinter.LEFT, padx=2, pady=2)
    self.create_tooltip(button, tooltip)
def create_toolbar_button(self, parent, image, command, tooltip, enabled=True):
    """Add a plain toolbar push button that invokes *command* when clicked."""
    photo = ImageTk.PhotoImage(image)
    state = Tkinter.NORMAL if enabled else Tkinter.DISABLED
    button = Tkinter.Button(parent, image=photo, relief=Tkinter.RAISED, command=command)
    if state == Tkinter.DISABLED:
        button.configure(state=state)
    # Keep a reference so the PhotoImage is not garbage collected.
    button.image = photo
    button.pack(side=Tkinter.LEFT, padx=2, pady=2)
    self.create_tooltip(button, tooltip)
def create_tooltip(self, widget, text):
    """Attach a hover tooltip displaying *text* to *widget*."""
    tip = ToolTip(widget)
    widget.bind('<Enter>', lambda event: tip.showtip(text))
    widget.bind('<Leave>', lambda event: tip.hidetip())
# target is set when we are editing a target,
# otherwise we are creating a new target
# notifynewfunc is a callback that can be set to see
# when a new target is saved (e.g. the save button is
# hit AND results in a new file). The callback takes
# one parameter (the targets file name)
def __init__(self, parent, webcam_image, target=None,
        notifynewfunc=None):
    # Editor state: the preview shape under the cursor, the currently
    # selected region, and the list of committed region item ids.
    self._cursor_shape = None
    self._selected_region = None
    self._regions = []
    # In-progress freeform polygon data (points, vertex/edge item ids,
    # and the temporary dashed edge following the cursor).
    self._freeform_vertices_points = []
    self._freeform_vertices_ids = []
    self._freeform_edges_ids = []
    self._freeform_temp_line_id = None
    # Cache of animated image frames keyed per image region.
    self._image_regions_images = {}
    self.build_gui(parent, webcam_image)
    # When editing an existing target, load its regions onto the canvas.
    if target is not None:
        target_pickler = TargetPickler()
        (region_object, self._regions) = target_pickler.load(
            target, self._target_canvas, self._canvas_manager,
            self._image_regions_images)
    self._notify_new_target = notifynewfunc
# From: http://www.voidspace.org.uk/python/weblog/arch_d7_2006_07_01.shtml
# This is used instead of Tix because we'd have to convert every widget in the
# editor to use Tix, but we some attributes (e.g. images) that aren't working right
# with Tix in Ubuntu 14.04
class ToolTip(object):
    """Minimal hover tooltip: a borderless Toplevel with a single label,
    shown near the owning widget."""

    def __init__(self, widget):
        self.widget = widget
        self.tipwindow = None
        self.id = None
        self.x = self.y = 0

    def showtip(self, text):
        """Display *text* in a small undecorated window near the widget."""
        self.text = text
        if self.tipwindow or not self.text:
            return
        bbox_x, bbox_y, _cx, cy = self.widget.bbox("insert")
        x = bbox_x + self.widget.winfo_rootx() + 27
        y = bbox_y + cy + self.widget.winfo_rooty() + 27
        tw = Tkinter.Toplevel(self.widget)
        self.tipwindow = tw
        tw.wm_overrideredirect(1)
        tw.wm_geometry("+%d+%d" % (x, y))
        try:
            # For Mac OS: style the window as an unfocusable help balloon
            tw.tk.call("::tk::unsupported::MacWindowStyle",
                       "style", tw._w,
                       "help", "noActivates")
        except Tkinter.TclError:
            pass
        label = Tkinter.Label(
            tw, text=self.text, justify=Tkinter.LEFT,
            background="#ffffe0", relief=Tkinter.SOLID, borderwidth=1,
            font=("tahoma", "8", "normal"))
        label.pack(ipadx=1)

    def hidetip(self):
        """Destroy the tooltip window if it is currently showing."""
        tw, self.tipwindow = self.tipwindow, None
        if tw:
            tw.destroy()
| {
"content_hash": "acb16beb39150541bb35aba52028378b",
"timestamp": "",
"source": "github",
"line_count": 633,
"max_line_length": 113,
"avg_line_length": 42.993680884676145,
"alnum_prop": 0.5769979790556679,
"repo_name": "phrack/ShootOFF-legacy",
"id": "6d85462e3e19ccf05835535fb394d19a3f5b9576",
"size": "27368",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "target_editor.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "170196"
}
],
"symlink_target": ""
} |
"""Util addons functions."""
import hashlib
import re
# Characters that are not allowed in a slug (lowercase alphanumerics and
# underscore are kept).  Not referenced in this chunk -- presumably used
# by other addon utilities; verify before removing.
RE_SLUGIFY = re.compile(r'[^a-z0-9_]+')
# An 8-hex-character short sha1 repository id, as produced by
# get_hash_from_repository() below.
RE_SHA1 = re.compile(r"[a-f0-9]{8}")
def get_hash_from_repository(name):
    """Generate a short (8 hex chars) sha1 id from a repository name.

    The name is lowercased first so ids are case-insensitive.
    """
    digest = hashlib.sha1(name.lower().encode()).hexdigest()
    return digest[:8]
def extract_hash_from_path(path):
    """Extract the repo id from *path* (a pathlib.Path).

    If the last path component already is a generated 8-hex-char id,
    return it as-is; otherwise derive the id from the directory name.
    """
    repo_dir = path.parts[-1]
    # fullmatch, not match: an unanchored match wrongly accepted any
    # directory name merely *starting* with 8 hex chars (e.g.
    # "deadbeef-repo") as an already-generated id.
    if not RE_SHA1.fullmatch(repo_dir):
        return get_hash_from_repository(repo_dir)
    return repo_dir
| {
"content_hash": "d88bc144f44e8dd2c23f2b112c1c812b",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 49,
"avg_line_length": 23.80952380952381,
"alnum_prop": 0.642,
"repo_name": "pvizeli/hassio",
"id": "152c288668439ff9164d79d9ef0bde2175ab075a",
"size": "500",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "hassio/addons/util.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "103291"
},
{
"name": "Python",
"bytes": "113129"
}
],
"symlink_target": ""
} |
"""
Plugin for reactions to specific messages based on combination of network, channel, and specific
phrase, when all of them are matched using regular expressions.
HexChat Python Interface: http://hexchat.readthedocs.io/en/latest/script_python.html
IRC String Formatting: https://github.com/myano/jenni/wiki/IRC-String-Formatting
"""
import logging
from os import path
import re
import sys
import hexchat
# HexChat plugin metadata (read by HexChat's plugin loader).
__module_name__ = 'bot_regex'
__module_description__ = 'Simple private bot'
__module_version__ = '1.0'
# Log everything to a file in the user's home directory.
LOG = '~/bot_regex.log'
FORMAT = '%(asctime)-24s %(levelname)-9s %(message)s'
logging.basicConfig(filename=path.expanduser(LOG), format=FORMAT, level=logging.DEBUG)
def handle_exception(exc_type, exc_value, exc_traceback):
    """Log any uncaught exception before delegating to the default hook."""
    exc_info = (exc_type, exc_value, exc_traceback)
    logging.error('Uncaught exception', exc_info=exc_info)
    sys.__excepthook__(*exc_info)

# Route uncaught exceptions through the logger.
sys.excepthook = handle_exception
def on_case_expand(r):
    """Print a Red Hat support-case URL for the case number captured in *r*."""
    message = '\x0313 https://access.redhat.com/support/cases/#/case/{}'.format(r.group(1))
    hexchat.prnt(message)
# All regexes are checked as case insensitive
# Network | Channel | Phrase
# Maps a network-name pattern to channel patterns to a list of
# (phrase pattern, callback) pairs; consumed by check()/check_debug().
REGEXES = {
    r'RedHat': {
        r'#sbr-security': [
            (r'14(\d{8})', on_case_expand),
        ],
    },
}
def check_debug(network, channel, phrase):
    """Report which configured regexes match, for debugging.

    A message triggers a callback only when its network, channel, and
    phrase regexes all match (case-insensitively).

    Args:
        network (str): active network
        channel (str): active channel
        phrase (str): checked phrase

    Returns:
        list: one entry per successful (partial) match; a full match also
        carries the callback function as its last element.
    """
    results = []
    for net_re, channels in REGEXES.items():
        if not re.search(net_re, network, re.IGNORECASE):
            continue
        results.append([net_re])
        for chan_re, phrase_rules in channels.items():
            if not re.search(chan_re, channel, re.IGNORECASE):
                continue
            results.append([net_re, chan_re])
            for phrase_re, callback in phrase_rules:
                if re.search(phrase_re, phrase, re.IGNORECASE):
                    results.append([net_re, chan_re, phrase_re,
                                    callback])
    return results
def check(network, channel, phrase):
    """Run the configured reactions for a message.

    When a message's network, channel, and phrase all match a configured
    rule (case-insensitively), the rule's callback is invoked with the
    SRE match object of the phrase comparison.

    Args:
        network (str): active network
        channel (str): active channel
        phrase (str): checked phrase
    """
    for net_re in REGEXES:
        if not re.search(net_re, network, re.IGNORECASE):
            continue
        for chan_re in REGEXES[net_re]:
            if not re.search(chan_re, channel, re.IGNORECASE):
                continue
            for phrase_re, callback in REGEXES[net_re][chan_re]:
                match = re.search(phrase_re, phrase, re.IGNORECASE)
                if match:
                    logging.info('Phrase: "{}"'.format(repr(phrase)))
                    callback(match)
def on_debug(word, word_eol, userdata):
    """Handle the '/bot-debug' command: show regex matching details.

    Command usage:
        /bot-debug Tested phrase which will or will call callback
    """
    phrase = word_eol[1]
    network = hexchat.get_info('network')
    channel = hexchat.get_info('channel')
    hexchat.prnt('\x032 active_network = "{}"'.format(network))
    hexchat.prnt('\x032 active_channel = "{}"'.format(channel))
    hexchat.prnt('\x032 phrase = "{}"'.format(phrase))
    for line in check_debug(network, channel, phrase):
        # A 4-element entry means the phrase matched and carries a callback.
        suffix = '\x034 ==> CALLBACK' if len(line) > 3 else ''
        joined = ' | '.join('"{}"'.format(item) for item in line)
        hexchat.prnt('\x032 -> {}{}'.format(joined, suffix))
    check(network, channel, phrase)
    return hexchat.EAT_ALL
def on_check_msg(word, word_eol, userdata):
    """Hook callback: run the configured reactions on every channel message."""
    network = hexchat.get_info('network')
    channel = hexchat.get_info('channel')
    check(network, channel, word[1])
    return hexchat.EAT_NONE
# Announce a successful load and register the message/command hooks.
hexchat.prnt('{}, version {}'.format(__module_name__, __module_version__))
hexchat.hook_print('Channel Message', on_check_msg)
hexchat.hook_print('Channel Action', on_check_msg)
hexchat.hook_command('bot-debug', on_debug)
| {
"content_hash": "d96a1708e7c607afdde645d3fb6f8d91",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 100,
"avg_line_length": 36.18939393939394,
"alnum_prop": 0.6424534226501989,
"repo_name": "skontar/hexchat-plugins",
"id": "6ad6ccbc13dc66fa23b68b7c65770892502d7370",
"size": "4777",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bot_regex.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27935"
}
],
"symlink_target": ""
} |
"""
Test DualMap
------------
"""
from jinja2 import Template
import folium
import folium.plugins
from folium.utilities import normalize
def test_dual_map():
    """DualMap should render both panes and include the Leaflet.Sync glue."""
    m = folium.plugins.DualMap((0, 0))
    # One feature group on the shared map and one on each pane.
    for name, target in (("both", m), ("left", m.m1), ("right", m.m2)):
        folium.FeatureGroup(name=name).add_to(target)
    figure = m.get_root()
    assert isinstance(figure, folium.Figure)
    out = normalize(figure.render())
    script = '<script src="https://cdn.jsdelivr.net/gh/jieter/Leaflet.Sync/L.Map.Sync.min.js"></script>'  # noqa
    assert script in out
    tmpl = Template(
        """
        {{ this.m1.get_name() }}.sync({{ this.m2.get_name() }});
        {{ this.m2.get_name() }}.sync({{ this.m1.get_name() }});
        """
    )
    assert normalize(tmpl.render(this=m)) in out
| {
"content_hash": "ee98d878611948063fa1553b0ba5e0bd",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 112,
"avg_line_length": 24.294117647058822,
"alnum_prop": 0.6174334140435835,
"repo_name": "python-visualization/folium",
"id": "bf15d66c80f67ac1b213301bd684ccc6b5c5adf4",
"size": "826",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/plugins/test_dual_map.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "39887"
},
{
"name": "JavaScript",
"bytes": "268"
},
{
"name": "Python",
"bytes": "375811"
}
],
"symlink_target": ""
} |
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
    """Make sure json library being used does not lose precision converting BTC values"""
    value = Decimal("20000000.00000003")
    round_tripped = json.loads(json.dumps(float(value)))
    if int(round_tripped * 1.0e8) != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
    """Return the default location of the bitcoin data directory"""
    system = platform.system()
    if system == "Darwin":
        return os.path.expanduser("~/Library/Application Support/Bitcoin/")
    if system == "Windows":
        return os.path.join(os.environ['APPDATA'], "Bitcoin")
    # Everything else (Linux, BSD, ...) uses the dotdir convention.
    return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
    """Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
    from ConfigParser import SafeConfigParser  # Python 2 module name

    class FakeSecHead(object):
        # bitcoin.conf has no [section] headers, but ConfigParser requires
        # one -- this wrapper injects a fake "[all]" section before the
        # file content and strips trailing "#" comments from each line.
        def __init__(self, fp):
            self.fp = fp
            self.sechead = '[all]\n'
        def readline(self):
            if self.sechead:
                try: return self.sechead
                finally: self.sechead = None
            else:
                s = self.fp.readline()
                if s.find('#') != -1:
                    s = s[0:s.find('#')].strip() +"\n"
                return s
    config_parser = SafeConfigParser()
    config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
    return dict(config_parser.items("all"))
def connect_JSON(config):
    """Connect to a bitcoin JSON-RPC server.

    Exits the process (status 1) if the connection fails or the server's
    testnet setting does not match the config.
    """
    testnet = config.get('testnet', '0')
    testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
    if 'rpcport' not in config:
        config['rpcport'] = 19451 if testnet else 9451
    connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
    try:
        result = ServiceProxy(connect)
        # ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
        # but also make sure the bitcoind we're talking to is/isn't testnet:
        if result.getmininginfo()['testnet'] != testnet:
            sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
            sys.exit(1)
        return result
    except Exception:
        # Note: the original bare `except:` also caught the SystemExit
        # raised by sys.exit(1) above, masking the mismatch message with
        # a generic connection error.
        sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
        sys.exit(1)
def unlock_wallet(bitcoind):
    """Prompt for the wallet passphrase (if needed) and unlock for 5 seconds.

    Returns True when the wallet is unencrypted or currently unlocked.
    """
    info = bitcoind.getinfo()
    if 'unlocked_until' not in info:
        return True # wallet is not encrypted
    t = int(info['unlocked_until'])
    if t <= time.time():
        try:
            passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
            bitcoind.walletpassphrase(passphrase, 5)
        except Exception:
            # A wrong passphrase surfaces as a JSON-RPC error.  The
            # original bare `except:` also swallowed KeyboardInterrupt.
            sys.stderr.write("Wrong passphrase\n")

    info = bitcoind.getinfo()
    return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
    """Summarize unspent outputs per address.

    Returns {address: {"total": amount, "outputs": [unspent dicts],
    "account": account name}}.
    """
    address_to_account = dict()
    for info in bitcoind.listreceivedbyaddress(0):
        address_to_account[info["address"]] = info["account"]

    address_summary = dict()
    for output in bitcoind.listunspent(0):
        # listunspent doesn't give addresses, so fetch the funding tx:
        rawtx = bitcoind.getrawtransaction(output['txid'], 1)
        vout = rawtx["vout"][output['vout']]
        pk = vout["scriptPubKey"]

        # This code only deals with ordinary pay-to-bitcoin-address
        # or pay-to-script-hash outputs right now; anything exotic is ignored.
        if pk["type"] not in ("pubkeyhash", "scripthash"):
            continue

        address = pk["addresses"][0]
        entry = address_summary.get(address)
        if entry is None:
            address_summary[address] = {
                "total" : vout["value"],
                "outputs" : [output],
                "account" : address_to_account.get(address, "")
                }
        else:
            entry["total"] += vout["value"]
            entry["outputs"].append(output)
    return address_summary
def select_coins(needed, inputs):
    """Greedily pick inputs (in order) until *needed* is covered.

    Returns (outputs, change): outputs is a list of {"txid", "vout"}
    dicts, change is the gathered amount minus needed (negative when the
    inputs are insufficient).
    """
    # Feel free to improve this, this is good enough for my simple needs:
    outputs = []
    gathered = Decimal("0.0")
    for entry in inputs:
        if gathered >= needed:
            break
        outputs.append({ "txid":entry["txid"], "vout":entry["vout"]})
        gathered += entry["amount"]
    return (outputs, gathered-needed)
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
    """Build and sign a raw transaction spending from *fromaddresses*.

    Change (when larger than BASE_FEE) is sent back to the last
    from-address; returns the signed transaction hex.
    """
    all_coins = list_available(bitcoind)

    needed = amount+fee
    potential_inputs = []
    total_available = Decimal("0.0")
    for addr in fromaddresses:
        if addr not in all_coins:
            continue
        potential_inputs.extend(all_coins[addr]["outputs"])
        total_available += all_coins[addr]["total"]

    if total_available < needed:
        sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed))
        sys.exit(1)

    #
    # Note:
    # Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
    # Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
    # Decimals, I'm casting amounts to float before sending them to bitcoind.
    #
    outputs = { toaddress : float(amount) }
    (inputs, change_amount) = select_coins(needed, potential_inputs)
    if change_amount > BASE_FEE: # don't bother with zero or tiny change
        change_address = fromaddresses[-1]
        outputs[change_address] = outputs.get(change_address, 0.0) + float(change_amount)

    rawtx = bitcoind.createrawtransaction(inputs, outputs)
    signed_rawtx = bitcoind.signrawtransaction(rawtx)
    if not signed_rawtx["complete"]:
        sys.stderr.write("signrawtransaction failed\n")
        sys.exit(1)

    return signed_rawtx["hex"]
def compute_amount_in(bitcoind, txinfo):
    """Sum the values of the outputs spent by *txinfo*'s inputs."""
    total = Decimal("0.0")
    for vin in txinfo['vin']:
        funding_tx = bitcoind.getrawtransaction(vin['txid'], 1)
        total += funding_tx['vout'][vin['vout']]['value']
    return total
def compute_amount_out(txinfo):
    """Sum the values of all of *txinfo*'s outputs."""
    return sum((vout['value'] for vout in txinfo['vout']), Decimal("0.0"))
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
    """Decode *txdata_hex* and exit(1) if its implied fee looks unreasonable.

    Rejects a fee larger than *max_fee*, and no-fee transactions that are
    either larger than 1000 bytes or move a tiny amount.
    """
    class FeeError(RuntimeError):
        pass
    try:
        txinfo = bitcoind.decoderawtransaction(txdata_hex)
        total_in = compute_amount_in(bitcoind, txinfo)
        total_out = compute_amount_out(txinfo)
        # The actual fee is whatever the inputs don't pay back out.
        # BUG FIX: the checks below originally referenced a name `fee`
        # that was never defined in this function (it was only a local in
        # main()), so they raised NameError instead of validating.
        actual_fee = total_in - total_out
        if actual_fee > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(actual_fee))

        tx_size = len(txdata_hex)/2
        kb = tx_size/1000  # integer division rounds down
        if kb > 1 and actual_fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and actual_fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
        # Exercise for the reader: compute transaction priority, and
        # warn if this is a very-low-priority transaction
    except FeeError as err:
        sys.stderr.write((str(err)+"\n"))
        sys.exit(1)
def main():
    """Command-line entry point.

    With no --amount, lists the spendable balance per address; otherwise
    builds, sanity-checks, and (unless --dry_run) broadcasts a
    transaction sending --amount from --from addresses to --to.
    """
    import optparse

    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option("--from", dest="fromaddresses", default=None,
        help="addresses to get bitcoins from")
    parser.add_option("--to", dest="to", default=None,
        help="address to get send bitcoins to")
    parser.add_option("--amount", dest="amount", default=None,
        help="amount to send")
    parser.add_option("--fee", dest="fee", default="0.0",
        help="fee to include")
    parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
        help="location of bitcoin.conf file with RPC username/password (default: %default)")
    parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
        help="Use the test network")
    parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
        help="Don't broadcast the transaction, just create and print the transaction data")

    (options, args) = parser.parse_args()

    check_json_precision()
    config = read_bitcoin_config(options.datadir)
    if options.testnet: config['testnet'] = True
    bitcoind = connect_JSON(config)

    if options.amount is None:
        # No amount: just report the spendable balance per address.
        # (Python 2 only: dict.iteritems())
        address_summary = list_available(bitcoind)
        for address,info in address_summary.iteritems():
            n_transactions = len(info['outputs'])
            if n_transactions > 1:
                print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
            else:
                print("%s %.8f %s"%(address, info['total'], info['account']))
    else:
        fee = Decimal(options.fee)
        amount = Decimal(options.amount)
        while unlock_wallet(bitcoind) == False:
            pass # Keep asking for passphrase until they get it right
        txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
        # Refuse fees above 1% of the sent amount.
        sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
        if options.dry_run:
            print(txdata)
        else:
            txid = bitcoind.sendrawtransaction(txdata)
            print(txid)
if __name__ == '__main__':
main()
| {
"content_hash": "0086a96355186d4d1ebbd73b79a6937f",
"timestamp": "",
"source": "github",
"line_count": 252,
"max_line_length": 111,
"avg_line_length": 38.392857142857146,
"alnum_prop": 0.6155038759689923,
"repo_name": "wayneclancy/creepercoin",
"id": "3ec8c8d60e87f47c42f9f30c7aa75d603b366d0f",
"size": "10053",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/spendfrom/spendfrom.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "103297"
},
{
"name": "C++",
"bytes": "2523089"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "IDL",
"bytes": "14708"
},
{
"name": "Objective-C",
"bytes": "5864"
},
{
"name": "Python",
"bytes": "69729"
},
{
"name": "Shell",
"bytes": "9702"
},
{
"name": "TypeScript",
"bytes": "5248749"
}
],
"symlink_target": ""
} |
import json
import os
import stat
import tarfile
import zipfile
from datetime import datetime, timedelta
from django.conf import settings
from django.core.files import temp
from django.core.files.storage import default_storage as storage
from django.test.utils import override_settings
import mock
import responses
import six
from pyquery import PyQuery as pq
from six import text_type
from six.moves.urllib_parse import urlencode
from waffle.testutils import override_switch
from olympia import amo
from olympia.activity.models import ActivityLog
from olympia.addons.models import (
Addon, AddonCategory, AddonReviewerFlags, Category)
from olympia.amo.tests import (
TestCase, addon_factory, create_default_webext_appversion, formset,
initial, version_factory)
from olympia.amo.tests.test_helpers import get_image_path
from olympia.amo.urlresolvers import reverse
from olympia.constants.categories import CATEGORIES_BY_ID
from olympia.constants.licenses import LICENSES_BY_BUILTIN
from olympia.devhub import views
from olympia.files.tests.test_models import UploadTest
from olympia.files.utils import parse_addon
from olympia.lib.akismet.models import AkismetReport
from olympia.lib.git import AddonGitRepository
from olympia.users.models import UserProfile
from olympia.versions.models import License, VersionPreview
from olympia.zadmin.models import Config, set_config
def get_addon_count(name):
    """Count add-ons (soft-deleted included) whose name matches exactly."""
    matching = Addon.unfiltered.filter(name__localized_string=name)
    return matching.count()
def _parse_addon_theme_permission_wrapper(*args, **kwargs):
    """Delegate to parse_addon(), then force the 'theme' permission in.

    Used as a `wraps=` target for mock.patch in the dynamic-theme tests.
    """
    result = parse_addon(*args, **kwargs)
    result['permissions'] = result.get('permissions', []) + ['theme']
    return result
class TestSubmitPersona(TestCase):
    """Tests for the lightweight-theme submission page and its image uploads."""
    fixtures = ['base/user_999']

    def setUp(self):
        super(TestSubmitPersona, self).setUp()
        assert self.client.login(email='regular@mozilla.com')
        self.url = reverse('devhub.themes.submit')

    def get_img_urls(self):
        """Return the ajax image-upload URL(s) used by the submission page."""
        return (
            reverse('devhub.personas.upload_persona', args=['persona_header']),
        )

    def test_img_urls(self):
        """The header input exposes its ajax upload URL via data attribute."""
        response = self.client.get(self.url)
        assert response.status_code == 200
        doc = pq(response.content)
        header_url = self.get_img_urls()[0]
        assert doc('#id_header').attr('data-upload-url') == header_url

    def test_img_size(self):
        """Uploading an image of the wrong dimensions returns a size error."""
        img = get_image_path('mozilla.png')
        for url, img_type in zip(self.get_img_urls(), ('header', )):
            # Use a context manager so the fixture file is closed even if
            # the post or the assertions fail (the original leaked it).
            with open(img, 'rb') as upload:
                r_ajax = self.client.post(url, {'upload_image': upload})
            r_json = json.loads(r_ajax.content)
            w, h = amo.PERSONA_IMAGE_SIZES.get(img_type)[1]
            assert r_json['errors'] == [
                'Image must be exactly %s pixels wide '
                'and %s pixels tall.' % (w, h)]

    def test_img_wrongtype(self):
        """Uploading a non-image file returns a file-type error."""
        # Close the file deterministically instead of leaking the handle.
        with open('static/js/impala/global.js', 'rb') as img:
            for url in self.get_img_urls():
                r_ajax = self.client.post(url, {'upload_image': img})
                r_json = json.loads(r_ajax.content)
                assert r_json['errors'] == ['Images must be either PNG or JPG.']
class TestSubmitBase(TestCase):
    """Shared fixtures and source-archive helpers for the submit-flow tests."""
    fixtures = ['base/addon_3615', 'base/addon_5579', 'base/users']

    def setUp(self):
        super(TestSubmitBase, self).setUp()
        assert self.client.login(email='del@icio.us')
        self.user = UserProfile.objects.get(email='del@icio.us')
        self.addon = self.get_addon()

    def get_addon(self):
        """Fetch a fresh copy of the add-on under test from the database."""
        return Addon.objects.get(pk=3615)

    def get_version(self):
        """Return the most recent version of the add-on under test."""
        return self.get_addon().versions.latest()

    def generate_source_zip(self, suffix='.zip', data='z' * (2 ** 21),
                            compression=zipfile.ZIP_DEFLATED):
        """Build a temp zip archive holding `data` in one member, rewound."""
        tmp_file = temp.NamedTemporaryFile(
            suffix=suffix, dir=temp.gettempdir())
        with zipfile.ZipFile(tmp_file, 'w', compression=compression) as zf:
            zf.writestr('foo', data)
        tmp_file.seek(0)
        return tmp_file

    def generate_source_tar(
            self, suffix='.tar.gz', data='t' * (2 ** 21), mode=None):
        """Build a temp tarball of `data`; compression derives from suffix."""
        tmp_file = temp.NamedTemporaryFile(
            suffix=suffix, dir=temp.gettempdir())
        if mode is None:
            mode = 'w:bz2' if suffix.endswith('.tar.bz2') else 'w:gz'
        with tarfile.open(fileobj=tmp_file, mode=mode) as tf:
            member = tarfile.TarInfo('foo')
            member.size = len(data)
            tf.addfile(member, six.StringIO(data))
        tmp_file.seek(0)
        return tmp_file

    def generate_source_garbage(self, suffix='.zip', data='g' * (2 ** 21)):
        """Build a temp file filled with `data` that is NOT a valid archive."""
        tmp_file = temp.NamedTemporaryFile(
            suffix=suffix, dir=temp.gettempdir())
        tmp_file.write(data)
        tmp_file.seek(0)
        return tmp_file
class TestAddonSubmitAgreementWithPostReviewEnabled(TestSubmitBase):
    """Tests for the developer-agreement step of the submission flow.

    Covers recording acceptance, validation errors, skipping the step when
    the agreement was accepted recently enough, fallbacks for missing or
    unparseable agreement-change dates, and the optional captcha switch.
    """

    def test_set_read_dev_agreement(self):
        """Accepting both checkboxes stores a fresh read_dev_agreement."""
        response = self.client.post(reverse('devhub.submit.agreement'), {
            'distribution_agreement': 'on',
            'review_policy': 'on',
        })
        assert response.status_code == 302
        self.user.reload()
        self.assertCloseToNow(self.user.read_dev_agreement)

    def test_set_read_dev_agreement_error(self):
        """Missing checkboxes re-render the form with per-field errors."""
        set_config('last_dev_agreement_change_date', '2018-01-01 00:00')
        before_agreement_last_changed = (
            datetime(2018, 1, 1) - timedelta(days=1))
        self.user.update(read_dev_agreement=before_agreement_last_changed)
        response = self.client.post(reverse('devhub.submit.agreement'))
        assert response.status_code == 200
        assert 'agreement_form' in response.context
        form = response.context['agreement_form']
        assert form.is_valid() is False
        assert form.errors == {
            'distribution_agreement': [u'This field is required.'],
            'review_policy': [u'This field is required.'],
        }
        doc = pq(response.content)
        for id_ in form.errors.keys():
            selector = 'li input#id_%s + a + .errorlist' % id_
            assert doc(selector).text() == 'This field is required.'

    def test_read_dev_agreement_skip(self):
        """Users who agreed after the last change skip straight ahead."""
        after_agreement_last_changed = (
            datetime(2018, 1, 1) + timedelta(days=1))
        self.user.update(read_dev_agreement=after_agreement_last_changed)
        response = self.client.get(reverse('devhub.submit.agreement'))
        self.assert3xx(response, reverse('devhub.submit.distribution'))

    def test_read_dev_agreement_set_to_future(self):
        """A future change date with a past agreement still skips the step."""
        set_config('last_dev_agreement_change_date', '2099-12-31 00:00')
        read_dev_date = datetime(2018, 1, 1)
        self.user.update(read_dev_agreement=read_dev_date)
        response = self.client.get(reverse('devhub.submit.agreement'))
        self.assert3xx(response, reverse('devhub.submit.distribution'))

    def test_read_dev_agreement_set_to_future_not_agreed_yet(self):
        """Never having agreed always shows the agreement form."""
        set_config('last_dev_agreement_change_date', '2099-12-31 00:00')
        self.user.update(read_dev_agreement=None)
        response = self.client.get(reverse('devhub.submit.agreement'))
        assert response.status_code == 200
        assert 'agreement_form' in response.context

    def test_read_dev_agreement_invalid_date_agreed_post_fallback(self):
        """An unparseable config date falls back to 'already agreed'."""
        set_config('last_dev_agreement_change_date', '2099-25-75 00:00')
        read_dev_date = datetime(2018, 1, 1)
        self.user.update(read_dev_agreement=read_dev_date)
        response = self.client.get(reverse('devhub.submit.agreement'))
        self.assert3xx(response, reverse('devhub.submit.distribution'))

    def test_read_dev_agreement_invalid_date_not_agreed_post_fallback(self):
        """An unparseable config date still shows the form when not agreed.

        The original test also called ``self.assertRaises(ValueError)``
        bare; without a callable argument or a ``with`` block that only
        creates an unused context manager and asserts nothing, so the no-op
        line was removed.  The asserts below are what actually verify the
        fallback behavior.
        """
        set_config('last_dev_agreement_change_date', '2099,31,12,0,0')
        self.user.update(read_dev_agreement=None)
        response = self.client.get(reverse('devhub.submit.agreement'))
        assert response.status_code == 200
        assert 'agreement_form' in response.context

    def test_read_dev_agreement_no_date_configured_agreed_post_fallback(self):
        """With no configured change date, an agreed user skips the step."""
        response = self.client.get(reverse('devhub.submit.agreement'))
        self.assert3xx(response, reverse('devhub.submit.distribution'))

    def test_read_dev_agreement_no_date_configured_not_agreed_post_fallb(self):
        """With no configured change date, a non-agreed user sees the form."""
        self.user.update(read_dev_agreement=None)
        response = self.client.get(reverse('devhub.submit.agreement'))
        assert response.status_code == 200
        assert 'agreement_form' in response.context

    def test_read_dev_agreement_captcha_inactive(self):
        """Without the waffle switch, no recaptcha field is rendered."""
        self.user.update(read_dev_agreement=None)
        response = self.client.get(reverse('devhub.submit.agreement'))
        assert response.status_code == 200
        form = response.context['agreement_form']
        assert 'recaptcha' not in form.fields
        doc = pq(response.content)
        assert doc('.g-recaptcha') == []

    @override_switch('addon-submission-captcha', active=True)
    def test_read_dev_agreement_captcha_active_error(self):
        """With the switch on, posting without the captcha is a form error."""
        self.user.update(read_dev_agreement=None)
        response = self.client.get(reverse('devhub.submit.agreement'))
        assert response.status_code == 200
        form = response.context['agreement_form']
        assert 'recaptcha' in form.fields
        response = self.client.post(reverse('devhub.submit.agreement'))
        # Captcha is properly rendered
        doc = pq(response.content)
        assert doc('.g-recaptcha')
        assert 'recaptcha' in response.context['agreement_form'].errors

    @override_switch('addon-submission-captcha', active=True)
    def test_read_dev_agreement_captcha_active_success(self):
        """A verified captcha response lets the agreement post succeed."""
        self.user.update(read_dev_agreement=None)
        response = self.client.get(reverse('devhub.submit.agreement'))
        assert response.status_code == 200
        form = response.context['agreement_form']
        assert 'recaptcha' in form.fields
        # Captcha is also properly rendered
        doc = pq(response.content)
        assert doc('.g-recaptcha')
        verify_data = urlencode({
            'secret': '',
            'remoteip': '127.0.0.1',
            'response': 'test',
        })
        # Stub out Google's verification endpoint so the captcha validates.
        responses.add(
            responses.GET,
            'https://www.google.com/recaptcha/api/siteverify?' + verify_data,
            json={'error-codes': [], 'success': True})
        response = self.client.post(reverse('devhub.submit.agreement'), data={
            'g-recaptcha-response': 'test',
            'distribution_agreement': 'on',
            'review_policy': 'on',
        })
        assert response.status_code == 302
        assert response['Location'] == reverse('devhub.submit.distribution')
class TestAddonSubmitDistribution(TestCase):
    """Tests for the listed/unlisted channel-selection step."""

    fixtures = ['base/users']

    def setUp(self):
        super(TestAddonSubmitDistribution, self).setUp()
        self.client.login(email='regular@mozilla.com')
        self.user = UserProfile.objects.get(email='regular@mozilla.com')

    def test_check_agreement_okay(self):
        """Accepting the agreement leads to the distribution page cleanly."""
        response = self.client.post(reverse('devhub.submit.agreement'))
        self.assert3xx(response, reverse('devhub.submit.distribution'))
        response = self.client.get(reverse('devhub.submit.distribution'))
        assert response.status_code == 200
        # No error shown for a redirect from previous step.
        # NOTE(review): this compares a str against response.content, which
        # works on Python 2 where str is bytes; under Python 3 the check
        # would need a b'' literal or decode -- confirm during porting.
        assert 'This field is required' not in response.content

    def test_submit_notification_warning(self):
        """A configured warning message is rendered verbatim on the page."""
        config = Config.objects.create(
            key='submit_notification_warning',
            value='Text with <a href="http://example.com">a link</a>.')
        response = self.client.get(reverse('devhub.submit.distribution'))
        assert response.status_code == 200
        doc = pq(response.content)
        assert doc('.notification-box.warning').html().strip() == config.value

    def test_redirect_back_to_agreement(self):
        """Users without a (recent enough) agreement are bounced back."""
        self.user.update(read_dev_agreement=None)
        response = self.client.get(
            reverse('devhub.submit.distribution'), follow=True)
        self.assert3xx(response, reverse('devhub.submit.agreement'))
        # read_dev_agreement needs to be a more recent date than
        # the setting.
        set_config('last_dev_agreement_change_date', '2018-01-01 00:00')
        before_agreement_last_changed = (
            datetime(2018, 1, 1) - timedelta(days=1))
        self.user.update(read_dev_agreement=before_agreement_last_changed)
        response = self.client.get(
            reverse('devhub.submit.distribution'), follow=True)
        self.assert3xx(response, reverse('devhub.submit.agreement'))

    def test_listed_redirects_to_next_step(self):
        """Choosing 'listed' advances to the listed upload step."""
        response = self.client.post(reverse('devhub.submit.distribution'),
                                    {'channel': 'listed'})
        self.assert3xx(response,
                       reverse('devhub.submit.upload', args=['listed']))

    def test_unlisted_redirects_to_next_step(self):
        """Choosing 'unlisted' advances to the unlisted upload step."""
        response = self.client.post(reverse('devhub.submit.distribution'),
                                    {'channel': 'unlisted'})
        self.assert3xx(response, reverse('devhub.submit.upload',
                                         args=['unlisted']))

    def test_channel_selection_error_shown(self):
        """The required-field error appears only after an empty POST."""
        url = reverse('devhub.submit.distribution')
        # First load should have no error
        assert 'This field is required' not in self.client.get(url).content
        # Load with channel preselected (e.g. back from next step) - no error.
        # NOTE(review): Django's test Client.get() has no `args` parameter;
        # this keyword falls through to the extra WSGI environ, so the
        # channel is not actually preselected by this call -- verify the
        # intended request shape (likely a 'channel' query parameter).
        assert 'This field is required' not in self.client.get(
            url, args=['listed']).content
        # A post submission without channel selection should be an error
        assert 'This field is required' in self.client.post(url).content
class TestAddonSubmitUpload(UploadTest, TestCase):
    """Tests for the upload step that actually creates the add-on."""

    fixtures = ['base/users']

    def setUp(self):
        super(TestAddonSubmitUpload, self).setUp()
        create_default_webext_appversion()
        self.upload = self.get_upload('webextension_no_id.xpi')
        assert self.client.login(email='regular@mozilla.com')
        # Accept the developer agreement so the upload step is reachable.
        self.client.post(reverse('devhub.submit.agreement'))

    def post(self, compatible_apps=None, expect_errors=False,
             listed=True, status_code=200, url=None, extra_kwargs=None):
        """Submit self.upload to the upload step and sanity-check the reply.

        Unless expect_errors is True, any form error on new_addon_form is
        surfaced as an assertion failure so tests fail loudly.
        """
        if compatible_apps is None:
            compatible_apps = [amo.FIREFOX, amo.ANDROID]
        data = {
            'upload': self.upload.uuid.hex,
            'compatible_apps': [p.id for p in compatible_apps]
        }
        url = url or reverse('devhub.submit.upload',
                             args=['listed' if listed else 'unlisted'])
        response = self.client.post(
            url, data, follow=True, **(extra_kwargs or {}))
        assert response.status_code == status_code
        if not expect_errors:
            # Show any unexpected form errors.
            if response.context and 'new_addon_form' in response.context:
                assert (
                    response.context['new_addon_form'].errors.as_text() == '')
        return response

    def test_unique_name(self):
        """Submitting a name already used by a listed add-on is accepted."""
        addon_factory(name='Beastify')
        self.post(expect_errors=False)

    def test_unlisted_name_not_unique(self):
        """We don't enforce name uniqueness for unlisted add-ons."""
        addon_factory(name='Beastify',
                      version_kw={'channel': amo.RELEASE_CHANNEL_LISTED})
        assert get_addon_count('Beastify') == 1
        # We're not passing `expected_errors=True`, so if there was any errors
        # like "This name is already in use. Please choose another one", the
        # test would fail.
        response = self.post()
        # Kind of redundant with the `self.post()` above: we just want to make
        # really sure there's no errors raised by posting an add-on with a name
        # that is already used by an unlisted add-on.
        assert 'new_addon_form' not in response.context
        assert get_addon_count('Beastify') == 2

    def test_name_not_unique_between_types(self):
        """We don't enforce name uniqueness between add-ons types."""
        addon_factory(name='Beastify', type=amo.ADDON_THEME)
        assert get_addon_count('Beastify') == 1
        # We're not passing `expected_errors=True`, so if there was any errors
        # like "This name is already in use. Please choose another one", the
        # test would fail.
        response = self.post()
        # Kind of redundant with the `self.post()` above: we just want to make
        # really sure there's no errors raised by posting an add-on with a name
        # that is already used by an unlisted add-on.
        assert 'new_addon_form' not in response.context
        assert get_addon_count('Beastify') == 2

    def test_success_listed(self):
        """A listed upload creates the add-on, logs it, and redirects on."""
        assert Addon.objects.count() == 0
        response = self.post()
        addon = Addon.objects.get()
        version = addon.find_latest_version(channel=amo.RELEASE_CHANNEL_LISTED)
        assert version
        assert version.channel == amo.RELEASE_CHANNEL_LISTED
        self.assert3xx(
            response, reverse('devhub.submit.source', args=[addon.slug]))
        log_items = ActivityLog.objects.for_addons(addon)
        assert log_items.filter(action=amo.LOG.CREATE_ADDON.id), (
            'New add-on creation never logged.')
        assert not addon.tags.filter(tag_text='dynamic theme').exists()

    @mock.patch('olympia.reviewers.utils.sign_file')
    def test_success_unlisted(self, mock_sign_file):
        """Sign automatically."""
        assert Addon.objects.count() == 0
        # No validation errors or warning.
        result = {
            'errors': 0,
            'warnings': 0,
            'notices': 2,
            'metadata': {},
            'messages': [],
        }
        self.upload = self.get_upload(
            'extension.xpi', validation=json.dumps(result))
        self.post(listed=False)
        addon = Addon.objects.get()
        version = addon.find_latest_version(
            channel=amo.RELEASE_CHANNEL_UNLISTED)
        assert version
        assert version.channel == amo.RELEASE_CHANNEL_UNLISTED
        assert addon.status == amo.STATUS_NULL
        assert mock_sign_file.called
        assert not addon.tags.filter(tag_text='dynamic theme').exists()

    def test_missing_compatible_apps(self):
        """Omitting compatible_apps produces the expected form error."""
        url = reverse('devhub.submit.upload', args=['listed'])
        response = self.client.post(url, {'upload': self.upload.uuid.hex})
        assert response.status_code == 200
        assert response.context['new_addon_form'].errors.as_text() == (
            '* compatible_apps\n * Need to select at least one application.')
        doc = pq(response.content)
        assert doc('ul.errorlist').text() == (
            'Need to select at least one application.')

    def test_default_supported_platforms(self):
        """Test that we default to PLATFORM_ALL during submission.

        This is temporarily while we're in process of getting rid
        of supported platforms.

        https://github.com/mozilla/addons-server/issues/8752
        """
        response = self.post()
        addon = Addon.objects.get()
        # Success, redirecting to source submission step.
        self.assert3xx(
            response, reverse('devhub.submit.source', args=[addon.slug]))
        # Check that `all_files` is correct
        all_ = sorted([f.filename for f in addon.current_version.all_files])
        assert all_ == [u'beastify-1.0-an+fx.xpi']
        # Default to PLATFORM_ALL
        assert addon.current_version.supported_platforms == [amo.PLATFORM_ALL]
        # And check that compatible apps have a sensible default too
        apps = addon.current_version.compatible_apps.keys()
        assert sorted(apps) == sorted([amo.FIREFOX, amo.ANDROID])

    @mock.patch('olympia.devhub.views.auto_sign_file')
    def test_one_xpi_for_multiple_apps_unlisted_addon(
            self, mock_auto_sign_file):
        """One XPI serves all apps; unlisted files get auto-signed."""
        assert Addon.objects.count() == 0
        response = self.post(
            compatible_apps=[amo.FIREFOX, amo.ANDROID], listed=False)
        addon = Addon.unfiltered.get()
        latest_version = addon.find_latest_version(
            channel=amo.RELEASE_CHANNEL_UNLISTED)
        self.assert3xx(
            response, reverse('devhub.submit.source', args=[addon.slug]))
        all_ = sorted([f.filename for f in latest_version.all_files])
        assert all_ == [u'beastify-1.0-an+fx.xpi']
        # Every uploaded file must have gone through auto-signing.
        mock_auto_sign_file.assert_has_calls([
            mock.call(f)
            for f in latest_version.all_files])

    def test_static_theme_wizard_button_shown(self):
        """Both channels' upload pages link to the static-theme wizard."""
        response = self.client.get(reverse(
            'devhub.submit.upload', args=['listed']), follow=True)
        assert response.status_code == 200
        doc = pq(response.content)
        assert doc('#wizardlink')
        assert doc('#wizardlink').attr('href') == (
            reverse('devhub.submit.wizard', args=['listed']))
        response = self.client.get(reverse(
            'devhub.submit.upload', args=['unlisted']), follow=True)
        assert response.status_code == 200
        doc = pq(response.content)
        assert doc('#wizardlink')
        assert doc('#wizardlink').attr('href') == (
            reverse('devhub.submit.wizard', args=['unlisted']))

    def test_static_theme_submit_listed(self):
        """A listed static theme gets previews and skips the source step."""
        assert Addon.objects.count() == 0
        path = os.path.join(
            settings.ROOT, 'src/olympia/devhub/tests/addons/static_theme.zip')
        self.upload = self.get_upload(abspath=path)
        response = self.post()
        addon = Addon.objects.get()
        self.assert3xx(
            response, reverse('devhub.submit.details', args=[addon.slug]))
        all_ = sorted([f.filename for f in addon.current_version.all_files])
        assert all_ == [u'weta_fade-1.0.xpi']  # One XPI for all platforms.
        assert addon.type == amo.ADDON_STATICTHEME
        previews = list(addon.current_version.previews.all())
        assert len(previews) == 3
        assert storage.exists(previews[0].image_path)
        assert storage.exists(previews[1].image_path)
        assert storage.exists(previews[2].image_path)

    def test_static_theme_submit_unlisted(self):
        """An unlisted static theme goes straight to finish, no previews."""
        assert Addon.unfiltered.count() == 0
        path = os.path.join(
            settings.ROOT, 'src/olympia/devhub/tests/addons/static_theme.zip')
        self.upload = self.get_upload(abspath=path)
        response = self.post(listed=False)
        addon = Addon.unfiltered.get()
        latest_version = addon.find_latest_version(
            channel=amo.RELEASE_CHANNEL_UNLISTED)
        self.assert3xx(
            response, reverse('devhub.submit.finish', args=[addon.slug]))
        all_ = sorted([f.filename for f in latest_version.all_files])
        assert all_ == [u'weta_fade-1.0.xpi']  # One XPI for all platforms.
        assert addon.type == amo.ADDON_STATICTHEME
        # Only listed submissions need a preview generated.
        assert latest_version.previews.all().count() == 0

    def test_static_theme_wizard_listed(self):
        """The listed wizard renders its template and accepts the zip."""
        # Check we get the correct template.
        url = reverse('devhub.submit.wizard', args=['listed'])
        response = self.client.get(url)
        assert response.status_code == 200
        doc = pq(response.content)
        assert doc('#theme-wizard')
        assert doc('#theme-wizard').attr('data-version') == '1.0'
        assert doc('input#theme-name').attr('type') == 'text'
        # And then check the upload works. In reality the zip is generated
        # client side in JS but the zip file is the same.
        assert Addon.objects.count() == 0
        path = os.path.join(
            settings.ROOT, 'src/olympia/devhub/tests/addons/static_theme.zip')
        self.upload = self.get_upload(abspath=path)
        response = self.post(url=url)
        addon = Addon.objects.get()
        # Next step is same as non-wizard flow too.
        self.assert3xx(
            response, reverse('devhub.submit.details', args=[addon.slug]))
        all_ = sorted([f.filename for f in addon.current_version.all_files])
        assert all_ == [u'weta_fade-1.0.xpi']  # One XPI for all platforms.
        assert addon.type == amo.ADDON_STATICTHEME
        previews = list(addon.current_version.previews.all())
        assert len(previews) == 3
        assert storage.exists(previews[0].image_path)
        assert storage.exists(previews[1].image_path)
        assert storage.exists(previews[2].image_path)

    def test_static_theme_wizard_unlisted(self):
        """The unlisted wizard renders and uploads without previews."""
        # Check we get the correct template.
        url = reverse('devhub.submit.wizard', args=['unlisted'])
        response = self.client.get(url)
        assert response.status_code == 200
        doc = pq(response.content)
        assert doc('#theme-wizard')
        assert doc('#theme-wizard').attr('data-version') == '1.0'
        assert doc('input#theme-name').attr('type') == 'text'
        # And then check the upload works. In reality the zip is generated
        # client side in JS but the zip file is the same.
        assert Addon.unfiltered.count() == 0
        path = os.path.join(
            settings.ROOT, 'src/olympia/devhub/tests/addons/static_theme.zip')
        self.upload = self.get_upload(abspath=path)
        response = self.post(url=url, listed=False)
        addon = Addon.unfiltered.get()
        latest_version = addon.find_latest_version(
            channel=amo.RELEASE_CHANNEL_UNLISTED)
        # Next step is same as non-wizard flow too.
        self.assert3xx(
            response, reverse('devhub.submit.finish', args=[addon.slug]))
        all_ = sorted([f.filename for f in latest_version.all_files])
        assert all_ == [u'weta_fade-1.0.xpi']  # One XPI for all platforms.
        assert addon.type == amo.ADDON_STATICTHEME
        # Only listed submissions need a preview generated.
        assert latest_version.previews.all().count() == 0

    @mock.patch('olympia.devhub.forms.parse_addon',
                wraps=_parse_addon_theme_permission_wrapper)
    def test_listed_dynamic_theme_is_tagged(self, parse_addon_mock):
        """Listed uploads with the 'theme' permission get the tag."""
        assert Addon.objects.count() == 0
        path = os.path.join(
            settings.ROOT,
            'src/olympia/devhub/tests/addons/valid_webextension.xpi')
        self.upload = self.get_upload(abspath=path)
        response = self.post()
        addon = Addon.objects.get()
        self.assert3xx(
            response, reverse('devhub.submit.source', args=[addon.slug]))
        assert addon.tags.filter(tag_text='dynamic theme').exists()

    @mock.patch('olympia.devhub.forms.parse_addon',
                wraps=_parse_addon_theme_permission_wrapper)
    def test_unlisted_dynamic_theme_isnt_tagged(self, parse_addon_mock):
        """Unlisted uploads are never tagged as dynamic themes."""
        assert Addon.objects.count() == 0
        path = os.path.join(
            settings.ROOT,
            'src/olympia/devhub/tests/addons/valid_webextension.xpi')
        self.upload = self.get_upload(abspath=path)
        response = self.post(listed=False)
        addon = Addon.objects.get()
        self.assert3xx(
            response, reverse('devhub.submit.source', args=[addon.slug]))
        assert not addon.tags.filter(tag_text='dynamic theme').exists()
class TestAddonSubmitSource(TestSubmitBase):
    """Tests for the optional source-code upload step of the submit flow."""

    def setUp(self):
        super(TestAddonSubmitSource, self).setUp()
        assert not self.get_version().source
        self.url = reverse('devhub.submit.source', args=[self.addon.slug])
        self.next_url = reverse(
            'devhub.submit.details', args=[self.addon.slug])

    def post(self, has_source, source, expect_errors=False, status_code=200):
        """Post the has_source/source form; fail on unexpected form errors."""
        data = {
            'has_source': 'yes' if has_source else 'no',
            'source': source,
        }
        response = self.client.post(self.url, data, follow=True)
        assert response.status_code == status_code
        if not expect_errors:
            # Show any unexpected form errors.
            if response.context and 'form' in response.context:
                assert response.context['form'].errors == {}
        return response

    @override_settings(FILE_UPLOAD_MAX_MEMORY_SIZE=1)
    def test_submit_source(self):
        """A zip source upload is stored and flags admin code review."""
        response = self.post(
            has_source=True, source=self.generate_source_zip())
        self.assert3xx(response, self.next_url)
        self.addon = self.addon.reload()
        assert self.get_version().source
        assert self.addon.needs_admin_code_review
        # Stored source files should be a regular file with 0644 permissions.
        # NOTE(review): '0100644' is the Python 2 oct() spelling; Python 3
        # renders the same mode as '0o100644' -- revisit when porting.
        mode = (
            oct(os.stat(self.get_version().source.path)[stat.ST_MODE]))
        assert mode == '0100644'

    def test_submit_source_targz(self):
        """A .tar.gz source upload is stored and flags admin code review."""
        response = self.post(
            has_source=True, source=self.generate_source_tar())
        self.assert3xx(response, self.next_url)
        self.addon = self.addon.reload()
        assert self.get_version().source
        assert self.addon.needs_admin_code_review
        mode = (
            oct(os.stat(self.get_version().source.path)[stat.ST_MODE]))
        assert mode == '0100644'

    def test_submit_source_tgz(self):
        """A .tgz source upload is stored and flags admin code review."""
        response = self.post(
            has_source=True, source=self.generate_source_tar(
                suffix='.tgz'))
        self.assert3xx(response, self.next_url)
        self.addon = self.addon.reload()
        assert self.get_version().source
        assert self.addon.needs_admin_code_review
        mode = (
            oct(os.stat(self.get_version().source.path)[stat.ST_MODE]))
        assert mode == '0100644'

    def test_submit_source_tarbz2(self):
        """A .tar.bz2 source upload is stored and flags admin code review."""
        response = self.post(
            has_source=True, source=self.generate_source_tar(
                suffix='.tar.bz2'))
        self.assert3xx(response, self.next_url)
        self.addon = self.addon.reload()
        assert self.get_version().source
        assert self.addon.needs_admin_code_review
        mode = (
            oct(os.stat(self.get_version().source.path)[stat.ST_MODE]))
        assert mode == '0100644'

    @override_settings(FILE_UPLOAD_MAX_MEMORY_SIZE=1)
    def test_say_no_but_submit_source_anyway_fails(self):
        """Uploading a file while answering 'no' is a contradiction error."""
        response = self.post(
            has_source=False, source=self.generate_source_zip(),
            expect_errors=True)
        assert response.context['form'].errors == {
            'source': [
                u'Source file uploaded but you indicated no source was needed.'
            ]
        }
        self.addon = self.addon.reload()
        assert not self.get_version().source
        assert not self.addon.needs_admin_code_review

    def test_say_yes_but_dont_submit_source_fails(self):
        """Answering 'yes' without attaching a file is an error."""
        response = self.post(
            has_source=True, source=None, expect_errors=True)
        assert response.context['form'].errors == {
            'source': [u'You have not uploaded a source file.']
        }
        self.addon = self.addon.reload()
        assert not self.get_version().source
        assert not self.addon.needs_admin_code_review

    @override_settings(FILE_UPLOAD_MAX_MEMORY_SIZE=2 ** 22)
    def test_submit_source_in_memory_upload(self):
        """Small zips handled in memory (not temp files) still store fine."""
        source = self.generate_source_zip()
        source_size = os.stat(source.name)[stat.ST_SIZE]
        assert source_size < settings.FILE_UPLOAD_MAX_MEMORY_SIZE
        response = self.post(has_source=True, source=source)
        self.assert3xx(response, self.next_url)
        self.addon = self.addon.reload()
        assert self.get_version().source
        assert self.addon.needs_admin_code_review
        mode = (
            oct(os.stat(self.get_version().source.path)[stat.ST_MODE]))
        assert mode == '0100644'

    @override_settings(FILE_UPLOAD_MAX_MEMORY_SIZE=2 ** 22)
    def test_submit_source_in_memory_upload_with_targz(self):
        """Small tarballs handled in memory still store fine."""
        source = self.generate_source_tar()
        source_size = os.stat(source.name)[stat.ST_SIZE]
        assert source_size < settings.FILE_UPLOAD_MAX_MEMORY_SIZE
        response = self.post(has_source=True, source=source)
        self.assert3xx(response, self.next_url)
        self.addon = self.addon.reload()
        assert self.get_version().source
        assert self.addon.needs_admin_code_review
        mode = (
            oct(os.stat(self.get_version().source.path)[stat.ST_MODE]))
        assert mode == '0100644'

    def test_with_bad_source_extension(self):
        """Unsupported archive extensions are rejected up front."""
        response = self.post(
            has_source=True, source=self.generate_source_zip(suffix='.exe'),
            expect_errors=True)
        assert response.context['form'].errors == {
            'source': [
                u'Unsupported file type, please upload an archive file '
                u'(.zip, .tar.gz, .tar.bz2).'],
        }
        self.addon = self.addon.reload()
        assert not self.get_version().source
        assert not self.addon.needs_admin_code_review

    def test_with_non_compressed_tar(self):
        """A plain tar masquerading as .tar.gz is rejected as broken."""
        response = self.post(
            # Generate a .tar.gz which is actually not compressed.
            has_source=True, source=self.generate_source_tar(mode='w'),
            expect_errors=True)
        assert response.context['form'].errors == {
            'source': [u'Invalid or broken archive.'],
        }
        self.addon = self.addon.reload()
        assert not self.get_version().source
        assert not self.addon.needs_admin_code_review

    def test_with_bad_source_not_an_actual_archive(self):
        """Garbage bytes with a .zip suffix are rejected as broken."""
        response = self.post(
            has_source=True, source=self.generate_source_garbage(
                suffix='.zip'), expect_errors=True)
        assert response.context['form'].errors == {
            'source': [u'Invalid or broken archive.'],
        }
        self.addon = self.addon.reload()
        assert not self.get_version().source
        assert not self.addon.needs_admin_code_review

    def test_with_bad_source_broken_archive(self):
        """A zip with corrupted member data fails deep validation."""
        source = self.generate_source_zip(
            data='Hello World', compression=zipfile.ZIP_STORED)
        # Corrupt the stored member in place; the central directory stays
        # intact so is_zipfile() still succeeds below.
        data = source.read().replace('Hello World', 'dlroW olleH')
        source.seek(0)  # First seek to rewrite from the beginning
        source.write(data)
        source.seek(0)  # Second seek to reset like it's fresh.
        # Still looks like a zip at first glance.
        assert zipfile.is_zipfile(source)
        source.seek(0)  # Last seek to reset source descriptor before posting.
        response = self.post(
            has_source=True, source=source, expect_errors=True)
        assert response.context['form'].errors == {
            'source': [u'Invalid or broken archive.'],
        }
        self.addon = self.addon.reload()
        assert not self.get_version().source
        assert not self.addon.needs_admin_code_review

    def test_with_bad_source_broken_archive_compressed_tar(self):
        """A truncated compressed tar fails deep validation."""
        source = self.generate_source_tar()
        with open(source.name, "r+b") as fobj:
            fobj.truncate(512)
        # Still looks like a tar at first glance.
        assert tarfile.is_tarfile(source.name)
        # Re-open and post.
        with open(source.name, 'rb'):
            response = self.post(
                has_source=True, source=source, expect_errors=True)
        assert response.context['form'].errors == {
            'source': [u'Invalid or broken archive.'],
        }
        self.addon = self.addon.reload()
        assert not self.get_version().source
        assert not self.addon.needs_admin_code_review

    def test_no_source(self):
        """Answering 'no source needed' proceeds without flags."""
        response = self.post(has_source=False, source=None)
        self.assert3xx(response, self.next_url)
        self.addon = self.addon.reload()
        assert not self.get_version().source
        assert not self.addon.needs_admin_code_review

    def test_non_extension_redirects_past_to_details(self):
        """Only extensions see the source step; other types skip it."""
        # static themes should redirect
        self.addon.update(type=amo.ADDON_STATICTHEME)
        response = self.client.get(self.url)
        self.assert3xx(response, self.next_url)
        # extensions shouldn't redirect
        self.addon.update(type=amo.ADDON_EXTENSION)
        response = self.client.get(self.url)
        assert response.status_code == 200
        # check another non-extension type also redirects
        self.addon.update(type=amo.ADDON_DICT)
        response = self.client.get(self.url)
        self.assert3xx(response, self.next_url)

    @override_settings(FILE_UPLOAD_MAX_MEMORY_SIZE=1)
    @override_switch('enable-uploads-commit-to-git-storage', active=False)
    def test_submit_source_doesnt_commit_to_git_by_default(self):
        """With the git switch off, no source repository is created."""
        response = self.post(
            has_source=True, source=self.generate_source_zip())
        self.assert3xx(response, self.next_url)
        self.addon = self.addon.reload()
        assert self.get_version().source
        repo = AddonGitRepository(self.addon.pk, package_type='source')
        assert not os.path.exists(repo.git_repository_path)

    @override_switch('enable-uploads-commit-to-git-storage', active=True)
    def test_submit_source_commits_to_git(self):
        """With the git switch on, the source is committed to a repo."""
        response = self.post(
            has_source=True, source=self.generate_source_zip())
        self.assert3xx(response, self.next_url)
        self.addon = self.addon.reload()
        assert self.get_version().source
        repo = AddonGitRepository(self.addon.pk, package_type='source')
        assert os.path.exists(repo.git_repository_path)
class DetailsPageMixin(object):
    """ Some common methods between TestAddonSubmitDetails and
    TestStaticThemeSubmitDetails."""

    def is_success(self, data):
        # Post `data` to the details step and assert that doing so completed
        # the required metadata and nominated the add-on for review.
        assert self.get_addon().status == amo.STATUS_NULL
        response = self.client.post(self.url, data)
        assert all(self.get_addon().get_required_metadata())
        assert response.status_code == 302
        assert self.get_addon().status == amo.STATUS_NOMINATED
        return response

    def test_submit_name_existing(self):
        """Test that we can submit two add-ons with the same name."""
        qs = Addon.objects.filter(name__localized_string='Cooliris')
        assert qs.count() == 1
        self.is_success(self.get_dict(name='Cooliris'))
        assert qs.count() == 2

    def test_submit_name_length(self):
        # Make sure the name isn't too long.
        data = self.get_dict(name='a' * 51)
        response = self.client.post(self.url, data)
        assert response.status_code == 200
        error = 'Ensure this value has at most 50 characters (it has 51).'
        self.assertFormError(response, 'form', 'name', error)

    def test_submit_name_symbols_only(self):
        # A name made up only of symbols is rejected by the form.
        data = self.get_dict(name='()+([#')
        response = self.client.post(self.url, data)
        assert response.status_code == 200
        error = (
            'Ensure this field contains at least one letter or number'
            ' character.')
        self.assertFormError(response, 'form', 'name', error)
        data = self.get_dict(name='±↡∋⌚')
        response = self.client.post(self.url, data)
        assert response.status_code == 200
        error = (
            'Ensure this field contains at least one letter or number'
            ' character.')
        self.assertFormError(response, 'form', 'name', error)
        # 'ø' is not a symbol, it's actually a letter, so it should be valid.
        data = self.get_dict(name=u'ø')
        response = self.client.post(self.url, data)
        assert response.status_code == 302
        assert self.get_addon().name == u'ø'

    def test_submit_slug_invalid(self):
        # Submit an invalid slug.
        data = self.get_dict(slug='slug!!! aksl23%%')
        response = self.client.post(self.url, data)
        assert response.status_code == 200
        self.assertFormError(response, 'form', 'slug', "Enter a valid 'slug'" +
                             ' consisting of letters, numbers, underscores or '
                             'hyphens.')

    def test_submit_slug_required(self):
        # Make sure the slug is required.
        response = self.client.post(self.url, self.get_dict(slug=''))
        assert response.status_code == 200
        self.assertFormError(
            response, 'form', 'slug', 'This field is required.')

    def test_submit_summary_required(self):
        # Make sure summary is required.
        response = self.client.post(self.url, self.get_dict(summary=''))
        assert response.status_code == 200
        self.assertFormError(
            response, 'form', 'summary', 'This field is required.')

    def test_submit_summary_symbols_only(self):
        # A summary made up only of symbols is rejected by the form.
        data = self.get_dict(summary='()+([#')
        response = self.client.post(self.url, data)
        assert response.status_code == 200
        error = (
            'Ensure this field contains at least one letter or number'
            ' character.')
        self.assertFormError(response, 'form', 'summary', error)
        data = self.get_dict(summary='±↡∋⌚')
        response = self.client.post(self.url, data)
        assert response.status_code == 200
        error = (
            'Ensure this field contains at least one letter or number'
            ' character.')
        self.assertFormError(response, 'form', 'summary', error)
        # 'ø' is not a symbol, it's actually a letter, so it should be valid.
        data = self.get_dict(summary=u'ø')
        response = self.client.post(self.url, data)
        assert response.status_code == 302
        assert self.get_addon().summary == u'ø'

    def test_submit_summary_length(self):
        # Summary is too long.
        response = self.client.post(self.url, self.get_dict(summary='a' * 251))
        assert response.status_code == 200
        error = 'Ensure this value has at most 250 characters (it has 251).'
        self.assertFormError(response, 'form', 'summary', error)

    def test_nomination_date_set_only_once(self):
        self.get_version().update(nomination=None)
        self.is_success(self.get_dict())
        self.assertCloseToNow(self.get_version().nomination)
        # Check nomination date is only set once, see bug 632191.
        nomdate = datetime.now() - timedelta(days=5)
        self.get_version().update(nomination=nomdate, _signal=False)
        # Update something else in the addon:
        self.get_addon().update(slug='foobar')
        # The nomination date must not have been touched by the later update.
        assert self.get_version().nomination.timetuple()[0:5] == (
            nomdate.timetuple()[0:5])

    def test_submit_details_unlisted_should_redirect(self):
        # Unlisted versions skip the details step and go straight to the
        # next step.
        version = self.get_addon().versions.latest()
        version.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
        response = self.client.get(self.url)
        self.assert3xx(response, self.next_step)

    def test_can_cancel_review(self):
        addon = self.get_addon()
        addon.versions.latest().files.update(status=amo.STATUS_AWAITING_REVIEW)
        cancel_url = reverse('devhub.addons.cancel', args=['a3615'])
        versions_url = reverse('devhub.addons.versions', args=['a3615'])
        response = self.client.post(cancel_url)
        self.assert3xx(response, versions_url)
        # Cancelling review resets the add-on and disables the pending file.
        addon = self.get_addon()
        assert addon.status == amo.STATUS_NULL
        version = addon.versions.latest()
        # Drop the (presumably cached) file list so `statuses` reflects the
        # post-cancel file status — TODO confirm all_files is a cached
        # property.
        del version.all_files
        assert version.statuses == [
            (version.all_files[0].id, amo.STATUS_DISABLED)]

    @override_switch('akismet-spam-check', active=False)
    @mock.patch('olympia.lib.akismet.tasks.AkismetReport.comment_check')
    def test_akismet_spam_check_waffle_off(self, comment_check_mock):
        # With the waffle switch off no akismet check happens at all.
        data = self.get_dict(name=u'spám')
        self.is_success(data)
        comment_check_mock.assert_not_called()
        assert AkismetReport.objects.count() == 0

    @override_switch('akismet-spam-check', active=True)
    @override_switch('akismet-addon-action', active=True)
    @mock.patch('olympia.lib.akismet.tasks.AkismetReport.comment_check')
    def test_akismet_spam_check_spam_action_taken(self, comment_check_mock):
        # With the action switch on, detected spam blocks the submission
        # with a form error and the name is not saved.
        comment_check_mock.return_value = AkismetReport.MAYBE_SPAM
        data = self.get_dict(name=u'spám', summary=self.addon.summary)
        response = self.client.post(self.url, data)
        assert response.status_code == 200
        self.assertFormError(
            response, 'form', 'name',
            'The text entered has been flagged as spam.')
        # the summary won't be comment_check'd because it didn't change.
        self.addon = self.addon.reload()
        assert AkismetReport.objects.count() == 1
        report = AkismetReport.objects.get()
        assert report.comment_type == 'product-name'
        assert report.comment == u'spám'
        assert text_type(self.addon.name) != u'spám'
        comment_check_mock.assert_called_once()

    @override_switch('akismet-spam-check', active=True)
    @override_switch('akismet-addon-action', active=False)
    @mock.patch('olympia.lib.akismet.tasks.AkismetReport.comment_check')
    def test_akismet_spam_check_spam_logging_only(self, comment_check_mock):
        # With the action switch off, spam is only reported: the submission
        # succeeds and the spammy name is saved.
        comment_check_mock.return_value = AkismetReport.MAYBE_SPAM
        data = self.get_dict(name=u'spám', summary=self.addon.summary)
        response = self.is_success(data)
        # the summary won't be comment_check'd because it didn't change.
        self.addon = self.addon.reload()
        assert AkismetReport.objects.count() == 1
        report = AkismetReport.objects.get()
        assert report.comment_type == 'product-name'
        assert report.comment == u'spám'
        assert text_type(self.addon.name) == u'spám'
        assert 'spam' not in response.content
        comment_check_mock.assert_called_once()

    @override_switch('akismet-spam-check', active=True)
    @mock.patch('olympia.lib.akismet.tasks.AkismetReport.comment_check')
    def test_akismet_spam_check_ham(self, comment_check_mock):
        # Content classified as ham is still reported but the submission
        # succeeds without any spam error.
        comment_check_mock.return_value = AkismetReport.HAM
        data = self.get_dict(name=u'spám', summary=self.addon.summary)
        response = self.is_success(data)
        # the summary won't be comment_check'd because it didn't change.
        assert comment_check_mock.call_count == 1
        assert AkismetReport.objects.count() == 1
        report = AkismetReport.objects.get()
        assert report.comment_type == 'product-name'
        assert report.comment == u'spám'
        assert 'spam' not in response.content

    @override_switch('akismet-spam-check', active=True)
    @mock.patch('olympia.lib.akismet.tasks.AkismetReport.comment_check')
    def test_akismet_spam_check_no_changes(self, comment_check_mock):
        # Don't change either name or summary from the upload.
        data = self.get_dict(name=self.addon.name, summary=self.addon.summary)
        self.is_success(data)
        comment_check_mock.assert_not_called()
        assert AkismetReport.objects.count() == 0

    @override_switch('content-optimization', active=False)
    def test_name_summary_lengths_short(self):
        # check the separate name and summary labels, etc are served
        response = self.client.get(self.url)
        assert 'Name and Summary' not in response.content
        assert 'It will be shown in listings and searches' in response.content
        data = self.get_dict(name='a', summary='b')
        self.is_success(data)

    @override_switch('content-optimization', active=False)
    def test_name_summary_lengths_long(self):
        # Without content-optimization there is no combined length limit.
        data = self.get_dict(name='a' * 50, summary='b' * 50)
        self.is_success(data)

    @override_switch('content-optimization', active=True)
    def test_name_summary_lengths_content_optimization(self):
        # check the combined name and summary label, etc are served
        response = self.client.get(self.url)
        assert 'Name and Summary' in response.content
        # name and summary are too short
        response = self.client.post(
            self.url, self.get_dict(
                name='a', summary='b', description='c' * 10))
        assert self.get_addon().name != 'a'
        assert self.get_addon().summary != 'b'
        assert response.status_code == 200
        self.assertFormError(
            response, 'form', 'name',
            'Ensure this value has at least 2 characters (it has 1).')
        self.assertFormError(
            response, 'form', 'summary',
            'Ensure this value has at least 2 characters (it has 1).')
        # name and summary individually are okay, but together are too long
        response = self.client.post(
            self.url, self.get_dict(
                name='a' * 50, summary='b' * 50, description='c' * 10))
        assert self.get_addon().name != 'a' * 50
        assert self.get_addon().summary != 'b' * 50
        assert response.status_code == 200
        self.assertFormError(
            response, 'form', 'name',
            'Ensure name and summary combined are at most 70 characters '
            u'(they have 100).')
        # success: together name and summary are 70 characters.
        data = self.get_dict(
            name='a' * 2, summary='b' * 68, description='c' * 10)
        self.is_success(data)

    @override_switch('content-optimization', active=True)
    def test_summary_auto_cropping_content_optimization(self):
        # See test_forms.py::TestDescribeForm for some more variations.
        data = self.get_dict(minimal=False)
        data.pop('name')
        data.pop('summary')
        data.update({
            'name_en-us': 'a' * 25,
            'name_fr': 'b' * 30,
            'summary_en-us': 'c' * 45,
            'summary_fr': 'd' * 45,  # 30 + 45 is > 70
        })
        self.is_success(data)
        assert self.get_addon().name == 'a' * 25
        assert self.get_addon().summary == 'c' * 45
        with self.activate('fr'):
            assert self.get_addon().name == 'b' * 30
            # The French summary was cropped so name + summary fit in 70.
            assert self.get_addon().summary == 'd' * 40

    @override_switch('content-optimization', active=True)
    def test_name_auto_cropping_content_optimization(self):
        # See test_forms.py::TestDescribeForm for some more variations.
        data = self.get_dict(minimal=False)
        data.pop('name')
        data.pop('summary')
        data.update({
            'name_en-us': 'a' * 67,
            'name_fr': 'b' * 69,
            'summary_en-us': 'c' * 2,
            'summary_fr': 'd' * 3,
        })
        self.is_success(data)
        assert self.get_addon().name == 'a' * 67
        assert self.get_addon().summary == 'c' * 2
        with self.activate('fr'):
            # Both fields were cropped so name + summary fit in 70.
            assert self.get_addon().name == 'b' * 68
            assert self.get_addon().summary == 'd' * 2
class TestAddonSubmitDetails(DetailsPageMixin, TestSubmitBase):
    """Details submission step for regular (non-theme) add-ons."""

    def setUp(self):
        super(TestAddonSubmitDetails, self).setUp()
        self.url = reverse('devhub.submit.details', args=['a3615'])
        # Delete the fixture's AddonCategory rows for categories 1 and 71
        # so the add-on starts the test with a known category set.
        AddonCategory.objects.filter(
            addon=self.get_addon(),
            category=Category.objects.get(id=1)).delete()
        AddonCategory.objects.filter(
            addon=self.get_addon(),
            category=Category.objects.get(id=71)).delete()
        # Seed cat_initial from the category formset the page actually
        # renders.
        ctx = self.client.get(self.url).context['cat_form']
        self.cat_initial = initial(ctx.initial_forms[0])
        self.next_step = reverse('devhub.submit.finish', args=['a3615'])
        License.objects.create(builtin=3, on_form=True)
        self.get_addon().update(status=amo.STATUS_NULL)

    def get_dict(self, minimal=True, **kw):
        # Build a valid POST payload for the details page. `minimal=False`
        # also fills the optional description/support/policy/reviewer
        # fields; keyword args override any default entry.
        result = {}
        describe_form = {'name': 'Test name', 'slug': 'testname',
                         'summary': 'Hello!', 'is_experimental': True,
                         'requires_payment': True}
        if not minimal:
            describe_form.update({'description': 'its a description',
                                  'support_url': 'http://stackoverflow.com',
                                  'support_email': 'black@hole.org'})
        cat_initial = kw.pop('cat_initial', self.cat_initial)
        cat_form = formset(cat_initial, initial_count=1)
        license_form = {'license-builtin': 3}
        policy_form = {} if minimal else {
            'has_priv': True, 'privacy_policy': 'Ur data belongs to us now.'}
        reviewer_form = {} if minimal else {'approval_notes': 'approove plz'}
        result.update(describe_form)
        result.update(cat_form)
        result.update(license_form)
        result.update(policy_form)
        result.update(reviewer_form)
        result.update(**kw)
        return result

    @override_switch('content-optimization', active=False)
    def test_submit_success_required(self):
        # Set/change the required fields only
        response = self.client.get(self.url)
        assert response.status_code == 200
        # Post and be redirected - trying to sneak
        # in fields that shouldn't be modified via this form.
        data = self.get_dict(homepage='foo.com',
                             tags='whatevs, whatever')
        self.is_success(data)
        addon = self.get_addon()
        # This fields should not have been modified.
        assert addon.homepage != 'foo.com'
        assert len(addon.tags.values_list()) == 0
        # These are the fields that are expected to be edited here.
        assert addon.name == 'Test name'
        assert addon.slug == 'testname'
        assert addon.summary == 'Hello!'
        assert addon.is_experimental
        assert addon.requires_payment
        assert addon.all_categories[0].id == 22
        # Test add-on log activity.
        log_items = ActivityLog.objects.for_addons(addon)
        assert not log_items.filter(action=amo.LOG.EDIT_PROPERTIES.id), (
            "Setting properties on submit needn't be logged.")

    @override_switch('content-optimization', active=False)
    def test_submit_success_optional_fields(self):
        # Set/change the optional fields too
        # Post and be redirected
        data = self.get_dict(minimal=False)
        self.is_success(data)
        addon = self.get_addon()
        # These are the fields that are expected to be edited here.
        assert addon.description == 'its a description'
        assert addon.support_url == 'http://stackoverflow.com'
        assert addon.support_email == 'black@hole.org'
        assert addon.privacy_policy == 'Ur data belongs to us now.'
        assert addon.current_version.approval_notes == 'approove plz'

    @override_switch('content-optimization', active=True)
    def test_submit_success_required_with_content_optimization(self):
        # Set/change the required fields only
        response = self.client.get(self.url)
        assert response.status_code == 200
        # Post and be redirected - trying to sneak
        # in fields that shouldn't be modified via this form.
        data = self.get_dict(
            description='its a description', homepage='foo.com',
            tags='whatevs, whatever')
        self.is_success(data)
        addon = self.get_addon()
        # This fields should not have been modified.
        assert addon.homepage != 'foo.com'
        assert len(addon.tags.values_list()) == 0
        # These are the fields that are expected to be edited here.
        assert addon.name == 'Test name'
        assert addon.slug == 'testname'
        assert addon.summary == 'Hello!'
        assert addon.description == 'its a description'
        assert addon.is_experimental
        assert addon.requires_payment
        assert addon.all_categories[0].id == 22
        # Test add-on log activity.
        log_items = ActivityLog.objects.for_addons(addon)
        assert not log_items.filter(action=amo.LOG.EDIT_PROPERTIES.id), (
            "Setting properties on submit needn't be logged.")

    @override_switch('content-optimization', active=True)
    def test_submit_success_optional_fields_with_content_optimization(self):
        # Set/change the optional fields too
        # Post and be redirected
        data = self.get_dict(minimal=False)
        self.is_success(data)
        addon = self.get_addon()
        # These are the fields that are expected to be edited here.
        assert addon.support_url == 'http://stackoverflow.com'
        assert addon.support_email == 'black@hole.org'
        assert addon.privacy_policy == 'Ur data belongs to us now.'
        assert addon.current_version.approval_notes == 'approove plz'

    def test_submit_categories_required(self):
        # Omitting categories from the formset data is a validation error.
        del self.cat_initial['categories']
        response = self.client.post(
            self.url, self.get_dict(cat_initial=self.cat_initial))
        assert response.context['cat_form'].errors[0]['categories'] == (
            ['This field is required.'])

    def test_submit_categories_max(self):
        # More than amo.MAX_CATEGORIES (2) categories is rejected.
        assert amo.MAX_CATEGORIES == 2
        self.cat_initial['categories'] = [22, 1, 71]
        response = self.client.post(
            self.url, self.get_dict(cat_initial=self.cat_initial))
        assert response.context['cat_form'].errors[0]['categories'] == (
            ['You can have only 2 categories.'])

    def test_submit_categories_add(self):
        # A second category can be added alongside the existing one.
        assert [cat.id for cat in self.get_addon().all_categories] == [22]
        self.cat_initial['categories'] = [22, 1]
        self.is_success(self.get_dict())
        addon_cats = self.get_addon().categories.values_list('id', flat=True)
        assert sorted(addon_cats) == [1, 22]

    def test_submit_categories_addandremove(self):
        # Submitting a different pair both removes 1 and adds 71.
        AddonCategory(addon=self.addon, category_id=1).save()
        assert sorted(
            [cat.id for cat in self.get_addon().all_categories]) == [1, 22]
        self.cat_initial['categories'] = [22, 71]
        self.client.post(self.url, self.get_dict(cat_initial=self.cat_initial))
        category_ids_new = [c.id for c in self.get_addon().all_categories]
        assert sorted(category_ids_new) == [22, 71]

    def test_submit_categories_remove(self):
        # Submitting only one category removes the other.
        category = Category.objects.get(id=1)
        AddonCategory(addon=self.addon, category=category).save()
        assert sorted(
            [cat.id for cat in self.get_addon().all_categories]) == [1, 22]
        self.cat_initial['categories'] = [22]
        self.client.post(self.url, self.get_dict(cat_initial=self.cat_initial))
        category_ids_new = [cat.id for cat in self.get_addon().all_categories]
        assert category_ids_new == [22]

    def test_ul_class_rendering_regression(self):
        """Test ul of license widget doesn't render `license` class.
        Regression test for:
        * https://github.com/mozilla/addons-server/issues/8902
        * https://github.com/mozilla/addons-server/issues/8920
        """
        response = self.client.get(self.url)
        assert response.status_code == 200
        doc = pq(response.content)
        ul = doc('#id_license-builtin')
        assert ul.attr('class') is None

    def test_set_builtin_license_no_log(self):
        # Picking a builtin license on submit must not log a license change.
        self.is_success(self.get_dict(**{'license-builtin': 3}))
        addon = self.get_addon()
        assert addon.status == amo.STATUS_NOMINATED
        assert addon.current_version.license.builtin == 3
        log_items = ActivityLog.objects.for_addons(self.get_addon())
        assert not log_items.filter(action=amo.LOG.CHANGE_LICENSE.id)

    def test_license_error(self):
        # A license id that isn't in the form's choices is a form error.
        response = self.client.post(
            self.url, self.get_dict(**{'license-builtin': 4}))
        assert response.status_code == 200
        self.assertFormError(response, 'license_form', 'builtin',
                             'Select a valid choice. 4 is not one of '
                             'the available choices.')

    def test_set_privacy_nomsg(self):
        """
        You should not get punished with a 500 for not writing your policy...
        but perhaps you should feel shame for lying to us. This test does not
        test for shame.
        """
        self.get_addon().update(eula=None, privacy_policy=None)
        self.is_success(self.get_dict(has_priv=True))

    def test_source_submission_notes_not_shown_by_default(self):
        # Without submitted source code, no reminder note is rendered.
        url = reverse('devhub.submit.source', args=[self.addon.slug])
        response = self.client.post(url, {
            'has_source': 'no',
            'source': None,
        }, follow=True)
        assert response.status_code == 200
        doc = pq(response.content)
        assert 'Remember: ' not in doc('.source-submission-note').text()

    def test_source_submission_notes_shown(self):
        # Submitting source code makes the reminder note appear.
        url = reverse('devhub.submit.source', args=[self.addon.slug])
        response = self.client.post(url, {
            'has_source': 'yes', 'source': self.generate_source_zip(),
        }, follow=True)
        assert response.status_code == 200
        doc = pq(response.content)
        assert 'Remember: ' in doc('.source-submission-note').text()
class TestStaticThemeSubmitDetails(DetailsPageMixin, TestSubmitBase):
    """Details submission step for static themes."""

    def setUp(self):
        super(TestStaticThemeSubmitDetails, self).setUp()
        self.url = reverse('devhub.submit.details', args=['a3615'])
        # Delete the fixture's extension AddonCategory rows (1, 22, 71)...
        AddonCategory.objects.filter(
            addon=self.get_addon(),
            category=Category.objects.get(id=1)).delete()
        AddonCategory.objects.filter(
            addon=self.get_addon(),
            category=Category.objects.get(id=22)).delete()
        AddonCategory.objects.filter(
            addon=self.get_addon(),
            category=Category.objects.get(id=71)).delete()
        # ...and create theme categories from the static definitions.
        Category.from_static_category(CATEGORIES_BY_ID[300]).save()  # abstract
        Category.from_static_category(CATEGORIES_BY_ID[308]).save()  # firefox
        Category.from_static_category(CATEGORIES_BY_ID[400]).save()  # abstract
        Category.from_static_category(CATEGORIES_BY_ID[408]).save()  # firefox
        self.next_step = reverse('devhub.submit.finish', args=['a3615'])
        # Builtin 11 is the creative-commons license offered on the form.
        License.objects.create(builtin=11, on_form=True, creative_commons=True)
        self.get_addon().update(
            status=amo.STATUS_NULL, type=amo.ADDON_STATICTHEME)

    def get_dict(self, minimal=True, **kw):
        # Build a valid POST payload for the theme details page;
        # `minimal=False` also fills the optional support fields.
        result = {}
        describe_form = {'name': 'Test name', 'slug': 'testname',
                         'summary': 'Hello!'}
        if not minimal:
            describe_form.update({'support_url': 'http://stackoverflow.com',
                                  'support_email': 'black@hole.org'})
        cat_form = {'category': 'abstract'}
        license_form = {'license-builtin': 11}
        result.update(describe_form)
        result.update(cat_form)
        result.update(license_form)
        result.update(**kw)
        return result

    def test_submit_success_required(self):
        # Set/change the required fields only
        response = self.client.get(self.url)
        assert response.status_code == 200
        # Post and be redirected - trying to sneak
        # in fields that shouldn't be modified via this form.
        data = self.get_dict(homepage='foo.com',
                             tags='whatevs, whatever')
        self.is_success(data)
        addon = self.get_addon()
        # This fields should not have been modified.
        assert addon.homepage != 'foo.com'
        assert len(addon.tags.values_list()) == 0
        # These are the fields that are expected to be edited here.
        assert addon.name == 'Test name'
        assert addon.slug == 'testname'
        assert addon.summary == 'Hello!'
        assert addon.all_categories[0].id == 300
        # Test add-on log activity.
        log_items = ActivityLog.objects.for_addons(addon)
        assert not log_items.filter(action=amo.LOG.EDIT_PROPERTIES.id), (
            "Setting properties on submit needn't be logged.")

    def test_submit_success_optional_fields(self):
        # Set/change the optional fields too
        # Post and be redirected
        data = self.get_dict(minimal=False)
        self.is_success(data)
        addon = self.get_addon()
        # These are the fields that are expected to be edited here.
        assert addon.support_url == 'http://stackoverflow.com'
        assert addon.support_email == 'black@hole.org'

    def test_submit_categories_set(self):
        # Picking 'firefox' applies that category for both apps (308 & 408).
        assert [cat.id for cat in self.get_addon().all_categories] == []
        self.is_success(self.get_dict(category='firefox'))
        addon_cats = self.get_addon().categories.values_list('id', flat=True)
        assert sorted(addon_cats) == [308, 408]

    def test_submit_categories_change(self):
        category_desktop = Category.objects.get(id=300)
        category_android = Category.objects.get(id=400)
        AddonCategory(addon=self.addon, category=category_desktop).save()
        AddonCategory(addon=self.addon, category=category_android).save()
        assert sorted(
            [cat.id for cat in self.get_addon().all_categories]) == [300, 400]
        self.client.post(self.url, self.get_dict(category='firefox'))
        category_ids_new = [cat.id for cat in self.get_addon().all_categories]
        # Only ever one category for Static Themes
        assert category_ids_new == [308, 408]

    def test_creative_commons_licenses(self):
        # The details page serves the CC license wizard for themes.
        response = self.client.get(self.url)
        assert response.status_code == 200
        doc = pq(response.content)
        content = doc('.addon-submission-process')
        assert content('#cc-chooser')  # cc license wizard
        assert content('#persona-license')  # cc license result
        assert content('#id_license-builtin')  # license list
        # There should be one license - 11 we added in setUp - and no 'other'.
        assert len(content('input.license')) == 1
        assert content('input.license').attr('value') == '11'
        assert content('input.license').attr('data-name') == (
            LICENSES_BY_BUILTIN[11].name)

    def test_set_builtin_license_no_log(self):
        # Picking a builtin license on submit must not log a license change.
        self.is_success(self.get_dict(**{'license-builtin': 11}))
        addon = self.get_addon()
        assert addon.status == amo.STATUS_NOMINATED
        assert addon.current_version.license.builtin == 11
        log_items = ActivityLog.objects.for_addons(self.get_addon())
        assert not log_items.filter(action=amo.LOG.CHANGE_LICENSE.id)

    def test_license_error(self):
        # A license id that isn't in the form's choices is a form error.
        response = self.client.post(
            self.url, self.get_dict(**{'license-builtin': 4}))
        assert response.status_code == 200
        self.assertFormError(response, 'license_form', 'builtin',
                             'Select a valid choice. 4 is not one of '
                             'the available choices.')
class TestAddonSubmitFinish(TestSubmitBase):
    """Finish/confirmation step at the end of the submission flow."""

    def setUp(self):
        super(TestAddonSubmitFinish, self).setUp()
        self.url = reverse('devhub.submit.finish', args=[self.addon.slug])

    @mock.patch.object(settings, 'SITE_URL', 'http://b.ro')
    @mock.patch('olympia.devhub.tasks.send_welcome_email.delay')
    def test_welcome_email_for_newbies(self, send_welcome_email_mock):
        # A developer's first submission triggers the welcome email task.
        self.client.get(self.url)
        context = {
            'addon_name': 'Delicious Bookmarks',
            'app': six.text_type(amo.FIREFOX.pretty),
            'detail_url': 'http://b.ro/en-US/firefox/addon/a3615/',
            'version_url': 'http://b.ro/en-US/developers/addon/a3615/versions',
            'edit_url': 'http://b.ro/en-US/developers/addon/a3615/edit',
        }
        send_welcome_email_mock.assert_called_with(
            self.addon.id, ['del@icio.us'], context)

    @mock.patch.object(settings, 'SITE_URL', 'http://b.ro')
    @mock.patch('olympia.devhub.tasks.send_welcome_email.delay')
    def test_welcome_email_first_listed_addon(self, send_welcome_email_mock):
        # An earlier *unlisted* add-on doesn't suppress the welcome email.
        new_addon = addon_factory(
            version_kw={'channel': amo.RELEASE_CHANNEL_UNLISTED})
        new_addon.addonuser_set.create(user=self.addon.authors.all()[0])
        self.client.get(self.url)
        context = {
            'addon_name': 'Delicious Bookmarks',
            'app': six.text_type(amo.FIREFOX.pretty),
            'detail_url': 'http://b.ro/en-US/firefox/addon/a3615/',
            'version_url': 'http://b.ro/en-US/developers/addon/a3615/versions',
            'edit_url': 'http://b.ro/en-US/developers/addon/a3615/edit',
        }
        send_welcome_email_mock.assert_called_with(
            self.addon.id, ['del@icio.us'], context)

    @mock.patch.object(settings, 'SITE_URL', 'http://b.ro')
    @mock.patch('olympia.devhub.tasks.send_welcome_email.delay')
    def test_welcome_email_if_previous_addon_is_incomplete(
            self, send_welcome_email_mock):
        # If the developer already submitted an addon but didn't finish or was
        # rejected, we send the email anyway, it might be a dupe depending on
        # how far they got but it's better than not sending any.
        new_addon = addon_factory(status=amo.STATUS_NULL)
        new_addon.addonuser_set.create(user=self.addon.authors.all()[0])
        self.client.get(self.url)
        context = {
            'addon_name': 'Delicious Bookmarks',
            'app': six.text_type(amo.FIREFOX.pretty),
            'detail_url': 'http://b.ro/en-US/firefox/addon/a3615/',
            'version_url': 'http://b.ro/en-US/developers/addon/a3615/versions',
            'edit_url': 'http://b.ro/en-US/developers/addon/a3615/edit',
        }
        send_welcome_email_mock.assert_called_with(
            self.addon.id, ['del@icio.us'], context)

    @mock.patch('olympia.devhub.tasks.send_welcome_email.delay')
    def test_no_welcome_email(self, send_welcome_email_mock):
        """You already submitted an add-on? We won't spam again."""
        new_addon = addon_factory(status=amo.STATUS_NOMINATED)
        new_addon.addonuser_set.create(user=self.addon.authors.all()[0])
        self.client.get(self.url)
        assert not send_welcome_email_mock.called

    @mock.patch('olympia.devhub.tasks.send_welcome_email.delay')
    def test_no_welcome_email_if_unlisted(self, send_welcome_email_mock):
        # Unlisted submissions don't trigger the welcome email.
        self.make_addon_unlisted(self.addon)
        self.client.get(self.url)
        assert not send_welcome_email_mock.called

    def test_finish_submitting_listed_addon(self):
        # The listed finish page links to listing edit, version edit and
        # the submissions dashboard.
        version = self.addon.find_latest_version(
            channel=amo.RELEASE_CHANNEL_LISTED)
        assert version.supported_platforms == ([amo.PLATFORM_ALL])
        response = self.client.get(self.url)
        assert response.status_code == 200
        doc = pq(response.content)
        content = doc('.addon-submission-process')
        links = content('a')
        assert len(links) == 3
        # First link is to edit listing
        assert links[0].attrib['href'] == self.addon.get_dev_url()
        # Second link is to edit the version
        assert links[1].attrib['href'] == reverse(
            'devhub.versions.edit',
            args=[self.addon.slug, version.id])
        assert links[1].text == (
            'Edit version %s' % version.version)
        # Third back to my submissions.
        assert links[2].attrib['href'] == reverse('devhub.addons')

    def test_finish_submitting_unlisted_addon(self):
        # The unlisted finish page links to the file download and the
        # submissions dashboard.
        self.make_addon_unlisted(self.addon)
        latest_version = self.addon.find_latest_version(
            channel=amo.RELEASE_CHANNEL_UNLISTED)
        response = self.client.get(self.url)
        assert response.status_code == 200
        doc = pq(response.content)
        content = doc('.addon-submission-process')
        links = content('a')
        assert len(links) == 2
        # First link is to the file download.
        file_ = latest_version.all_files[-1]
        assert links[0].attrib['href'] == file_.get_url_path('devhub')
        assert links[0].text == (
            'Download %s' % file_.filename)
        # Second back to my submissions.
        assert links[1].attrib['href'] == reverse('devhub.addons')

    def test_addon_no_versions_redirects_to_versions(self):
        self.addon.update(status=amo.STATUS_NULL)
        self.addon.versions.all().delete()
        response = self.client.get(self.url, follow=True)
        # Would go to 'devhub.submit.version' but no previous version means
        # channel needs to be selected first.
        self.assert3xx(
            response,
            reverse('devhub.submit.version.distribution', args=['a3615']), 302)

    def test_incomplete_directs_to_details(self):
        # We get bounced back to details step.
        self.addon.update(status=amo.STATUS_NULL)
        AddonCategory.objects.filter(addon=self.addon).delete()
        response = self.client.get(
            reverse('devhub.submit.finish', args=['a3615']), follow=True)
        self.assert3xx(
            response, reverse('devhub.submit.details', args=['a3615']))

    def test_finish_submitting_listed_static_theme(self):
        # Static themes get theme-specific links, copy and a preview image.
        self.addon.update(type=amo.ADDON_STATICTHEME)
        version = self.addon.find_latest_version(
            channel=amo.RELEASE_CHANNEL_LISTED)
        VersionPreview.objects.create(version=version)
        assert version.supported_platforms == ([amo.PLATFORM_ALL])
        response = self.client.get(self.url)
        assert response.status_code == 200
        doc = pq(response.content)
        content = doc('.addon-submission-process')
        links = content('a')
        assert len(links) == 2
        # First link is to edit listing.
        assert links[0].attrib['href'] == self.addon.get_dev_url()
        # Second link is back to my submissions.
        assert links[1].attrib['href'] == reverse('devhub.themes')
        # Text is static theme specific.
        assert "This version will be available after it passes review." in (
            response.content)
        # Show the preview we started generating just after the upload step.
        imgs = content('section.addon-submission-process img')
        assert imgs[0].attrib['src'] == (
            version.previews.first().image_url)
        assert len(imgs) == 1  # Just the one preview though.

    def test_finish_submitting_unlisted_static_theme(self):
        # Unlisted themes link to the file download and the themes dashboard.
        self.addon.update(type=amo.ADDON_STATICTHEME)
        self.make_addon_unlisted(self.addon)
        latest_version = self.addon.find_latest_version(
            channel=amo.RELEASE_CHANNEL_UNLISTED)
        response = self.client.get(self.url)
        assert response.status_code == 200
        doc = pq(response.content)
        content = doc('.addon-submission-process')
        links = content('a')
        assert len(links) == 2
        # First link is to the file download.
        file_ = latest_version.all_files[-1]
        assert links[0].attrib['href'] == file_.get_url_path('devhub')
        assert links[0].text == (
            'Download %s' % file_.filename)
        # Second back to my submissions.
        assert links[1].attrib['href'] == reverse('devhub.themes')
class TestAddonSubmitResume(TestSubmitBase):
    """Resuming an incomplete submission from other devhub pages."""

    def test_redirect_from_other_pages(self):
        # Make the add-on incomplete: null status and no categories.
        self.addon.update(status=amo.STATUS_NULL)
        AddonCategory.objects.filter(addon=self.addon).delete()
        edit_url = reverse('devhub.addons.edit', args=['a3615'])
        details_url = reverse('devhub.submit.details', args=['a3615'])
        # Visiting the edit page should bounce back to the details step.
        response = self.client.get(edit_url, follow=True)
        self.assert3xx(response, details_url)
class TestVersionSubmitDistribution(TestSubmitBase):
    """Channel-selection step when submitting a new version."""

    def setUp(self):
        super(TestVersionSubmitDistribution, self).setUp()
        self.url = reverse('devhub.submit.version.distribution',
                           args=[self.addon.slug])

    def _assert_channel_redirect(self, channel):
        # Posting a channel choice redirects to the matching upload step.
        response = self.client.post(self.url, {'channel': channel})
        upload_url = reverse(
            'devhub.submit.version.upload',
            args=[self.addon.slug, channel])
        self.assert3xx(response, upload_url)

    def test_listed_redirects_to_next_step(self):
        self._assert_channel_redirect('listed')

    def test_unlisted_redirects_to_next_step(self):
        self._assert_channel_redirect('unlisted')

    def test_no_redirect_for_metadata(self):
        # Incomplete metadata must not bounce the developer elsewhere.
        self.addon.update(status=amo.STATUS_NULL)
        AddonCategory.objects.filter(addon=self.addon).delete()
        response = self.client.get(self.url)
        assert response.status_code == 200

    def test_has_read_agreement(self):
        # Developers who never accepted the agreement are sent there first.
        self.user.update(read_dev_agreement=None)
        response = self.client.get(self.url)
        agreement_url = reverse(
            'devhub.submit.version.agreement', args=[self.addon.slug])
        self.assert3xx(response, agreement_url)
class TestVersionSubmitAutoChannel(TestSubmitBase):
    """ Just check we chose the right upload channel. The upload tests
    themselves are in other tests. """

    def setUp(self):
        super(TestVersionSubmitAutoChannel, self).setUp()
        self.url = reverse('devhub.submit.version', args=[self.addon.slug])

    def _check_upload_channel(self, upload_mock, expected_channel):
        # The view must delegate to _submit_upload with the channel of the
        # most recent version and the source step as the next url.
        self.client.post(self.url)
        assert upload_mock.call_count == 1
        positional, _ = upload_mock.call_args
        assert positional[1:] == (
            self.addon, expected_channel, 'devhub.submit.version.source')

    @mock.patch('olympia.devhub.views._submit_upload',
                side_effect=views._submit_upload)
    def test_listed_last_uses_listed_upload(self, _submit_upload_mock):
        version_factory(addon=self.addon, channel=amo.RELEASE_CHANNEL_LISTED)
        self._check_upload_channel(
            _submit_upload_mock, amo.RELEASE_CHANNEL_LISTED)

    @mock.patch('olympia.devhub.views._submit_upload',
                side_effect=views._submit_upload)
    def test_unlisted_last_uses_unlisted_upload(self, _submit_upload_mock):
        version_factory(
            addon=self.addon, channel=amo.RELEASE_CHANNEL_UNLISTED)
        self._check_upload_channel(
            _submit_upload_mock, amo.RELEASE_CHANNEL_UNLISTED)

    def test_no_versions_redirects_to_distribution(self):
        # With no versions left there is no channel to infer from.
        for version in self.addon.versions.all():
            version.delete()
        response = self.client.post(self.url)
        self.assert3xx(
            response,
            reverse('devhub.submit.version.distribution',
                    args=[self.addon.slug]))

    def test_has_read_agreement(self):
        # Developers who never accepted the agreement are sent there first.
        self.user.update(read_dev_agreement=None)
        response = self.client.get(self.url)
        self.assert3xx(
            response,
            reverse('devhub.submit.version.agreement', args=[self.addon.slug]))
class VersionSubmitUploadMixin(object):
    """ Shared tests for the new-version upload step.  Subclasses set
    ``channel`` to either RELEASE_CHANNEL_LISTED or
    RELEASE_CHANNEL_UNLISTED and mix in UploadTest. """

    channel = None
    fixtures = ['base/users', 'base/addon_3615']

    def setUp(self):
        super(VersionSubmitUploadMixin, self).setUp()
        self.upload = self.get_upload('extension.xpi')
        self.addon = Addon.objects.get(id=3615)
        self.version = self.addon.current_version
        self.addon.update(guid='guid@xpi')
        self.user = UserProfile.objects.get(email='del@icio.us')
        assert self.client.login(email=self.user.email)
        # Force every existing version onto the channel under test.
        self.addon.versions.update(channel=self.channel)
        channel = ('listed' if self.channel == amo.RELEASE_CHANNEL_LISTED else
                   'unlisted')
        self.url = reverse('devhub.submit.version.upload',
                           args=[self.addon.slug, channel])
        assert self.addon.has_complete_metadata()
        self.version.save()

    def post(self, compatible_apps=None,
             override_validation=False, expected_status=302, source=None,
             extra_kwargs=None):
        """ Posts an upload to self.url and asserts the response status.
        Returns the response for further assertions. """
        if compatible_apps is None:
            compatible_apps = [amo.FIREFOX]
        data = {
            'upload': self.upload.uuid.hex,
            'source': source,
            'compatible_apps': [p.id for p in compatible_apps],
            'admin_override_validation': override_validation
        }
        response = self.client.post(self.url, data, **(extra_kwargs or {}))
        assert response.status_code == expected_status
        return response

    def get_next_url(self, version):
        # The step that follows a successful upload is the source step.
        return reverse('devhub.submit.version.source', args=[
            self.addon.slug, version.pk])

    def test_missing_compatibility_apps(self):
        response = self.client.post(self.url, {'upload': self.upload.uuid.hex})
        assert response.status_code == 200
        assert response.context['new_addon_form'].errors.as_text() == (
            '* compatible_apps\n  * Need to select at least one application.')

    def test_unique_version_num(self):
        self.version.update(version='0.1')
        response = self.post(expected_status=200)
        assert pq(response.content)('ul.errorlist').text() == (
            'Version 0.1 already exists.')

    def test_same_version_if_previous_is_rejected(self):
        # We can't re-use the same version number, even if the previous
        # versions have been disabled/rejected.
        self.version.update(version='0.1')
        self.version.files.update(status=amo.STATUS_DISABLED)
        response = self.post(expected_status=200)
        assert pq(response.content)('ul.errorlist').text() == (
            'Version 0.1 already exists.')

    def test_same_version_if_previous_is_deleted(self):
        # We can't re-use the same version number if the previous
        # versions has been deleted either.
        self.version.update(version='0.1')
        self.version.delete()
        response = self.post(expected_status=200)
        assert pq(response.content)('ul.errorlist').text() == (
            'Version 0.1 was uploaded before and deleted.')

    def test_same_version_if_previous_is_awaiting_review(self):
        # We can't re-use the same version number - offer to continue.
        self.version.update(version='0.1')
        self.version.files.update(status=amo.STATUS_AWAITING_REVIEW)
        response = self.post(expected_status=200)
        assert pq(response.content)('ul.errorlist').text() == (
            'Version 0.1 already exists. '
            'Continue with existing upload instead?')
        # url is always to the details page even for unlisted (will redirect).
        assert pq(response.content)('ul.errorlist a').attr('href') == (
            reverse('devhub.submit.version.details', args=[
                self.addon.slug, self.version.pk]))

    def test_distribution_link(self):
        response = self.client.get(self.url)
        channel_text = ('listed' if self.channel == amo.RELEASE_CHANNEL_LISTED
                        else 'unlisted')
        distribution_url = reverse('devhub.submit.version.distribution',
                                   args=[self.addon.slug])
        doc = pq(response.content)
        assert doc('.addon-submit-distribute a').attr('href') == (
            distribution_url + '?channel=' + channel_text)

    def test_url_is_404_for_disabled_addons(self):
        self.addon.update(status=amo.STATUS_DISABLED)
        response = self.client.get(self.url)
        assert response.status_code == 404

    def test_no_redirect_for_metadata(self):
        self.addon.update(status=amo.STATUS_NULL)
        AddonCategory.objects.filter(addon=self.addon).delete()
        response = self.client.get(self.url)
        assert response.status_code == 200

    def test_static_theme_wizard_button_not_shown_for_extensions(self):
        assert self.addon.type != amo.ADDON_STATICTHEME
        response = self.client.get(self.url)
        assert response.status_code == 200
        doc = pq(response.content)
        assert not doc('#wizardlink')

    def test_static_theme_wizard_button_shown(self):
        channel = ('listed' if self.channel == amo.RELEASE_CHANNEL_LISTED else
                   'unlisted')
        self.addon.update(type=amo.ADDON_STATICTHEME)
        response = self.client.get(self.url)
        assert response.status_code == 200
        doc = pq(response.content)
        assert doc('#wizardlink')
        assert doc('#wizardlink').attr('href') == (
            reverse('devhub.submit.version.wizard',
                    args=[self.addon.slug, channel]))

    def test_static_theme_wizard(self):
        channel = ('listed' if self.channel == amo.RELEASE_CHANNEL_LISTED else
                   'unlisted')
        self.addon.update(type=amo.ADDON_STATICTHEME)
        # Get the correct template.
        self.url = reverse('devhub.submit.version.wizard',
                           args=[self.addon.slug, channel])
        mock_point = 'olympia.devhub.views.extract_theme_properties'
        with mock.patch(mock_point) as extract_theme_properties_mock:
            extract_theme_properties_mock.return_value = {
                'colors': {
                    'accentcolor': '#123456',
                    'textcolor': 'rgba(1,2,3,0.4)',
                },
                'images': {
                    'headerURL': 'header.png',
                }
            }
            response = self.client.get(self.url)
        assert response.status_code == 200
        doc = pq(response.content)
        assert doc('#theme-wizard')
        assert doc('#theme-wizard').attr('data-version') == '3.0'
        assert doc('input#theme-name').attr('type') == 'hidden'
        assert doc('input#theme-name').attr('value') == (
            six.text_type(self.addon.name))
        # Existing colors should be the default values for the fields
        assert doc('#accentcolor').attr('value') == '#123456'
        assert doc('#textcolor').attr('value') == 'rgba(1,2,3,0.4)'
        # And the theme header url is there for the JS to load
        assert doc('#theme-header').attr('data-existing-header') == (
            'header.png')
        # No warning about extra properties
        assert 'are unsupported in this wizard' not in response.content
        # And then check the upload works.
        path = os.path.join(
            settings.ROOT, 'src/olympia/devhub/tests/addons/static_theme.zip')
        self.upload = self.get_upload(abspath=path)
        response = self.post()

        version = self.addon.find_latest_version(channel=self.channel)
        assert version.channel == self.channel
        assert version.all_files[0].status == (
            amo.STATUS_AWAITING_REVIEW
            if self.channel == amo.RELEASE_CHANNEL_LISTED else
            amo.STATUS_PUBLIC)
        self.assert3xx(response, self.get_next_url(version))
        log_items = ActivityLog.objects.for_addons(self.addon)
        assert log_items.filter(action=amo.LOG.ADD_VERSION.id)
        if self.channel == amo.RELEASE_CHANNEL_LISTED:
            previews = list(version.previews.all())
            assert len(previews) == 3
            assert storage.exists(previews[0].image_path)
            assert storage.exists(previews[1].image_path)
            # Bug fix: previously checked previews[1] twice and never
            # verified the third preview image.
            assert storage.exists(previews[2].image_path)
        else:
            assert version.previews.all().count() == 0

    def test_static_theme_wizard_unsupported_properties(self):
        channel = ('listed' if self.channel == amo.RELEASE_CHANNEL_LISTED else
                   'unlisted')
        self.addon.update(type=amo.ADDON_STATICTHEME)
        # Get the correct template.
        self.url = reverse('devhub.submit.version.wizard',
                           args=[self.addon.slug, channel])
        mock_point = 'olympia.devhub.views.extract_theme_properties'
        with mock.patch(mock_point) as extract_theme_properties_mock:
            extract_theme_properties_mock.return_value = {
                'colors': {
                    'accentcolor': '#123456',
                    'textcolor': 'rgba(1,2,3,0.4)',
                    'tab_line': '#123',
                },
                'images': {
                    'additional_backgrounds': [],
                },
                'something_extra': {},
            }
            response = self.client.get(self.url)
        assert response.status_code == 200
        doc = pq(response.content)
        assert doc('#theme-wizard')
        assert doc('#theme-wizard').attr('data-version') == '3.0'
        assert doc('input#theme-name').attr('type') == 'hidden'
        assert doc('input#theme-name').attr('value') == (
            six.text_type(self.addon.name))
        # Existing colors should be the default values for the fields
        assert doc('#accentcolor').attr('value') == '#123456'
        assert doc('#textcolor').attr('value') == 'rgba(1,2,3,0.4)'
        # Warning about extra properties this time:
        assert 'are unsupported in this wizard' in response.content
        unsupported_list = doc('.notification-box.error ul.note li')
        assert unsupported_list.length == 3
        assert 'tab_line' in unsupported_list.text()
        assert 'additional_backgrounds' in unsupported_list.text()
        assert 'something_extra' in unsupported_list.text()
        # And then check the upload works.
        path = os.path.join(
            settings.ROOT, 'src/olympia/devhub/tests/addons/static_theme.zip')
        self.upload = self.get_upload(abspath=path)
        response = self.post()

        version = self.addon.find_latest_version(channel=self.channel)
        assert version.channel == self.channel
        assert version.all_files[0].status == (
            amo.STATUS_AWAITING_REVIEW
            if self.channel == amo.RELEASE_CHANNEL_LISTED else
            amo.STATUS_PUBLIC)
        self.assert3xx(response, self.get_next_url(version))
        log_items = ActivityLog.objects.for_addons(self.addon)
        assert log_items.filter(action=amo.LOG.ADD_VERSION.id)
        if self.channel == amo.RELEASE_CHANNEL_LISTED:
            previews = list(version.previews.all())
            assert len(previews) == 3
            assert storage.exists(previews[0].image_path)
            assert storage.exists(previews[1].image_path)
            # Bug fix: previously checked previews[1] twice and never
            # verified the third preview image.
            assert storage.exists(previews[2].image_path)
        else:
            assert version.previews.all().count() == 0

    @mock.patch('olympia.devhub.forms.parse_addon',
                wraps=_parse_addon_theme_permission_wrapper)
    def test_dynamic_theme_tagging(self, parse_addon_mock):
        # Dynamic themes are tagged automatically, but only on the listed
        # channel.
        self.addon.update(guid='beastify@mozilla.org')
        path = os.path.join(
            settings.ROOT,
            'src/olympia/devhub/tests/addons/valid_webextension.xpi')
        self.upload = self.get_upload(abspath=path)
        response = self.post()
        version = self.addon.find_latest_version(channel=self.channel)
        self.assert3xx(
            response, self.get_next_url(version))
        if self.channel == amo.RELEASE_CHANNEL_LISTED:
            assert self.addon.tags.filter(tag_text='dynamic theme').exists()
        else:
            assert not self.addon.tags.filter(
                tag_text='dynamic theme').exists()
class TestVersionSubmitUploadListed(VersionSubmitUploadMixin, UploadTest):
    """ Upload-step tests specific to the listed channel. """

    channel = amo.RELEASE_CHANNEL_LISTED

    def test_success(self):
        # A plain successful listed upload creates a version awaiting
        # review and logs ADD_VERSION.
        response = self.post()
        version = self.addon.find_latest_version(
            channel=amo.RELEASE_CHANNEL_LISTED)
        assert version.channel == amo.RELEASE_CHANNEL_LISTED
        assert version.all_files[0].status == amo.STATUS_AWAITING_REVIEW
        self.assert3xx(response, self.get_next_url(version))
        log_items = ActivityLog.objects.for_addons(self.addon)
        assert log_items.filter(action=amo.LOG.ADD_VERSION.id)

    @mock.patch('olympia.devhub.views.sign_file')
    def test_experiments_inside_webext_are_auto_signed(self, mock_sign_file):
        """Experiment extensions (bug 1220097) are auto-signed."""
        self.grant_permission(
            self.user, ':'.join(amo.permissions.EXPERIMENTS_SUBMIT))
        self.upload = self.get_upload(
            'experiment_inside_webextension.xpi',
            validation=json.dumps({
                "notices": 2, "errors": 0, "messages": [],
                "metadata": {}, "warnings": 1,
            }))
        self.addon.update(
            guid='@experiment-inside-webextension-guid',
            status=amo.STATUS_PUBLIC)
        self.post()
        # Make sure the file created and signed is for this addon.
        assert mock_sign_file.call_count == 1
        mock_sign_file_call = mock_sign_file.call_args[0]
        signed_file = mock_sign_file_call[0]
        assert signed_file.version.addon == self.addon
        assert signed_file.version.channel == amo.RELEASE_CHANNEL_LISTED
        # There is a log for that file (with passed validation).
        log = ActivityLog.objects.latest(field_name='id')
        assert log.action == amo.LOG.EXPERIMENT_SIGNED.id

    @mock.patch('olympia.devhub.views.sign_file')
    def test_experiment_inside_webext_upload_without_permission(
            self, mock_sign_file):
        # Without the EXPERIMENTS_SUBMIT permission, the upload is rejected
        # and nothing gets signed.
        self.upload = self.get_upload(
            'experiment_inside_webextension.xpi',
            validation=json.dumps({
                "notices": 2, "errors": 0, "messages": [],
                "metadata": {}, "warnings": 1,
            }))
        self.addon.update(
            guid='@experiment-inside-webextension-guid',
            status=amo.STATUS_PUBLIC)
        response = self.post(expected_status=200)
        assert pq(response.content)('ul.errorlist').text() == (
            'You cannot submit this type of add-on')
        assert mock_sign_file.call_count == 0

    @mock.patch('olympia.devhub.views.sign_file')
    def test_theme_experiment_inside_webext_upload_without_permission(
            self, mock_sign_file):
        # Same as above but for a theme experiment.
        self.upload = self.get_upload(
            'theme_experiment_inside_webextension.xpi',
            validation=json.dumps({
                "notices": 2, "errors": 0, "messages": [],
                "metadata": {}, "warnings": 1,
            }))
        # NOTE(review): this guid literal contains a non-ASCII en dash
        # ("–") between "theme" and "experiment" -- presumably a typo for
        # a regular hyphen, but the test passes regardless; verify.
        self.addon.update(
            guid='@theme–experiment-inside-webextension-guid',
            status=amo.STATUS_PUBLIC)
        response = self.post(expected_status=200)
        assert pq(response.content)('ul.errorlist').text() == (
            'You cannot submit this type of add-on')
        assert mock_sign_file.call_count == 0

    def test_incomplete_addon_now_nominated(self):
        """Uploading a new version for an incomplete addon should set it to
        nominated."""
        self.addon.current_version.files.update(status=amo.STATUS_DISABLED)
        self.addon.update_status()
        # Deleting all the versions should make it null.
        assert self.addon.status == amo.STATUS_NULL
        self.post()
        self.addon.reload()
        assert self.addon.status == amo.STATUS_NOMINATED
class TestVersionSubmitUploadUnlisted(VersionSubmitUploadMixin, UploadTest):
    """ Upload-step tests specific to the unlisted channel. """

    channel = amo.RELEASE_CHANNEL_UNLISTED

    @mock.patch('olympia.reviewers.utils.sign_file')
    def test_success(self, mock_sign_file):
        """Sign automatically."""
        # No validation errors or warning.
        result = {
            'errors': 0,
            'warnings': 0,
            'notices': 2,
            'metadata': {},
            'messages': [],
        }
        self.upload = self.get_upload(
            'extension.xpi', validation=json.dumps(result))
        response = self.post()
        version = self.addon.find_latest_version(
            channel=amo.RELEASE_CHANNEL_UNLISTED)
        assert version.channel == amo.RELEASE_CHANNEL_UNLISTED
        # Unlisted uploads with a clean validation are published and signed
        # immediately, without human review.
        assert version.all_files[0].status == amo.STATUS_PUBLIC
        self.assert3xx(response, self.get_next_url(version))
        assert mock_sign_file.called
class TestVersionSubmitSource(TestAddonSubmitSource):
    """ Reuses the add-on source-step tests against the version-submission
    source URL by repointing self.url / self.next_url at a fresh listed
    version. """

    def setUp(self):
        super(TestVersionSubmitSource, self).setUp()
        addon = self.get_addon()
        self.version = version_factory(
            addon=addon,
            channel=amo.RELEASE_CHANNEL_LISTED,
            license_id=addon.versions.latest().license_id)
        self.url = reverse(
            'devhub.submit.version.source', args=[addon.slug, self.version.pk])
        self.next_url = reverse(
            'devhub.submit.version.details',
            args=[addon.slug, self.version.pk])
        # Sanity check: the new version starts with no source attached.
        assert not self.get_version().source
class TestVersionSubmitDetails(TestSubmitBase):
    """ Tests for the details step of submitting a new version of an
    existing add-on. """

    def setUp(self):
        super(TestVersionSubmitDetails, self).setUp()
        addon = self.get_addon()
        self.version = version_factory(
            addon=addon,
            channel=amo.RELEASE_CHANNEL_LISTED,
            license_id=addon.versions.latest().license_id)
        self.url = reverse('devhub.submit.version.details',
                           args=[addon.slug, self.version.pk])

    def test_submit_empty_is_okay(self):
        # With metadata already complete, posting an empty form is fine and
        # moves on to the finish step.
        assert all(self.get_addon().get_required_metadata())
        response = self.client.get(self.url)
        assert response.status_code == 200

        response = self.client.post(self.url, {})
        self.assert3xx(
            response, reverse('devhub.submit.version.finish',
                              args=[self.addon.slug, self.version.pk]))

        assert not self.version.approval_notes
        assert not self.version.release_notes

    def test_submit_success(self):
        assert all(self.get_addon().get_required_metadata())
        response = self.client.get(self.url)
        assert response.status_code == 200

        # Post and be redirected - trying to sneak in a field that shouldn't
        # be modified when this is not the first listed version.
        data = {'approval_notes': 'approove plz',
                'release_notes': 'loadsa stuff', 'name': 'foo'}
        response = self.client.post(self.url, data)
        self.assert3xx(
            response, reverse('devhub.submit.version.finish',
                              args=[self.addon.slug, self.version.pk]))

        # This field should not have been modified.
        assert self.get_addon().name != 'foo'

        self.version.reload()
        assert self.version.approval_notes == 'approove plz'
        assert self.version.release_notes == 'loadsa stuff'

    def test_submit_details_unlisted_should_redirect(self):
        # The details step doesn't apply to unlisted versions: skip ahead.
        self.version.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
        assert all(self.get_addon().get_required_metadata())
        response = self.client.get(self.url)
        self.assert3xx(
            response, reverse('devhub.submit.version.finish',
                              args=[self.addon.slug, self.version.pk]))

    def test_show_request_for_information(self):
        # Only REQUEST_INFORMATION activity is surfaced while an info
        # request is pending; ordinary reviewer replies are not.
        AddonReviewerFlags.objects.create(
            addon=self.addon, pending_info_request=self.days_ago(2))
        ActivityLog.create(
            amo.LOG.REVIEWER_REPLY_VERSION, self.addon, self.version,
            user=self.user, details={'comments': 'this should not be shown'})
        ActivityLog.create(
            amo.LOG.REQUEST_INFORMATION, self.addon, self.version,
            user=self.user, details={'comments': 'this is an info request'})
        response = self.client.get(self.url)
        assert response.status_code == 200
        assert 'this should not be shown' not in response.content
        assert 'this is an info request' in response.content

    def test_dont_show_request_for_information_if_none_pending(self):
        ActivityLog.create(
            amo.LOG.REVIEWER_REPLY_VERSION, self.addon, self.version,
            user=self.user, details={'comments': 'this should not be shown'})
        ActivityLog.create(
            amo.LOG.REQUEST_INFORMATION, self.addon, self.version,
            user=self.user, details={'comments': 'this is an info request'})
        response = self.client.get(self.url)
        assert response.status_code == 200
        assert 'this should not be shown' not in response.content
        assert 'this is an info request' not in response.content

    def test_clear_request_for_information(self):
        # Checking the clear_pending_info_request box clears the flag and
        # logs DEVELOPER_CLEAR_INFO_REQUEST.
        AddonReviewerFlags.objects.create(
            addon=self.addon, pending_info_request=self.days_ago(2))
        response = self.client.post(
            self.url, {'clear_pending_info_request': True})
        self.assert3xx(
            response, reverse('devhub.submit.version.finish',
                              args=[self.addon.slug, self.version.pk]))
        flags = AddonReviewerFlags.objects.get(addon=self.addon)
        assert flags.pending_info_request is None
        activity = ActivityLog.objects.for_addons(self.addon).filter(
            action=amo.LOG.DEVELOPER_CLEAR_INFO_REQUEST.id).get()
        assert activity.user == self.user
        assert activity.arguments == [self.addon, self.version]

    def test_dont_clear_request_for_information(self):
        # Without the checkbox, the pending flag is left untouched.
        past_date = self.days_ago(2)
        AddonReviewerFlags.objects.create(
            addon=self.addon, pending_info_request=past_date)
        response = self.client.post(self.url)
        self.assert3xx(
            response, reverse('devhub.submit.version.finish',
                              args=[self.addon.slug, self.version.pk]))
        flags = AddonReviewerFlags.objects.get(addon=self.addon)
        assert flags.pending_info_request == past_date
        assert not ActivityLog.objects.for_addons(self.addon).filter(
            action=amo.LOG.DEVELOPER_CLEAR_INFO_REQUEST.id).exists()

    def test_can_cancel_review(self):
        addon = self.get_addon()
        addon_status = addon.status
        addon.versions.latest().files.update(status=amo.STATUS_AWAITING_REVIEW)

        cancel_url = reverse('devhub.addons.cancel', args=['a3615'])
        versions_url = reverse('devhub.addons.versions', args=['a3615'])
        response = self.client.post(cancel_url)
        self.assert3xx(response, versions_url)

        addon = self.get_addon()
        assert addon.status == addon_status  # No change.

        version = addon.versions.latest()
        # Invalidate the cached all_files property so statuses reflects the
        # post-cancel state.
        del version.all_files
        assert version.statuses == [
            (version.all_files[0].id, amo.STATUS_DISABLED)]

    def test_public_addon_stays_public_even_if_had_missing_metadata(self):
        """Posting details for a new version for a public add-on that somehow
        had missing metadata despite being public shouldn't reset it to
        nominated."""
        # Create a built-in License we'll use later when posting.
        License.objects.create(builtin=3, on_form=True)

        # Remove license from existing versions, but make sure the addon is
        # still public, just lacking metadata now.
        self.addon.versions.update(license_id=None)
        self.addon.reload()
        assert self.addon.status == amo.STATUS_PUBLIC
        assert not self.addon.has_complete_metadata()

        # Now, submit details for that new version, adding license. Since
        # metadata is missing, name, slug, summary and category are required to
        # be present.
        data = {
            'name': six.text_type(self.addon.name),
            'slug': self.addon.slug,
            'summary': six.text_type(self.addon.summary),

            'form-0-categories': [22, 1],
            'form-0-application': 1,
            'form-INITIAL_FORMS': 1,
            'form-TOTAL_FORMS': 1,

            'license-builtin': 3,
        }
        response = self.client.post(self.url, data)
        self.assert3xx(
            response, reverse('devhub.submit.version.finish',
                              args=[self.addon.slug, self.version.pk]))
        self.addon.reload()
        assert self.addon.has_complete_metadata()
        assert self.addon.status == amo.STATUS_PUBLIC

    def test_submit_static_theme_should_redirect(self):
        self.addon.update(type=amo.ADDON_STATICTHEME)
        assert all(self.get_addon().get_required_metadata())
        response = self.client.get(self.url)
        # No extra details for subsequent theme uploads so just redirect.
        self.assert3xx(
            response, reverse('devhub.submit.version.finish',
                              args=[self.addon.slug, self.version.pk]))
class TestVersionSubmitDetailsFirstListed(TestAddonSubmitDetails):
    """ Testing the case of a listed version being submitted on an add-on that
    previously only had unlisted versions - so is missing metadata."""

    def setUp(self):
        super(TestVersionSubmitDetailsFirstListed, self).setUp()
        self.addon.versions.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
        self.version = version_factory(addon=self.addon,
                                       channel=amo.RELEASE_CHANNEL_LISTED)
        self.version.update(license=None)  # Addon needs to be missing data.
        self.url = reverse('devhub.submit.version.details',
                           args=['a3615', self.version.pk])
        self.next_step = reverse('devhub.submit.version.finish',
                                 args=['a3615', self.version.pk])

    @override_switch('akismet-spam-check', active=True)
    @override_switch('akismet-addon-action', active=True)
    @mock.patch('olympia.lib.akismet.tasks.AkismetReport.comment_check')
    def test_akismet_spam_check_spam_action_taken(self, comment_check_mock):
        # With the action switch on, spam verdicts block the form fields.
        comment_check_mock.return_value = AkismetReport.MAYBE_SPAM
        data = self.get_dict(name=u'spám', summary=self.addon.summary)
        response = self.client.post(self.url, data)
        assert response.status_code == 200
        self.assertFormError(
            response, 'form', 'name',
            'The text entered has been flagged as spam.')
        self.assertFormError(
            response, 'form', 'summary',
            'The text entered has been flagged as spam.')
        # The summary WILL be comment_check'd, even though it didn't change,
        # because we don't trust existing metadata when the previous versions
        # were unlisted.
        self.addon = self.addon.reload()
        assert AkismetReport.objects.count() == 2
        report = AkismetReport.objects.first()
        assert report.comment_type == 'product-name'
        assert report.comment == u'spám'
        assert text_type(self.addon.name) != u'spám'  # It wasn't changed
        report = AkismetReport.objects.last()
        assert report.comment_type == 'product-summary'
        assert report.comment == u'Delicious Bookmarks is the official'
        assert comment_check_mock.call_count == 2

    @override_switch('akismet-spam-check', active=True)
    @override_switch('akismet-addon-action', active=False)
    @mock.patch('olympia.lib.akismet.tasks.AkismetReport.comment_check')
    def test_akismet_spam_check_spam_logging_only(self, comment_check_mock):
        # With the action switch off, spam is only logged; the submission
        # still succeeds and the name change sticks.
        comment_check_mock.return_value = AkismetReport.MAYBE_SPAM
        data = self.get_dict(name=u'spám', summary=self.addon.summary)
        response = self.is_success(data)
        # The summary WILL be comment_check'd, even though it didn't change,
        # because we don't trust existing metadata when the previous versions
        # were unlisted.
        self.addon = self.addon.reload()
        assert AkismetReport.objects.count() == 2
        report = AkismetReport.objects.first()
        assert report.comment_type == 'product-name'
        assert report.comment == u'spám'
        assert text_type(self.addon.name) == u'spám'  # It changed
        report = AkismetReport.objects.last()
        assert report.comment_type == 'product-summary'
        assert report.comment == u'Delicious Bookmarks is the official'
        assert 'spam' not in response.content
        assert comment_check_mock.call_count == 2

    @override_switch('akismet-spam-check', active=True)
    @mock.patch('olympia.lib.akismet.tasks.AkismetReport.comment_check')
    def test_akismet_spam_check_ham(self, comment_check_mock):
        # A HAM verdict lets everything through.
        comment_check_mock.return_value = AkismetReport.HAM
        data = self.get_dict(name=u'spám', summary=self.addon.summary)
        response = self.is_success(data)
        # The summary WILL be comment_check'd, even though it didn't change,
        # because we don't trust existing metadata when the previous versions
        # were unlisted.
        self.addon = self.addon.reload()
        assert AkismetReport.objects.count() == 2
        report = AkismetReport.objects.first()
        assert report.comment_type == 'product-name'
        assert report.comment == u'spám'
        assert text_type(self.addon.name) == u'spám'  # It changed
        report = AkismetReport.objects.last()
        assert report.comment_type == 'product-summary'
        assert report.comment == u'Delicious Bookmarks is the official'
        assert 'spam' not in response.content
        assert comment_check_mock.call_count == 2

    @override_switch('akismet-spam-check', active=True)
    @mock.patch('olympia.lib.akismet.tasks.AkismetReport.comment_check')
    def test_akismet_spam_check_no_changes(self, comment_check_mock):
        comment_check_mock.return_value = AkismetReport.HAM
        # Don't change either name or summary from the upload.
        data = self.get_dict(name=self.addon.name, summary=self.addon.summary)
        response = self.is_success(data)
        # No changes but both values were spam checked.
        assert AkismetReport.objects.count() == 2
        assert 'spam' not in response.content
        assert comment_check_mock.call_count == 2
class TestVersionSubmitFinish(TestAddonSubmitFinish):
    """ Finish-step tests for version submission.  Inherits the add-on
    finish tests but disables all the welcome-email cases, which only
    apply to brand-new add-ons. """

    def setUp(self):
        super(TestVersionSubmitFinish, self).setUp()
        addon = self.get_addon()
        self.version = version_factory(
            addon=addon,
            channel=amo.RELEASE_CHANNEL_LISTED,
            license_id=addon.versions.latest().license_id,
            file_kw={'status': amo.STATUS_AWAITING_REVIEW})
        self.url = reverse('devhub.submit.version.finish',
                           args=[addon.slug, self.version.pk])

    @mock.patch('olympia.devhub.tasks.send_welcome_email.delay')
    def test_no_welcome_email(self, send_welcome_email_mock):
        """No emails for version finish."""
        self.client.get(self.url)
        assert not send_welcome_email_mock.called

    def test_addon_no_versions_redirects_to_versions(self):
        # No versions makes getting to this step difficult!
        pass

    # No emails for any of these cases so ignore them.
    def test_welcome_email_for_newbies(self):
        pass

    def test_welcome_email_first_listed_addon(self):
        pass

    def test_welcome_email_if_previous_addon_is_incomplete(self):
        pass

    def test_no_welcome_email_if_unlisted(self):
        pass
| {
"content_hash": "e6237279c126fbfa7765ab9c09e10ce1",
"timestamp": "",
"source": "github",
"line_count": 2540,
"max_line_length": 79,
"avg_line_length": 43.27165354330709,
"alnum_prop": 0.623792193612956,
"repo_name": "wagnerand/olympia",
"id": "5f5f7297646d037c731a85ce158463687d7d6acb",
"size": "109974",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/olympia/devhub/tests/test_views_submit.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "249"
},
{
"name": "CSS",
"bytes": "663668"
},
{
"name": "HTML",
"bytes": "1600904"
},
{
"name": "JavaScript",
"bytes": "1314155"
},
{
"name": "Makefile",
"bytes": "4235"
},
{
"name": "PLSQL",
"bytes": "74"
},
{
"name": "Python",
"bytes": "3996776"
},
{
"name": "Shell",
"bytes": "9101"
},
{
"name": "Smarty",
"bytes": "1930"
}
],
"symlink_target": ""
} |
""" This module reimplements Python's native threading module using Panda
threading constructs. It's designed as a drop-in replacement for the
threading module for code that works with Panda; it is necessary because
in some compilation models, Panda's threading constructs are
incompatible with the OS-provided threads used by Python's thread
module.
This module implements the threading module with a thin layer over
Panda's threading constructs. As such, the semantics are close to,
but not precisely, the semantics documented for Python's standard
threading module. If you really do require strict adherence to
Python's semantics, see the threading2 module instead.
However, if you don't need such strict adherence to Python's original
semantics, this module is probably a better choice. It is likely to
be slightly faster than the threading2 module (and even slightly faster
than Python's own threading module). It is also better integrated
with Panda's threads, so that Panda's thread debug mechanisms will be
easier to use and understand.
It is permissible to mix-and-match both threading and threading2
within the same application. """
from panda3d import core
from direct.stdpy import thread as _thread
import sys as _sys
import weakref
# Public API, mirroring the names exported by Python's own threading module.
__all__ = [
    'Thread',
    'Lock', 'RLock',
    'Condition',
    'Semaphore', 'BoundedSemaphore',
    'Event',
    'Timer',
    'local',
    'current_thread', 'currentThread',
    'enumerate', 'active_count', 'activeCount',
    'settrace', 'setprofile', 'stack_size',
    ]

# Thread-local storage and the thread-name generator are provided by the
# Panda-based thread module; re-export them here.
local = _thread._local
_newname = _thread._newname
class ThreadBase:
    """ A base class for both Thread and ExternalThread in this
    module. """

    def __init__(self):
        pass

    def getName(self):
        # 'name' is written into self.__dict__ by the subclasses, which
        # bypasses __setattr__ below.
        return self.name

    def is_alive(self):
        # NOTE(review): within ThreadBase, self.__thread mangles to
        # self._ThreadBase__thread, but the subclasses store the Panda
        # thread under their own mangled names (e.g. _Thread__thread).
        # Confirm this attribute is actually reachable from here.
        return self.__thread.isStarted()

    def isAlive(self):
        # Deprecated alias kept for threading-module compatibility.
        return self.__thread.isStarted()

    def isDaemon(self):
        return self.daemon

    def setDaemon(self, daemon):
        # Matching Python semantics: the daemon flag may only be changed
        # before the thread is started.
        if self.is_alive():
            raise RuntimeError

        # Write through __dict__ to avoid recursing into __setattr__.
        self.__dict__['daemon'] = daemon

    def __setattr__(self, key, value):
        # Route attribute-style access of 'name'/'daemon' through the
        # setter methods, and make 'ident' read-only.  Note that setName
        # is only defined on Thread, so assigning 'name' on other
        # subclasses will raise AttributeError.
        if key == 'name':
            self.setName(value)
        elif key == 'ident':
            raise AttributeError
        elif key == 'daemon':
            self.setDaemon(value)
        else:
            self.__dict__[key] = value
# Copy these static methods from Panda's Thread object. These are
# useful if you may be running in Panda's SIMPLE_THREADS compilation
# mode, where threads must yield cooperatively.
ThreadBase.forceYield = core.Thread.forceYield
ThreadBase.considerYield = core.Thread.considerYield
class Thread(ThreadBase):
    """ This class provides a wrapper around Panda's PythonThread
    object. The wrapper is designed to emulate Python's own
    threading.Thread object. """

    def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
        ThreadBase.__init__(self)

        # The 'group' parameter exists only for signature compatibility
        # with threading.Thread and must be None.
        assert group is None

        self.__target = target
        self.__args = args
        self.__kwargs = kwargs

        if not name:
            name = _newname()

        # Inherit the daemon flag from the creating thread, as Python does.
        current = current_thread()
        # Write through __dict__ to bypass ThreadBase.__setattr__, which
        # would otherwise route these through setName()/setDaemon().
        self.__dict__['daemon'] = current.daemon
        self.__dict__['name'] = name

        self.__thread = core.PythonThread(self.run, None, name, name)
        threadId = _thread._add_thread(self.__thread, weakref.proxy(self))
        self.__dict__['ident'] = threadId

    def __del__(self):
        # On interpreter shutdown, the _thread module might have
        # already been cleaned up.
        if _thread and _thread._remove_thread_id:
            _thread._remove_thread_id(self.ident)

    def start(self):
        """ Starts the thread.  Raises RuntimeError if it was already
        started, or if Panda fails to launch it. """
        if self.__thread.isStarted():
            raise RuntimeError

        if not self.__thread.start(core.TPNormal, True):
            raise RuntimeError

    def run(self):
        """ The thread entry point, invoked by Panda.  Installs any
        global trace/profile hooks, then calls the target callable.
        Subclasses may override this method instead of passing a
        target. """
        if _settrace_func:
            _sys.settrace(_settrace_func)
        if _setprofile_func:
            _sys.setprofile(_setprofile_func)

        # Bug fix: guard against a None target.  Python's
        # threading.Thread.run() is a no-op when no target was supplied;
        # previously this raised TypeError ("'NoneType' is not callable").
        if self.__target:
            self.__target(*self.__args, **self.__kwargs)

    def join(self, timeout = None):
        # We don't support a timed join here, sorry.
        assert timeout is None
        self.__thread.join()
        # Drop the Panda thread reference once it has finished.
        self.__thread = None

    def setName(self, name):
        self.__dict__['name'] = name
        self.__thread.setName(name)
class ExternalThread(ThreadBase):
    """ Returned for a Thread object that wasn't created by this
    interface. """

    def __init__(self, extThread, threadId):
        ThreadBase.__init__(self)

        # NOTE(review): this mangles to self._ExternalThread__thread, a
        # different attribute than the one ThreadBase's methods reference
        # -- confirm is_alive()/isAlive() work on these objects.
        self.__thread = extThread
        # Write through __dict__ to bypass ThreadBase.__setattr__.
        self.__dict__['daemon'] = True
        self.__dict__['name'] = self.__thread.getName()
        self.__dict__['ident'] = threadId

    # External threads are not owned by us: they cannot be started,
    # run, joined, or have their daemon flag changed.
    def start(self):
        raise RuntimeError

    def run(self):
        raise RuntimeError

    def join(self, timeout = None):
        raise RuntimeError

    def setDaemon(self, daemon):
        raise RuntimeError
class MainThread(ExternalThread):
    """ Returned for the MainThread object. """

    def __init__(self, extThread, threadId):
        ExternalThread.__init__(self, extThread, threadId)
        # Unlike other external threads, the main thread is never a daemon.
        self.__dict__['daemon'] = False
class Lock(core.Mutex):
    """ A drop-in stand-in for Python's threading.Lock, implemented as
    a thin layer over Panda's Mutex primitive. """

    def __init__(self, name = "PythonLock"):
        core.Mutex.__init__(self, name)

    def acquire(self, blocking = True):
        """ Acquires the lock, blocking until it is available.  With
        blocking false, returns immediately; the return value reports
        whether the lock was obtained. """
        if not blocking:
            return core.Mutex.tryAcquire(self)
        core.Mutex.acquire(self)
        return True

    # Entering a 'with' block is simply a blocking acquire.
    __enter__ = acquire

    def __exit__(self, t, v, tb):
        self.release()
class RLock(core.ReMutex):
    """ A drop-in stand-in for Python's threading.RLock, implemented as
    a thin layer over Panda's reentrant ReMutex primitive. """

    def __init__(self, name = "PythonRLock"):
        core.ReMutex.__init__(self, name)

    def acquire(self, blocking = True):
        """ Acquires the lock (reentrantly), blocking until available.
        With blocking false, returns immediately; the return value
        reports whether the lock was obtained. """
        if not blocking:
            return core.ReMutex.tryAcquire(self)
        core.ReMutex.acquire(self)
        return True

    # Entering a 'with' block is simply a blocking acquire.
    __enter__ = acquire

    def __exit__(self, t, v, tb):
        self.release()
class Condition(core.ConditionVarFull):
    """ This class provides a wrapper around Panda's ConditionVarFull
    object. The wrapper is designed to emulate Python's own
    threading.Condition object. """

    def __init__(self, lock = None):
        if not lock:
            lock = Lock()
        # Panda doesn't support RLock objects used with condition
        # variables.
        assert isinstance(lock, Lock)
        self.__lock = lock
        core.ConditionVarFull.__init__(self, self.__lock)

    def acquire(self, *args, **kw):
        # acquire/release delegate to the associated lock, as with
        # threading.Condition.
        return self.__lock.acquire(*args, **kw)

    def release(self):
        self.__lock.release()

    def wait(self, timeout = None):
        """Block until notified, or until `timeout` seconds elapse.

        The associated lock must already be held by the caller.
        """
        if timeout is None:
            core.ConditionVarFull.wait(self)
        else:
            core.ConditionVarFull.wait(self, timeout)

    def notifyAll(self):
        """Wake every thread currently waiting on this condition."""
        core.ConditionVarFull.notifyAll(self)
    notify_all = notifyAll

    __enter__ = acquire

    def __exit__(self, t, v, tb):
        self.release()
class Semaphore(core.Semaphore):
    """ This class provides a wrapper around Panda's Semaphore
    object. The wrapper is designed to emulate Python's own
    threading.Semaphore object. """

    def __init__(self, value = 1):
        core.Semaphore.__init__(self, value)

    def acquire(self, blocking = True):
        # Non-blocking mode maps onto Panda's tryAcquire(); a blocking
        # acquire always succeeds, matching threading.Semaphore.acquire().
        if not blocking:
            return core.Semaphore.tryAcquire(self)
        core.Semaphore.acquire(self)
        return True

    __enter__ = acquire

    def __exit__(self, t, v, tb):
        self.release()
class BoundedSemaphore(Semaphore):
    """ This class provides a wrapper around Panda's Semaphore
    object. The wrapper is designed to emulate Python's own
    threading.BoundedSemaphore object.

    release() raises ValueError if it would push the counter past the
    initial value, matching threading.BoundedSemaphore.
    """

    def __init__(self, value = 1):
        self.__max = value
        # Bug fix: the original called Semaphore.__init__(value), passing
        # the count as `self` and never initialising the base class.
        Semaphore.__init__(self, value)

    def release(self):
        # Bug fix: the original tested `self.getCount() > value`, where
        # `value` is undefined in this scope (NameError at runtime).
        # Refuse to release once the counter has reached the initial value.
        if self.getCount() >= self.__max:
            raise ValueError
        Semaphore.release(self)
class Event:
    """ This class is designed to emulate Python's own threading.Event
    object. """

    def __init__(self):
        # A plain mutex paired with a condition variable guards the flag.
        self.__lock = core.Lock("Python Event")
        self.__cvar = core.ConditionVarFull(self.__lock)
        self.__flag = False

    def is_set(self):
        # Unlocked read: a single boolean read is sufficient for the
        # advisory semantics threading.Event provides.
        return self.__flag
    isSet = is_set

    def set(self):
        """Set the internal flag and wake all waiting threads."""
        self.__lock.acquire()
        try:
            self.__flag = True
            self.__cvar.signalAll()
        finally:
            self.__lock.release()

    def clear(self):
        """Reset the internal flag to false."""
        self.__lock.acquire()
        try:
            self.__flag = False
        finally:
            self.__lock.release()

    def wait(self, timeout = None):
        """Block until the flag is set, or until `timeout` seconds elapse.

        NOTE(review): unlike threading.Event.wait(), this returns None
        rather than the flag value.
        """
        self.__lock.acquire()
        try:
            if timeout is None:
                while not self.__flag:
                    self.__cvar.wait()
            else:
                # Convert the relative timeout into an absolute deadline so
                # spurious wakeups do not extend the total wait time.
                clock = core.TrueClock.getGlobalPtr()
                expires = clock.getShortTime() + timeout
                while not self.__flag:
                    wait = expires - clock.getShortTime()
                    if wait < 0:
                        return
                    self.__cvar.wait(wait)
        finally:
            self.__lock.release()
class Timer(Thread):
    """Call a function after a specified number of seconds:

    t = Timer(30.0, f, args=[], kwargs={})
    t.start()
    t.cancel() # stop the timer's action if it's still waiting
    """

    def __init__(self, interval, function, args=None, kwargs=None):
        Thread.__init__(self)
        self.interval = interval
        self.function = function
        # Bug fix: the original used mutable default arguments
        # (args=[], kwargs={}), which are shared across all Timer
        # instances.  Defaulting to None and creating fresh containers
        # here is backward-compatible (CPython's threading.Timer received
        # the same fix).
        self.args = args if args is not None else []
        self.kwargs = kwargs if kwargs is not None else {}
        self.finished = Event()

    def cancel(self):
        """Stop the timer if it hasn't finished yet"""
        self.finished.set()

    def run(self):
        # Wait out the interval; only fire if cancel() didn't beat us.
        self.finished.wait(self.interval)
        if not self.finished.isSet():
            self.function(*self.args, **self.kwargs)
        self.finished.set()
def _create_thread_wrapper(t, threadId):
    """ Creates a thread wrapper for the indicated external thread. """
    # The interpreter's main thread gets its own wrapper type; any other
    # foreign thread becomes a generic ExternalThread.
    if isinstance(t, core.MainThread):
        return MainThread(t, threadId)
    return ExternalThread(t, threadId)
def current_thread():
    """Return the wrapper object for the currently executing thread."""
    panda_thread = core.Thread.getCurrentThread()
    return _thread._get_thread_wrapper(panda_thread, _create_thread_wrapper)

currentThread = current_thread
def enumerate():
    """Return a list of wrappers for all started threads.

    Intentionally shadows the builtin ``enumerate`` to mirror the
    standard ``threading`` module's API.
    """
    _thread._threadsLock.acquire()
    try:
        # Snapshot the registry (list(...)) so the dict can't change size
        # while we iterate; keep only started threads that have a wrapper.
        return [wrapper
                for thread, tlocals, wrapper in list(_thread._threads.values())
                if wrapper and thread.isStarted()]
    finally:
        _thread._threadsLock.release()
def active_count():
    """Return the number of currently started threads."""
    return len(enumerate())

activeCount = active_count
# Trace/profile callables recorded here are installed on every thread
# started afterwards, mirroring threading.settrace/setprofile.
_settrace_func = None
_setprofile_func = None

def settrace(func):
    """Record a trace function to install on subsequently started threads."""
    global _settrace_func
    _settrace_func = func

def setprofile(func):
    """Record a profile function to install on subsequently started threads."""
    global _setprofile_func
    _setprofile_func = func
def stack_size(size = None):
    """Unsupported; always raises ThreadError, like platforms where
    threading.stack_size() is unavailable."""
    raise ThreadError
if __debug__:
    def _test():
        # Self-test: the classic bounded producer/consumer exercise,
        # ported from CPython's threading module test harness.
        from collections import deque

        _sleep = core.Thread.sleep

        _VERBOSE = False

        class _Verbose(object):
            # Optional per-object debug tracing to stderr.

            def __init__(self, verbose=None):
                if verbose is None:
                    verbose = _VERBOSE
                self.__verbose = verbose

            def _note(self, format, *args):
                if self.__verbose:
                    format = format % args
                    format = "%s: %s\n" % (
                        currentThread().getName(), format)
                    _sys.stderr.write(format)

        class BoundedQueue(_Verbose):
            # FIFO queue holding at most `limit` items; one monitor lock
            # shared by two condition variables (reader-side and
            # writer-side).

            def __init__(self, limit):
                _Verbose.__init__(self)
                self.mon = Lock(name = "BoundedQueue.mon")
                self.rc = Condition(self.mon)
                self.wc = Condition(self.mon)
                self.limit = limit
                self.queue = deque()

            def put(self, item):
                self.mon.acquire()
                while len(self.queue) >= self.limit:
                    self._note("put(%s): queue full", item)
                    self.wc.wait()
                self.queue.append(item)
                self._note("put(%s): appended, length now %d",
                           item, len(self.queue))
                self.rc.notify()
                self.mon.release()

            def get(self):
                self.mon.acquire()
                while not self.queue:
                    self._note("get(): queue empty")
                    self.rc.wait()
                item = self.queue.popleft()
                self._note("get(): got %s, %d left", item, len(self.queue))
                self.wc.notify()
                self.mon.release()
                return item

        class ProducerThread(Thread):
            # Pushes `quota` items into the queue with tiny random sleeps.

            def __init__(self, queue, quota):
                Thread.__init__(self, name="Producer")
                self.queue = queue
                self.quota = quota

            def run(self):
                from random import random
                counter = 0
                while counter < self.quota:
                    counter = counter + 1
                    self.queue.put("%s.%d" % (self.getName(), counter))
                    _sleep(random() * 0.00001)

        class ConsumerThread(Thread):
            # Pops `count` items from the queue and prints them.

            def __init__(self, queue, count):
                Thread.__init__(self, name="Consumer")
                self.queue = queue
                self.count = count

            def run(self):
                while self.count > 0:
                    item = self.queue.get()
                    print(item)
                    self.count = self.count - 1

        # NP producers each push NI items through a queue bounded at QL;
        # a single consumer drains all NP*NI items.
        NP = 3
        QL = 4
        NI = 5

        Q = BoundedQueue(QL)
        P = []
        for i in range(NP):
            t = ProducerThread(Q, NI)
            t.setName("Producer-%d" % (i+1))
            P.append(t)
        C = ConsumerThread(Q, NI*NP)
        for t in P:
            t.start()
            _sleep(0.000001)
        C.start()
        for t in P:
            t.join()
        C.join()

if __name__ == '__main__':
    _test()
| {
"content_hash": "1f0827627302e591544b63f5d4fac098",
"timestamp": "",
"source": "github",
"line_count": 509,
"max_line_length": 79,
"avg_line_length": 28.204322200392927,
"alnum_prop": 0.5668013374198941,
"repo_name": "brakhane/panda3d",
"id": "4c0ce1e8fbddd06b6b3064cb0044f99de6faaf9f",
"size": "14356",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "direct/src/stdpy/threading.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4004"
},
{
"name": "C",
"bytes": "6395016"
},
{
"name": "C++",
"bytes": "31193551"
},
{
"name": "Emacs Lisp",
"bytes": "166274"
},
{
"name": "Groff",
"bytes": "3106"
},
{
"name": "HTML",
"bytes": "8081"
},
{
"name": "Java",
"bytes": "3777"
},
{
"name": "JavaScript",
"bytes": "7003"
},
{
"name": "Logos",
"bytes": "5504"
},
{
"name": "MAXScript",
"bytes": "1745"
},
{
"name": "NSIS",
"bytes": "91955"
},
{
"name": "Nemerle",
"bytes": "4403"
},
{
"name": "Objective-C",
"bytes": "30065"
},
{
"name": "Objective-C++",
"bytes": "300394"
},
{
"name": "Perl",
"bytes": "206982"
},
{
"name": "Perl6",
"bytes": "30636"
},
{
"name": "Puppet",
"bytes": "2627"
},
{
"name": "Python",
"bytes": "5530601"
},
{
"name": "Rebol",
"bytes": "421"
},
{
"name": "Shell",
"bytes": "55940"
},
{
"name": "Visual Basic",
"bytes": "136"
}
],
"symlink_target": ""
} |
import os
import sys
import pip
import shutil
import tempfile
from cloudify.utils import setup_logger
from cloudify.utils import LocalCommandRunner
from cloudify_agent.api import utils
from cloudify_agent.api import plugins
from cloudify_agent.api.utils import get_pip_path
from cloudify_agent.api import exceptions
class PluginInstaller(object):
    """Installs/uninstalls plugins into the current virtualenv via pip."""

    def __init__(self, logger=None):
        self.logger = logger or setup_logger(self.__class__.__name__)
        self.runner = LocalCommandRunner(logger=self.logger)

    def install(self, source, args=''):
        """
        Install the plugin to the current virtualenv.

        :param source: URL to the plugin. Any pip acceptable URL is ok.
        :param args: extra installation arguments passed to the pip command
        """
        # Absolute paths are used in place; anything else is treated as an
        # archive URL, extracted to a temp dir that we remove afterwards.
        extracted = not os.path.isabs(source)
        plugin_dir = None
        try:
            if extracted:
                self.logger.debug('Extracting archive: {0}'.format(source))
                plugin_dir = extract_package_to_dir(source)
            else:
                plugin_dir = source
            self.logger.debug('Installing from directory: {0} '
                              '[args={1}]'.format(plugin_dir, args))
            self.runner.run('{0} install {1} {2}'.format(
                get_pip_path(), plugin_dir, args), cwd=plugin_dir)
            package_name = extract_package_name(plugin_dir)
            self.logger.debug('Retrieved package name: {0}'
                              .format(package_name))
        finally:
            if plugin_dir and extracted:
                self.logger.debug('Removing directory: {0}'
                                  .format(plugin_dir))
                shutil.rmtree(plugin_dir)
        return package_name

    def uninstall(self, package_name, ignore_missing=True):
        """
        Uninstall the plugin from the current virtualenv. By default this
        operation will fail when trying to uninstall a plugin that is not
        installed, use `ignore_missing` to change this behavior.

        :param package_name: the package name as stated in the setup.py file
        :param ignore_missing: ignore failures in uninstalling missing plugins.
        """
        pip_path = utils.get_pip_path()
        if not ignore_missing:
            self.runner.run('{0} uninstall -y {1}'.format(
                pip_path, package_name))
            return
        # Only uninstall when `pip freeze` actually lists the package.
        out = self.runner.run('{0} freeze'.format(pip_path)).std_out
        packages = [line.split('==')[0] for line in out.splitlines()]
        if package_name in packages:
            self.runner.run('{0} uninstall -y {1}'.format(
                pip_path, package_name))
        else:
            self.logger.info('{0} not installed. Nothing to do'
                             .format(package_name))
def extract_package_to_dir(package_url):
    """
    Extracts a pip package to a temporary directory.

    :param package_url: the URL to the package source.
    :return: the directory the package was extracted to.
    :raises exceptions.PluginInstallationError: if downloading or
        unpacking fails (the temp directory is removed first).
    """
    plugin_dir = None
    try:
        plugin_dir = tempfile.mkdtemp()
        # check pip version and unpack plugin_url accordingly
        # NOTE(review): pip.download / pip.req / pip.index are pip-internal
        # APIs that were removed in later pip releases; this code only
        # works against the old pip versions it was written for.
        if is_pip6_or_higher():
            pip.download.unpack_url(link=pip.index.Link(package_url),
                                    location=plugin_dir,
                                    download_dir=None,
                                    only_download=False)
        else:
            req_set = pip.req.RequirementSet(build_dir=None,
                                             src_dir=None,
                                             download_dir=None)
            req_set.unpack_url(link=pip.index.Link(package_url),
                               location=plugin_dir,
                               download_dir=None,
                               only_download=False)
    except Exception as e:
        # Clean up the partially-populated temp dir before re-raising as a
        # domain-specific error.
        if plugin_dir and os.path.exists(plugin_dir):
            shutil.rmtree(plugin_dir)
        raise exceptions.PluginInstallationError(
            'Failed to download and unpack package from {0}: {1}'
            .format(package_url, str(e)))
    return plugin_dir
def is_pip6_or_higher(pip_version=None):
    """
    Determines if the pip version passed is higher than version 6.

    :param pip_version: the version of pip (defaults to the installed pip)
    :return: whether or not the version is higher than version 6.
    """
    # parse_pip_version validates the string and guarantees `major` is a
    # digit string; the verbose if/else returning True/False is redundant.
    major, minor, micro = parse_pip_version(pip_version)
    return int(major) >= 6
def parse_pip_version(pip_version=''):
    """
    Parses a pip version string to identify major, minor, micro versions.

    :param pip_version: the version of pip; when empty, the installed
                        pip's ``__version__`` is used.
    :return: major, minor, micro version of pip (micro is '' when absent)
    :rtype: tuple
    :raises exceptions.PluginInstallationError: on a missing, non-string
        or malformed version.
    """
    if not pip_version:
        try:
            pip_version = pip.__version__
        except AttributeError as e:
            raise exceptions.PluginInstallationError(
                'Failed to get pip version: ', str(e))
    # Bug fix: `basestring` only exists on Python 2, so this function
    # raised NameError under Python 3.  Fall back to `str` when
    # `basestring` is undefined.
    try:
        string_types = basestring
    except NameError:
        string_types = str
    if not isinstance(pip_version, string_types):
        raise exceptions.PluginInstallationError(
            'Invalid pip version: {0} is not a string'
            .format(pip_version))
    # Idiom fix: `'.' in s` replaces the original s.__contains__(".").
    if '.' not in pip_version:
        raise exceptions.PluginInstallationError(
            'Unknown formatting of pip version: "{0}", expected '
            'dot-delimited numbers (e.g. "1.5.4", "6.0")'
            .format(pip_version))
    version_parts = pip_version.split('.')
    major = version_parts[0]
    minor = version_parts[1]
    # Anything past the third component is ignored, as before.
    micro = version_parts[2] if len(version_parts) > 2 else ''
    if not major.isdigit():
        raise exceptions.PluginInstallationError(
            'Invalid pip version: "{0}", major version is "{1}" '
            'while expected to be a number'
            .format(pip_version, major))
    if not minor.isdigit():
        raise exceptions.PluginInstallationError(
            'Invalid pip version: "{0}", minor version is "{1}" while '
            'expected to be a number'
            .format(pip_version, minor))
    return major, minor, micro
def extract_package_name(package_dir):
    """
    Detects the package name of the package located at 'package_dir' as
    specified in the package setup.py file.

    :param package_dir: the directory the package was extracted to.
    :return: the package name
    """
    # Run the bundled helper script under this interpreter, inside the
    # package directory; it prints the distribution name to stdout.
    script = os.path.join(os.path.dirname(plugins.__file__),
                          'extract_package_name.py')
    command = '{0} {1} {2}'.format(sys.executable, script, package_dir)
    return LocalCommandRunner().run(command, cwd=package_dir).std_out
| {
"content_hash": "7f0947a2353d928298b86cef915f2b1f",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 79,
"avg_line_length": 32.82211538461539,
"alnum_prop": 0.5725794638933646,
"repo_name": "geokala/cloudify-agent",
"id": "9354e16eef4c141d89b634419d8c3139bc85814a",
"size": "7465",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cloudify_agent/api/plugins/installer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Inno Setup",
"bytes": "16035"
},
{
"name": "Makefile",
"bytes": "1033"
},
{
"name": "Python",
"bytes": "1759583"
},
{
"name": "Ruby",
"bytes": "10052"
},
{
"name": "Shell",
"bytes": "20353"
}
],
"symlink_target": ""
} |
"""
Django settings for messageboard project.
Generated by 'django-admin startproject' using Django 1.9.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ld0p5%jtpjn#kz$&m-s$zz49c9is1ba_f@ts7)$639q^f&(vgq'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
#'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'board',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'messageboard.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'messageboard.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| {
"content_hash": "fd7257ef43e9d4ed17172d5d320cfae9",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 91,
"avg_line_length": 26.237704918032787,
"alnum_prop": 0.6891596376132458,
"repo_name": "suomiy/message-board",
"id": "b3a30b0162aa193e36b5a838a9a1985de61c55b4",
"size": "3201",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "messageboard/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2707"
},
{
"name": "HTML",
"bytes": "3327"
},
{
"name": "Python",
"bytes": "12693"
}
],
"symlink_target": ""
} |
import ray
import pytest
from pyspark.sql.types import ArrayType, DoubleType
from pyspark.sql import SparkSession
@pytest.fixture(autouse=True, scope='package')
def orca_context_fixture():
    """Package-scoped fixture: start an Orca context and register the
    Spark SQL UDFs the tests rely on; stop the context on teardown."""
    from zoo.orca import init_orca_context, stop_orca_context
    sc = init_orca_context(cores=8)

    def to_array_(v):
        # Convert a Spark ML vector into a plain list of floats.
        return v.toArray().tolist()

    def flatten_(v):
        # Concatenate a sequence of vectors into one flat float list.
        flat = []
        for vec in v:
            flat.extend(vec.toArray().tolist())
        return flat

    spark = SparkSession(sc)
    # The registered names must stay stable: test SQL refers to them.
    spark.udf.register("to_array", to_array_, ArrayType(DoubleType()))
    spark.udf.register("flatten", flatten_, ArrayType(DoubleType()))
    yield
    stop_orca_context()
| {
"content_hash": "0ffa471e96934f0e806c80ece24de038",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 70,
"avg_line_length": 27.6,
"alnum_prop": 0.6666666666666666,
"repo_name": "intel-analytics/analytics-zoo",
"id": "2e9c4d91e45d7b309ff96fc3b5446ecc72203870",
"size": "1280",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyzoo/test/zoo/orca/learn/ray/pytorch/conftest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "73165"
},
{
"name": "Groovy",
"bytes": "1613"
},
{
"name": "Java",
"bytes": "209136"
},
{
"name": "Jupyter Notebook",
"bytes": "24437284"
},
{
"name": "Makefile",
"bytes": "11724"
},
{
"name": "PureBasic",
"bytes": "593"
},
{
"name": "Python",
"bytes": "4085490"
},
{
"name": "RobotFramework",
"bytes": "17467"
},
{
"name": "Scala",
"bytes": "3562801"
},
{
"name": "Shell",
"bytes": "413512"
}
],
"symlink_target": ""
} |
import unittest
import keymanagement
import tempfile
import os
from uuid import UUID
def createFakeKey():
    # Create a throw-away key file and return the file object.
    # NOTE(review): `f.close` below is missing its parentheses, so the file
    # is NEVER closed or flushed -- the payload sits in the buffer and the
    # file on disk is still EMPTY when this returns.  The test suite
    # depends on this bug: test_getKeyHashFromKey asserts the SHA-512 of
    # the empty string, and test_getKeyBytes writes more data to the
    # returned (still open) handle.  Fixing the typo would break those
    # tests, so it is only flagged here.
    f = tempfile.NamedTemporaryFile(delete=False)
    f.write("just a test")
    f.close
    return f
def clearFakeKey(f):
    """Delete a fake key file and its companion catalog (.yaml) file."""
    for path in (f.name, f.name + ".yaml"):
        os.unlink(path)
class SimplisticTest(unittest.TestCase):
    # Smoke tests for the keymanagement module.  Several tests depend on
    # createFakeKey() returning a still-open, unflushed file (its f.close
    # call lacks parentheses), so the key file is empty on disk.

    def test_getKeyHashFromKey(self):
        f = createFakeKey()
        # This digest is the SHA-512 of the EMPTY string -- consistent
        # with the key file's content never having been flushed to disk.
        assert keymanagement.getKeyHashFromKey(f.name) == "cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e"
        os.unlink(f.name)

    def test_catalog(self):
        # catalog() should create a sidecar <key>.yaml file.
        f = createFakeKey()
        keymanagement.catalog(f.name, True, force = True)
        assert os.path.exists(f.name+".yaml")
        clearFakeKey(f)

    def test_getCatalogUUID(self):
        # The catalog UUID must parse as a valid version-4 UUID.
        f = createFakeKey()
        keymanagement.catalog(f.name, True, force = True)
        assert UUID(keymanagement.getCatalogUUID(f.name).urn[9:], version=4)
        clearFakeKey(f)

    def test_getKeyBytes(self):
        f = createFakeKey()
        # Works only because createFakeKey() never closed the handle.
        f.write("0"*10000)
        keymanagement.catalog(f.name, True, force = True)
        k = keymanagement.getKeyBytes(f.name, 1, l2r=True, waste=True)
        # presumably 'j' is byte 0 of "just a test" -- TODO confirm the
        # offset/waste semantics of getKeyBytes.
        assert 'j' == k[0]
        clearFakeKey(f)

    def test_printable(self):
        f = tempfile.NamedTemporaryFile(delete=False)
        b = bytearray([150,150,150,150,150,0,0,0,0])
        f.write(b)
        f.close()
        keymanagement.catalog(f.name, True, force = True)
        assert "fuwfuwfuwfuwfuwaaaaaaaaaaaa" == keymanagement.printable(f.name)
        clearFakeKey(f)

if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "cd068c89d40012b43ca3fe2a7f01df53",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 188,
"avg_line_length": 27.032258064516128,
"alnum_prop": 0.6575178997613366,
"repo_name": "millaguie/Vernam",
"id": "29cd5644b5478f37a720ff1c7fa2167932572cc8",
"size": "1676",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vernam/test_keymanagement.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "33843"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import pytest
from sentry.utils.urls import non_standard_url_join
@pytest.mark.parametrize(
    "base,to_join,expected",
    [
        ("http://example.com/foo", "bar", "http://example.com/bar"),
        ("http://example.com/foo", "/bar", "http://example.com/bar"),
        ("https://example.com/foo", "/bar", "https://example.com/bar"),
        ("aps://example.com/foo", "/bar", "aps://example.com/bar"),
        ("apsunknown://example.com/foo", "/bar", "apsunknown://example.com/bar"),
        ("apsunknown://example.com/foo", "//aha/uhu", "apsunknown://aha/uhu"),
    ],
)
def test_non_standard_url_join(base, to_join, expected):
    # Joining `to_join` onto `base` must yield `expected`, including for
    # non-standard schemes (aps://, apsunknown://) that urllib's urljoin
    # does not recognize -- hence the "non_standard" helper under test.
    assert non_standard_url_join(base, to_join) == expected
| {
"content_hash": "ecacf6d14548b571e08ebe0665d2c82e",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 81,
"avg_line_length": 36.8,
"alnum_prop": 0.6127717391304348,
"repo_name": "beeftornado/sentry",
"id": "f0e9e339794ef58cc84ce6c9604e9930193afc62",
"size": "736",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/sentry/utils/test_urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "157195"
},
{
"name": "HTML",
"bytes": "197026"
},
{
"name": "JavaScript",
"bytes": "380379"
},
{
"name": "Makefile",
"bytes": "2832"
},
{
"name": "Python",
"bytes": "6473603"
}
],
"symlink_target": ""
} |
#numerical operations
#numbers
# Bare expressions: evaluated (and echoed when run in an interactive REPL).
3 + 5
3 * 5
#float - any number with a "." is considered float by python
3.1 + 5.9
"content_hash": "f14d888186696bd31410fdbee89c7e26",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 60,
"avg_line_length": 10.833333333333334,
"alnum_prop": 0.6,
"repo_name": "KT26/PythonCourse",
"id": "5da9a005e00165a54f5c52c88fd50a30a01565a3",
"size": "281",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "1. Getting Started/10.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "52736"
}
],
"symlink_target": ""
} |
from __future__ import print_function, unicode_literals
import re
from collections import namedtuple
class CqlSplitter(object):
"""
Makeshift CQL parser that can only split up multiple statements.
C* does not accept multiple DDL queries as a single string, as it can with
DML queries using batches. Hence, we must split up CQL files to run each
statement individually. Do that by using a simple Regex scanner, that just
recognizes strings, comments and delimiters, which is enough to split up
statements without tripping when semicolons are commented or escaped.
"""
Token = namedtuple('Token', 'tpe token')
LINE_COMMENT = 1
BLOCK_COMMENT = 2
STRING = 3
SEMICOLON = 4
OTHER = 5
WHITESPACE = 6
@classmethod
def scanner(cls):
if not getattr(cls, '_scanner', None):
def h(tpe):
return lambda sc, tk: cls.Token(tpe, tk)
cls._scanner = re.Scanner([
(r"(--|//).*?$", h(cls.LINE_COMMENT)),
(r"\/\*.+?\*\/", h(cls.BLOCK_COMMENT)),
(r'"(?:[^"\\]|\\.)*"', h(cls.STRING)),
(r"'(?:[^'\\]|\\.)*'", h(cls.STRING)),
(r"\$\$(?:[^\$\\]|\\.)*\$\$", h(cls.STRING)),
(r";", h(cls.SEMICOLON)),
(r"\s+", h(cls.WHITESPACE)),
(r".", h(cls.OTHER))
], re.MULTILINE | re.DOTALL)
return cls._scanner
@classmethod
def split(cls, query):
"""Split up content, and return individual statements uncommented"""
tokens, match = cls.scanner().scan(query)
cur_statement = ''
statements = []
for i, tk in enumerate(tokens):
if tk.tpe == cls.LINE_COMMENT:
pass
elif tk.tpe == cls.SEMICOLON:
stm = cur_statement.strip()
if stm:
statements.append(stm)
cur_statement = ''
elif tk.tpe in (cls.WHITESPACE, cls.BLOCK_COMMENT):
cur_statement += ' '
elif tk.tpe in (cls.STRING, cls.OTHER):
cur_statement += tk.token
stm = cur_statement.strip()
if stm:
statements.append(stm)
return statements
| {
"content_hash": "eda60bb2418964d010aa9e206685528c",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 78,
"avg_line_length": 34.56521739130435,
"alnum_prop": 0.5048218029350104,
"repo_name": "Cobliteam/cassandra-migrate",
"id": "12ae0d04629b2cef7372d38dd48a26ddbbb02fc2",
"size": "2404",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cassandra_migrate/cql.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "41696"
},
{
"name": "Shell",
"bytes": "194"
}
],
"symlink_target": ""
} |
import sys
import argparse
import numpy as np
def _parse_axis(axis, spec):
    """Validate one axis spec of the form 'start,stop,num'; exit on error.

    :param axis: axis letter ('x', 'y' or 'z'), used in error messages.
    :param spec: the raw comma-separated option value.
    :return: (samples, start, stop, num) where `samples` is the linspace
             array of `num` points from `start` to `stop`.
    """
    parts = spec.split(',')
    if len(parts) != 3:
        sys.exit('''
        Error: Must specify three values when using --{0}axis parameter.
        Syntax: --{0}axis=start,stop,num
        '''.format(axis))
    start, stop, num = float(parts[0]), float(parts[1]), int(parts[2])
    if start >= stop:
        sys.exit('''
        Error: Starting {0}-value of the sampling grid must be less
        than the ending {0}-value of the sampling grid.
        '''.format(axis))
    if num <= 0:
        sys.exit('''
        Error: Number of sampling points along the {0}-axis
        must be greater than zero.
        '''.format(axis))
    return np.linspace(start, stop, num), start, stop, num


def cli():
    """Parse axis options and write the sampling grid to 'grid.npz'.

    Builds a two-dimensional grid (x, y) or, when --zaxis is supplied, a
    three-dimensional grid (x, y, z).  Each axis takes the syntax
    start,stop,num with start < stop and num > 0.  Exits with an error
    message on invalid input; on success writes 'grid.npz' containing
    `ndim` plus one array per axis.

    The per-axis validation is factored into _parse_axis -- the original
    repeated the identical parse/validate/report sequence verbatim for
    each of the three axes.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--xaxis', type=str, required=True,
                        help='''specify the endpoint values of the sampling
                        grid along the x-axis and the number of sampling points
                        in between. Syntax: --xaxis=start,stop,num.''')
    parser.add_argument('--yaxis', type=str, required=True,
                        help='''specify the endpoint values of the sampling
                        grid along the y-axis and the number of sampling points
                        in between. Syntax: --yaxis=start,stop,num.''')
    parser.add_argument('--zaxis', type=str, required=False, default=None,
                        help='''specify the endpoint values of the sampling
                        grid along the z-axis and the number of sampling points
                        in between. Syntax: --zaxis=start,stop,num.''')
    args = parser.parse_args()

    axes = [('x', args.xaxis), ('y', args.yaxis)]
    if args.zaxis is not None:
        axes.append(('z', args.zaxis))

    grids = {}
    report = []
    for axis, spec in axes:
        samples, start, stop, num = _parse_axis(axis, spec)
        grids[axis] = samples
        report.append((axis, start, stop, num))

    ndim = len(axes)
    if ndim == 2:
        print('Setting up two-dimensional sampling grid...')
    else:
        print('Setting up three-dimensional sampling grid...')
    for axis, start, stop, num in report:
        print('grid @ {0}-axis : start = '.format(axis), start)
        print('grid @ {0}-axis : stop = '.format(axis), stop)
        print('grid @ {0}-axis : num = '.format(axis), num)
    np.savez('grid.npz', ndim=ndim, **grids)
| {
"content_hash": "34de157cc67ce4eef8bfa36c997da3da",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 83,
"avg_line_length": 45.703448275862065,
"alnum_prop": 0.4801569337558473,
"repo_name": "aaronprunty/starfish",
"id": "4bbd496cea2995e8d9e22a026ecfd2bf99aefa0e",
"size": "7300",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vezda/setSamplingGrid.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "97935"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import os
import boto3
import botocore
import json
import uuid
import datetime
# DynamoDB table name and its hash-key attribute name, injected via the
# Lambda deployment's environment variables.  A KeyError here (raised at
# import time) means the function is misconfigured.
DDB_TABLE = os.environ['DDB_TABLE']
TABLE_KEY = os.environ['TABLE_KEY']
def _respond(status_code, payload):
    """Build an API Gateway Lambda-proxy response (statusCode as a string,
    JSON-encoded body, JSON content-type header).

    Consolidates the five respond* helpers below, which previously
    duplicated this dict construction verbatim.
    """
    return {
        'statusCode': status_code,
        'body': json.dumps(payload),
        'headers': {
            'Content-Type': 'application/json',
        },
    }


def respondOK(res=None):
    """200 response; `res` (or null) is JSON-encoded as the body."""
    return _respond('200', res)


def respondMethodNotAllowed(msg):
    """405 response carrying {'error': msg}."""
    return _respond('405', {'error': msg})


def respondBadRequest(msg):
    """400 response carrying {'error': msg}."""
    return _respond('400', {'error': msg})


def respondConflict(msg):
    """409 response carrying {'error': msg}."""
    return _respond('409', {'error': msg})


def respondNotFound(msg):
    """404 response carrying {'error': msg}."""
    return _respond('404', {'error': msg})
def serialize_item(i):
    """Convert a DynamoDB item dict into a JSON-serializable dict.

    Exposes only the whitelisted fields; the optional 'nodes' attribute
    (a set in DynamoDB) is converted to a list so json.dumps can encode
    it, defaulting to [] when absent.
    """
    # dict.get replaces the original try/except-KeyError dance, and
    # list(...) replaces the copying comprehension [n for n in nodes].
    return {
        'created_at': i['created_at'],
        TABLE_KEY: i[TABLE_KEY],
        'nodes': list(i.get('nodes', [])),
    }
def lambda_handler(event, context):
    """AWS Lambda entry point for the rqlite disco service (API Gateway proxy).

    Routes:
      * no path resource      -> create and store a new disco ID (any method)
      * GET /<id>             -> return the stored item
      * POST/DELETE /<id>     -> add/remove a node address in the item's set
      * anything else         -> 405

    Returns an API Gateway proxy response dict (see the respond* helpers).
    """
    dynamo = boto3.resource('dynamodb').Table(DDB_TABLE)
    operation = event['httpMethod']
    resource = event['pathParameters']
    # If there is no resource, then create a new disco ID.
    if resource is None:
        # uuid1 is time-based, so IDs sort roughly by creation time.
        disco_id = str(uuid.uuid1())
        item = {
            TABLE_KEY: disco_id,
            'created_at': str(datetime.datetime.utcnow()),
        }
        dynamo.put_item(Item=item)
        return respondOK(serialize_item(item))
    # A resource has been supplied -- access it.
    disco_id = resource['proxy']
    try:
        item = dynamo.get_item(Key={TABLE_KEY: disco_id}, ConsistentRead=True)['Item']
    except KeyError:
        # get_item returns no 'Item' key when nothing matched.
        return respondNotFound('%s does not exist' % disco_id)
    if operation == 'GET':
        return respondOK(serialize_item(item))
    elif operation in ('POST', 'DELETE'):
        # Get the node address from the request body.
        try:
            body = json.loads(event['body'])
        except (TypeError, ValueError):
            # TypeError: body is None; ValueError: malformed JSON.
            # (Was a bare "except:" which also swallowed KeyboardInterrupt etc.)
            return respondBadRequest('bad request body')
        try:
            addr = body['addr']
        except KeyError:
            return respondBadRequest('address not specified')
        # All good, modify the set of nodes in place.
        key = {TABLE_KEY: item[TABLE_KEY]}
        if operation == 'POST':
            expr = 'ADD nodes :n'
        else:
            expr = 'DELETE nodes :n'
        dynamo.update_item(
            Key=key,
            UpdateExpression=' '.join([expr, 'SET updated_at=:m']),
            ExpressionAttributeValues={
                ':n': set([addr]),
                ':m': str(datetime.datetime.utcnow()),
            }
        )
        # Re-read so the response reflects the update (consistent read).
        item = dynamo.get_item(Key={TABLE_KEY: disco_id}, ConsistentRead=True)['Item']
        return respondOK(serialize_item(item))
    return respondMethodNotAllowed('unsupported method "{}"'.format(operation))
| {
"content_hash": "583ccaf344a9d5b81f3df04327a95258",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 96,
"avg_line_length": 27.12213740458015,
"alnum_prop": 0.537573881227132,
"repo_name": "rqlite/rqlite-disco",
"id": "5a2512c266e4080a1f768ad52c080292f0c83249",
"size": "3553",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "handler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3553"
}
],
"symlink_target": ""
} |
#!/usr/bin/env python
'''
@date 2015-10-07
A script to convert glyphs of characters in *.po to u8glib supported
font array and prepare for U8Gettext.
It do these things :
1. Extract all characters from inputed po files
2. Find characters' glyphs from inputed font
3. Generate glyphs to u8glib font array to C/C++ sources
@author Hong-She Liang <starofrainnight@gmail.com>
'''
import os
import io
import struct
import six
import bdflib.reader
import argparse
import glob
import polib
import uuid
import os.path
"""
A class just for fix problem that bdflib can't recognize the new style
iterator in python3.
"""
class IteratorFixer(object):
def __init__(self, iterator):
self.__iterator = iterator
def __iter__(self):
return self.__iterator
def next(self):
return six.next(self.__iterator)
def get_font_properties(file_path):
    """Parse the STARTPROPERTIES/ENDPROPERTIES section of a BDF font file.

    Returns a dict mapping each property name to its (string) value, e.g.
    ``{"FONT_ASCENT": "14", ...}``.  Returns None when no STARTPROPERTIES
    line is found before ENDPROPERTIES (or end of file).

    Fixes vs. the original:
      * the file handle is now closed (with-statement);
      * the readline()-based ``while(1)`` loop ran forever on files lacking
        an ENDPROPERTIES line (readline() returns "" indefinitely at EOF);
        iterating the file object terminates at EOF.
    """
    properties = None
    with open(file_path, "r") as font_file:
        for line in font_file:
            if 'ENDPROPERTIES' in line:
                break
            if 'STARTPROPERTIES' in line:
                properties = dict()
                continue
            # Skip everything before the properties section starts.
            if properties is None:
                continue
            # Property lines are "NAME VALUE"; only the first value token is
            # kept, matching the original behavior.
            parts = line.strip().split()
            properties[parts[0]] = parts[1]
    return properties
def encode_as_c_string(text):
    """Encode *text* as a C string literal body (UTF-8, octal escapes).

    Printable ASCII characters (32..126) pass through unchanged; every other
    byte of the UTF-8 encoding becomes a three-digit octal escape.

    Fixes vs. the original:
      * the parameter no longer shadows the builtin ``str``;
      * iterating ``bytearray(...)`` yields ints on both Python 2 and 3,
        removing the need for ``six.indexbytes``.
    """
    result = []
    for ord_c in bytearray(text.encode("utf-8")):
        if 32 <= ord_c <= 126:
            result.append(chr(ord_c))
        else:
            # Arduino Serial.print() have a strange behavior :
            # If we pass "\x0AFrom this" string to output, it will not
            # correct output with a new line character and string
            # "From this", it will output "\xafrom this". Seems if we
            # have any hex number followed \x0XX, it can't parse correct.
            # So we use octal number instead.
            result.append("\\%03o" % ord_c)
    return ''.join(result)
def generate_languages_source(po_file_paths, utf32_to_u8gchar_mappings):
    """Generate the C/C++ source text for the translation tables.

    Emits, as one string:
      * ``sCharMappings`` -- UTF-32 code point -> u8glib encoding pairs,
        sorted by code point (the mapping dict's values must already be
        filled in by the caller, e.g. gen_data());
      * per-language ``sMsgId*``/``sMsgStr*`` string variables plus a
        ``sTranslations<lang>`` table (one language per .po file, named
        after the file's basename);
      * the ``sLanguages`` table tying the languages together.
    """
    result = []
    # Generate character mapping item (from utf32 to u8glib character)
    utf32_keys = [ord(key) for key in six.iterkeys(utf32_to_u8gchar_mappings)]
    utf32_keys.sort()
    result.append("static const U8GettextCharMapping sCharMappings[] U8G_SECTION(\".progmem.U8GettextsCharMappings\") = \n{")
    for key in utf32_keys:
        line = "\t{0x%08X, 0x%02X,}, " % (key, utf32_to_u8gchar_mappings[six.unichr(key)])
        result.append(line)
    result.append("};")
    result.append("static const size_t sCharMappingCount = "
        "ITEM_COUNT_OF_ARRAY(sCharMappings);")
    po_index = 0
    for file_path in po_file_paths:
        # Language name is taken from the .po file name, e.g. "zh_CN.po".
        language_name = os.path.splitext(os.path.basename(file_path))[0]
        po_file = polib.pofile(file_path)
        translated_entries = po_file.translated_entries()
        # Generate translations for each language
        # We have to generate variables for each text, otherwise the compiler
        # won't compile them into program memory!
        i = 0
        for entry in translated_entries:
            result.append('static const char sMsgId%s_%s[] PROGMEM = "%s";' % (
                po_index, i, encode_as_c_string(entry.msgid)))
            result.append('static const char sMsgStr%s_%s[] PROGMEM = "%s";' % (
                po_index, i, encode_as_c_string(entry.msgstr)))
            i += 1
        result.append('static const U8GettextTranslation sTranslations%s[] U8G_SECTION(".progmem.U8GettextsTranslations") = \n{' % language_name)
        i = 0
        for entry in translated_entries:
            result.append('\t{(const U8GFChar*)&sMsgId%s_%s[0], (const U8GFChar*)&sMsgStr%s_%s[0]},' % (
                po_index, i, po_index, i))
            i += 1
        result.append("};")
        result.append("static const size_t sTranslationsLength%(language)s = "
            "ITEM_COUNT_OF_ARRAY(sTranslations%(language)s);" % {"language":language_name})
        po_index += 1
    # Generate languages
    result.append("static const U8GettextLanguage sLanguages[] = \n{")
    for file_path in po_file_paths:
        language_name = os.path.splitext(os.path.basename(file_path))[0]
        # NOTE(review): no trailing comma is emitted after each sLanguages
        # initializer, so the generated C likely fails to compile with more
        # than one .po file -- confirm before fixing, as it changes output.
        result.append('\t{"%(language)s", '
            'sTranslations%(language)s, '
            '&sTranslationsLength%(language)s}' %
            {"language":language_name})
    result.append("};")
    result.append("static const size_t sLanguagesLength = "
        "ITEM_COUNT_OF_ARRAY(sLanguages);")
    return "\n".join(result)
def gather_characters_from_po_files(po_file_paths):
    """Collect the set of unique characters used by the given .po files.

    Always includes all visible ASCII characters (32..126); on top of that,
    every character appearing in any msgid or msgstr of both translated and
    untranslated entries is added.
    """
    # Seed with the visible ASCII range -- those glyphs must always exist.
    characters = {six.unichr(code) for code in six.moves.range(32, 127)}
    for po_path in po_file_paths:
        po_file = polib.pofile(po_path)
        entries = po_file.translated_entries() + po_file.untranslated_entries()
        for entry in entries:
            # set.update iterates the strings character by character.
            characters.update(entry.msgid)
            characters.update(entry.msgstr)
    return characters
def gen_data():
    """Console entry point: build U8GettextData.cpp from a BDF font + .po files.

    Pipeline:
      1. parse CLI args (-f font, -p po glob, -o output path);
      2. gather every character used by the .po files;
      3. assign each character a compact u8glib encoding (starting at 1) and
         extract its glyph from the BDF font into a u8glib font byte array;
      4. write the generated C/C++ source (translations + font + context).
    """
    program_description = 'A script to convert glyphs of characters in *.po to u8glib supported font array'
    parser = argparse.ArgumentParser(description=program_description)
    parser.add_argument(
        '-f',
        '--font',
        required=True,
        help='A bdf font which we will extract glyphs from.')
    parser.add_argument(
        '-p',
        '--po',
        required=True,
        help='Gettext generated PO files pattern')
    parser.add_argument(
        '-o',
        '--output',
        default='U8GettextData.cpp',
        help='Output C/C++ source file path')
    # If the arguments not enough or not fit for the arguments formats, program
    # will exit from here
    args = parser.parse_args()
    po_file_paths = glob.glob(args.po)
    # Analyse all charactes from *.po
    characters = gather_characters_from_po_files(po_file_paths)
    # Dict character -> u8glib encoding; values are filled in below.
    characters = dict.fromkeys(characters, None)
    font_data = []
    # Load font details from bdf file
    unifont_iterator = IteratorFixer(iter(open(args.font, "r").readlines()))
    unifont = bdflib.reader.read_bdf(unifont_iterator)
    unifont_properties = get_font_properties(args.font)
    first_glyph = unifont.glyphs[0]
    uppercase_a_height = 0
    uppercase_a_start = 0
    lowercase_a_start = 0
    # First encoding must not be 0, because u8glib depends on 0 to end
    # the string.
    encoding_start = 1
    encoding_end = encoding_start + len(characters)
    # FIXME We don't have these properties
    font_xascent = 0
    font_xdecent = 0
    if ord('A') in unifont:
        uppercase_a_height = unifont[ord('A')].bbH
    # TODO Give value to upper case and lower case 'A' start position
    # u8glib font header (big-endian). NOTE(review): encoding_start/end are
    # packed as unsigned bytes ("B"), so more than ~254 distinct characters
    # will make struct.pack raise -- confirm the intended limit.
    header = struct.pack(">BBBbbBHHBBbbbbb",
        0,
        first_glyph.bbW, first_glyph.bbH, first_glyph.bbX, first_glyph.bbY,
        uppercase_a_height, uppercase_a_start, lowercase_a_start,
        encoding_start, encoding_end,
        int(unifont_properties["FONT_DESCENT"]),
        int(unifont_properties["FONT_ASCENT"]),
        int(unifont_properties["FONT_DESCENT"]),
        font_xascent, font_xdecent,
    )
    font_data.append(header)
    # Assign u8glib encodings in sorted character order so the mapping table
    # produced by generate_languages_source() is deterministic.
    u8g_encoding = encoding_start - 1
    utf32_keys = [key for key in six.iterkeys(characters)]
    utf32_keys.sort()
    for acharacter in utf32_keys:
        u8g_encoding += 1
        characters[acharacter] = u8g_encoding
        encoding = ord(acharacter)
        glyph = unifont[encoding]
        # Per-glyph header: width, height, byte count, advance, offsets.
        glyph_header = struct.pack(">BBBbbb",
            glyph.bbW, glyph.bbH,
            int((glyph.bbW + 7) // 8 * glyph.bbH),
            glyph.bbW,
            glyph.bbX, glyph.bbY,
        )
        glyph_data = []
        # Each row is a hex string; take two hex digits per byte.
        for row_pixels in glyph.get_data():
            for i in range(0, len(row_pixels), 2):
                glyph_data.append(chr(int(row_pixels[i:i+2], 16)))
        glyph_data = six.b(''.join(glyph_data))
        font_data.append(glyph_header)
        font_data.append(glyph_data)
    font_data = six.b('').join(font_data)
    # Generate font data C/C++ text data
    font_data_source = []
    for i in range(0, len(font_data)):
        # 16 byte values per line of generated C.
        if (i % 16) == 0:
            font_data_source.append("\n ")
        data_item = "0x%02X" % six.indexbytes(font_data, i)
        data_item = data_item + "," + " " * (5 - len(data_item))
        font_data_source.append(data_item)
    font_data_source = "".join(font_data_source)
    # Generate C/C++ source files
    source_file_name = os.path.basename(args.output)
    source_file_dir = os.path.dirname(args.output)
    source_file_basename, source_file_ext = os.path.splitext(source_file_name)
    # NOTE(review): header_file_name/path are computed but never used; header
    # generation was intentionally dropped (see comment at end of function).
    header_file_name = "%s.h" % source_file_basename
    header_file_path = os.path.join(source_file_dir, header_file_name)
    font_varaint_name = "sFont"
    source_file = open(args.output, "wb")
    # Generate source file header
    source_file.write(six.b("""
/*
 * Auto generated by u8gettext-gen-data.py
 */
#include <U8Gettext.h>
#include <U8glib.h>
#ifndef ITEM_COUNT_OF_ARRAY
#define ITEM_COUNT_OF_ARRAY(array) (sizeof((array)) / sizeof((array)[0]))
#endif // #ifndef ITEM_COUNT_OF_ARRAY
"""))
    # Generate origin text directly to u8glib font text table
    source_file.write(generate_languages_source(po_file_paths, characters).encode("utf-8"))
    # Generate font data
    source_file.write(six.b("""
static const u8g_fntpgm_uint8_t %(font_varaint_name)s[] U8G_SECTION(".progmem.U8Gettext%(font_varaint_name)s") =
{
%(font_data_source)s
};
static const size_t %(font_varaint_name)sEncodingCount = ITEM_COUNT_OF_ARRAY(%(font_varaint_name)s);
""" % {"font_varaint_name":font_varaint_name,
"font_data_source":font_data_source,
}))
    # Generate U8Gettext context struct variants
    source_file.write(six.b("""
const U8GettextContext __gU8GettextContext
{
\tsLanguages,
\t&sLanguagesLength,
\tsFont,
\t&sFontEncodingCount,
\tsCharMappings,
\t&sCharMappingCount,
};
"""
))
    source_file.close()
    # Don't generate header files, they will be included in U8Gettext
    # library.
| {
"content_hash": "760852079b37e97d86df269ffdf09e5a",
"timestamp": "",
"source": "github",
"line_count": 340,
"max_line_length": 145,
"avg_line_length": 30.773529411764706,
"alnum_prop": 0.6251553091847463,
"repo_name": "starofrainnight/u8gettext",
"id": "8bee431e51c02d202fce547f90d5201deda1bbc3",
"size": "10463",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "u8gettext/console_scripts.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "17046"
}
],
"symlink_target": ""
} |
import os
import sys
# Make the parent directory importable so sphinx.ext.autodoc can locate the
# package being documented (presumably the project root -- confirm layout).
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',    # pull API docs from docstrings on sys.path
    'sphinx.ext.doctest',    # run doctest snippets embedded in the docs
    'sphinx.ext.todo',       # .. todo:: directives (see todo_include_todos)
    'sphinx.ext.mathjax',    # render math with MathJax in HTML output
    'sphinx.ext.viewcode',   # link documented objects to highlighted source
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ExpEYES-Blocks'
copyright = u'2017, Georges Khaznadar'
author = u'Georges Khaznadar'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.4'
# The full version, including alpha/beta/rc tags.
# NOTE: keep "release" in sync with "version" when bumping the project.
release = u'0.4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'ExpEYES-Blocks v0.4'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'ExpEYES-Blocksdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'ExpEYES-Blocks.tex', u'ExpEYES-Blocks Documentation',
u'Georges Khaznadar', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'expeyes-blocks', u'ExpEYES-Blocks Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'ExpEYES-Blocks', u'ExpEYES-Blocks Documentation',
author, 'ExpEYES-Blocks', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
| {
"content_hash": "7679783f4a94f298af4dfbb5f10cdac9",
"timestamp": "",
"source": "github",
"line_count": 326,
"max_line_length": 80,
"avg_line_length": 28.466257668711656,
"alnum_prop": 0.688146551724138,
"repo_name": "csparkresearch/ExpEYES17-Qt",
"id": "79f15defa9e7bb23edd10e68e77d881f2a0fe562",
"size": "9947",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blocks/doc/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "19579"
},
{
"name": "HTML",
"bytes": "35913"
},
{
"name": "JavaScript",
"bytes": "667"
},
{
"name": "Makefile",
"bytes": "15356"
},
{
"name": "Python",
"bytes": "621843"
},
{
"name": "Ruby",
"bytes": "1448"
},
{
"name": "Shell",
"bytes": "822"
}
],
"symlink_target": ""
} |
import os
import tarfile
from contextlib import closing
from . import helpers
def gzip_then_store(filename, archivename):
    """Pack *filename* into the gzipped tar *archivename* and checksum it.

    Creates (or overwrites) *archivename* as a ``.tar.gz`` containing the
    single file, records its checksum via helpers.add_checksum, and returns
    the archive path.
    """
    # TarFile is a context manager itself (Python 2.7+), so the
    # contextlib.closing() wrapper was redundant.
    with tarfile.open(archivename, 'w:gz') as tar:
        tar.add(filename)
    helpers.add_checksum(archivename)
    return archivename
def Backup(cfg, rcfiles):
    """Archive every regular (non-symlink) rc file into the backup directory.

    For each name in *rcfiles*, resolves the file via helpers.get_rcfile and,
    if it exists and is not a symlink, stores it as a checksummed .tar.gz
    under cfg's ``dirs/backup-dir`` (created if needed).

    Returns the list of archive paths that were written.
    """
    backupfiles = []
    backupdir = cfg.get('dirs', 'backup-dir')
    helpers.ensure_dir_exists(backupdir)
    for rc in rcfiles:
        orifile = helpers.get_rcfile(cfg, rc)
        # Symlinks are skipped: presumably they already point at managed
        # copies, so only real files need backing up.
        if os.path.lexists(orifile) and not os.path.islink(orifile):
            # (Dropped the unused "fname = os.path.basename(orifile)" local.)
            archivename = helpers.get_backupfile(cfg, rc)
            backupfiles.append(gzip_then_store(orifile, archivename))
    return backupfiles
| {
"content_hash": "853d4e450625f12c752dec4b3512b3e6",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 69,
"avg_line_length": 33.63636363636363,
"alnum_prop": 0.6837837837837838,
"repo_name": "fudanchii/archie",
"id": "0a0df6c5a2e715551cd154f591094904e0ef5de7",
"size": "740",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "archie/backup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11385"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import logging
import re
from streamlink.exceptions import FatalPluginError, NoStreamsError, PluginError
from streamlink.plugin import Plugin, PluginArguments, PluginArgument
from streamlink.plugin.api import useragents, validate
from streamlink.stream import HLSStream
from streamlink.utils.encoding import maybe_decode
log = logging.getLogger(__name__)
class Pixiv(Plugin):
    """Plugin for https://sketch.pixiv.net/lives"""
    # Matches channel URLs with or without a leading "@"; "user" captures the
    # streamer's unique name.
    _url_re = re.compile(r"https?://sketch\.pixiv\.net/@?(?P<user>[^/]+)")
    # Extracts the hidden "post_key" input from the login page HTML; it must
    # be echoed back in the login POST.
    _post_key_re = re.compile(
        r"""name=["']post_key["']\svalue=["'](?P<data>[^"']+)["']""")
    # One broadcaster entry; "hls_movie" is optional because a listed
    # performer may not have a live HLS stream yet.
    _user_dict_schema = validate.Schema(
        {
            "user": {
                "unique_name": validate.text,
                "name": validate.all(validate.text,
                                     validate.transform(maybe_decode))
            },
            validate.optional("hls_movie"): {
                "url": validate.text
            }
        }
    )
    # A live entry: the stream owner plus any number of co-hosts.
    _user_schema = validate.Schema(
        {
            "owner": _user_dict_schema,
            "performers": [
                validate.any(_user_dict_schema, None)
            ]
        }
    )
    # Top-level lives.json payload, unwrapped down to the list of lives.
    _data_lives_schema = validate.Schema(
        {
            "data": {
                "lives": [_user_schema]
            }
        },
        validate.get("data"),
        validate.get("lives")
    )
    api_lives = "https://sketch.pixiv.net/api/lives.json"
    login_url_get = "https://accounts.pixiv.net/login"
    login_url_post = "https://accounts.pixiv.net/api/login"
    arguments = PluginArguments(
        PluginArgument(
            "username",
            requires=["password"],
            metavar="USERNAME",
            help="""
    The email/username used to register with pixiv.net
    """
        ),
        PluginArgument(
            "password",
            sensitive=True,
            metavar="PASSWORD",
            help="""
    A pixiv.net account password to use with --pixiv-username
    """
        ),
        PluginArgument(
            "purge-credentials",
            action="store_true",
            help="""
    Purge cached Pixiv credentials to initiate a new session
    and reauthenticate.
    """),
        PluginArgument(
            "performer",
            metavar="USER",
            help="""
    Select a co-host stream instead of the owner stream.
    """)
    )
    def __init__(self, url):
        """Set up session headers and detect a cached authenticated session."""
        super(Pixiv, self).__init__(url)
        # Both cookies must be present for the cached session to count.
        self._authed = (self.session.http.cookies.get("PHPSESSID")
                        and self.session.http.cookies.get("device_token"))
        self.session.http.headers.update({
            "User-Agent": useragents.FIREFOX,
            "Referer": self.url
        })
    @classmethod
    def can_handle_url(cls, url):
        """Return True if *url* is a sketch.pixiv.net channel URL."""
        return cls._url_re.match(url) is not None
    def _login(self, username, password):
        """Log in to pixiv and cache the session cookies on success.

        Fetches the login page to scrape the post_key token, then POSTs the
        credentials.  Raises PluginError if the token cannot be found.
        """
        res = self.session.http.get(self.login_url_get)
        m = self._post_key_re.search(res.text)
        if not m:
            # NOTE(review): "posible" typo in this user-facing message; fixing
            # it would change runtime output, so it is left as-is here.
            raise PluginError("Missing post_key, no login posible.")
        post_key = m.group("data")
        data = {
            "lang": "en",
            "source": "sketch",
            "post_key": post_key,
            "pixiv_id": username,
            "password": password,
        }
        res = self.session.http.post(self.login_url_post, data=data)
        res = self.session.http.json(res)
        log.trace("{0!r}".format(res))
        if res["body"].get("success"):
            self.save_cookies()
            log.info("Successfully logged in")
        else:
            log.error("Failed to log in.")
    def hls_stream(self, hls_url):
        """Yield (quality, stream) pairs from the HLS variant playlist."""
        log.debug("URL={0}".format(hls_url))
        for s in HLSStream.parse_variant_playlist(self.session, hls_url).items():
            yield s
    def get_streamer_data(self):
        """Return the lives.json entry whose owner matches the URL's user.

        Raises NoStreamsError when the user is not currently listed as live.
        """
        res = self.session.http.get(self.api_lives)
        data = self.session.http.json(res, schema=self._data_lives_schema)
        log.debug("Found {0} streams".format(len(data)))
        m = self._url_re.match(self.url)
        for item in data:
            if item["owner"]["user"]["unique_name"] == m.group("user"):
                return item
        raise NoStreamsError(self.url)
    def _get_streams(self):
        """Resolve the owner's (or a selected co-host's) HLS streams.

        Handles credential purging, cookie/password authentication, and --
        when co-hosts exist and --pixiv-performer is missing or invalid --
        interactively asks which host to watch.
        """
        login_username = self.get_option("username")
        login_password = self.get_option("password")
        if self.options.get("purge_credentials"):
            self.clear_cookies()
            self._authed = False
            log.info("All credentials were successfully removed.")
        if self._authed:
            log.debug("Attempting to authenticate using cached cookies")
        elif not self._authed and login_username and login_password:
            self._login(login_username, login_password)
        streamer_data = self.get_streamer_data()
        performers = streamer_data.get("performers")
        log.trace("{0!r}".format(streamer_data))
        if performers:
            co_hosts = []
            # create a list of all available performers
            for p in performers:
                co_hosts += [(p["user"]["unique_name"], p["user"]["name"])]
            log.info("Available hosts: {0}".format(", ".join(
                ["{0} ({1})".format(k, v) for k, v in co_hosts])))
            # control if the host from --pixiv-performer is valid,
            # if not let the User select a different host
            if (self.get_option("performer")
                    and not self.get_option("performer") in [v[0] for v in co_hosts]):
                # print the owner as 0
                log.info("0 - {0} ({1})".format(
                    streamer_data["owner"]["user"]["unique_name"],
                    streamer_data["owner"]["user"]["name"]))
                # print all other performer
                for i, item in enumerate(co_hosts, start=1):
                    log.info("{0} - {1} ({2})".format(i, item[0], item[1]))
                try:
                    number = int(self.input_ask(
                        "Enter the number you'd like to watch").split(" ")[0])
                    if number == 0:
                        # default stream
                        self.set_option("performer", None)
                    else:
                        # other co-hosts
                        self.set_option("performer", co_hosts[number - 1][0])
                except FatalPluginError:
                    raise PluginError("Selected performer is invalid.")
                except (IndexError, ValueError, TypeError):
                    raise PluginError("Input is invalid")
        # ignore the owner stream, if a performer is selected
        # or use it when there are no other performers
        if not self.get_option("performer") or not performers:
            return self.hls_stream(streamer_data["owner"]["hls_movie"]["url"])
        # play a co-host stream
        if performers and self.get_option("performer"):
            for p in performers:
                if p["user"]["unique_name"] == self.get_option("performer"):
                    # if someone goes online at the same time as Streamlink
                    # was used, the hls URL might not be in the JSON data
                    hls_movie = p.get("hls_movie")
                    if hls_movie:
                        return self.hls_stream(hls_movie["url"])
__plugin__ = Pixiv
| {
"content_hash": "419a1c1c4a29135047f8f5fdd34538b4",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 86,
"avg_line_length": 34.93427230046948,
"alnum_prop": 0.5284235989786319,
"repo_name": "back-to/streamlink",
"id": "c1f836851a998cb904c6714c19c67de4309216bb",
"size": "7465",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/streamlink/plugins/pixiv.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "Python",
"bytes": "1451380"
},
{
"name": "Shell",
"bytes": "18044"
}
],
"symlink_target": ""
} |
import threading
import time
def producer_thread():
    """Poll the shared ``exit_event`` every 10 seconds until it is set.

    Fix: the original looped unconditionally (``while(True)``) so the thread
    could never terminate -- even after the event fired.  The loop now exits
    once the event is set, and print() is used so the code runs on both
    Python 2 and 3.

    NOTE(review): ``exit_event`` is expected to be a module-level
    threading.Event, but it is never created anywhere in this file.
    """
    global exit_event
    while not exit_event.is_set():
        print('Waiting for exit event..')
        # wait() returns early as soon as the event is set.
        exit_event.wait(10)
def consumer_thread():
    """Poll the shared ``exit_event`` every 10 seconds until it is set.

    Fix: same as producer_thread -- the original ``while(True)`` loop never
    terminated even after the event was set; print() replaces the Python-2
    print statement.

    NOTE(review): ``exit_event`` is expected to be a module-level
    threading.Event, but it is never created anywhere in this file.
    """
    global exit_event
    while not exit_event.is_set():
        print('Waiting for exit event..')
        exit_event.wait(10)
def main():
    # NOTE(review): this creates a Thread with no target, never starts or
    # joins it, and never wires up producer_thread/consumer_thread or the
    # module-level exit_event they rely on -- the file reads as an unfinished
    # threading exercise, so the scaffold is documented rather than "fixed".
    t1 = threading.Thread()
if __name__ == '__main__':
    main()
"content_hash": "ccc6f8b1c5c27fa8c4e16b88980f77e0",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 40,
"avg_line_length": 18.142857142857142,
"alnum_prop": 0.5853018372703412,
"repo_name": "devvenk/pypractice",
"id": "737f12790e6d351b6f3279c02e440e494647ebe2",
"size": "381",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "threadevent.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18607"
}
],
"symlink_target": ""
} |
"""
sphinxcontrib.devhelp
~~~~~~~~~~~~~~~~~~~~~
Build HTML documentation and Devhelp_ support files.
.. _Devhelp: https://wiki.gnome.org/Apps/Devhelp
:copyright: Copyright 2007-2019 by the Sphinx team, see README.
:license: BSD, see LICENSE for details.
"""
import gzip
import re
from os import path
from typing import Any, Dict
from docutils import nodes
from sphinx import addnodes
from sphinx.application import Sphinx
from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.environment.adapters.indexentries import IndexEntries
from sphinx.locale import get_translation
from sphinx.util import logging
from sphinx.util.nodes import NodeMatcher
from sphinx.util.osutil import make_filename
from sphinxcontrib.devhelp.version import __version__
try:
import xml.etree.ElementTree as etree
except ImportError:
import lxml.etree as etree # type: ignore
if False:
# For type annotation
from typing import List # NOQA
logger = logging.getLogger(__name__)
__ = get_translation(__name__, 'console')
package_dir = path.abspath(path.dirname(__file__))
class DevhelpBuilder(StandaloneHTMLBuilder):
    """
    Builder that also outputs GNOME Devhelp file.

    Produces the normal standalone HTML output plus a gzipped
    ``<basename>.devhelp.gz`` XML index (book metadata, TOC chapters, and a
    flat function index) that the Devhelp browser consumes.
    """
    name = 'devhelp'
    epilog = __('To view the help file:\n'
                '$ mkdir -p $HOME/.local/share/devhelp/books\n'
                '$ ln -s $PWD/%(outdir)s $HOME/.local/share/devhelp/books/%(project)s\n'
                '$ devhelp')
    # don't copy the reST source
    copysource = False
    supported_image_types = ['image/png', 'image/gif', 'image/jpeg']
    # don't add links
    add_permalinks = False
    # don't add sidebar etc.
    embedded = True
    def init(self):
        # type: () -> None
        # Force plain ".html" suffixes regardless of project configuration.
        super().init()
        self.out_suffix = '.html'
        self.link_suffix = '.html'
    def handle_finish(self):
        # type: () -> None
        # Called by Sphinx after all HTML pages are written.
        self.build_devhelp(self.outdir, self.config.devhelp_basename)
    def build_devhelp(self, outdir, outname):
        # type: (str, str) -> None
        """Write the gzipped Devhelp XML index to <outdir>/<outname>.devhelp.gz."""
        logger.info(__('dumping devhelp index...'))
        # Basic info
        root = etree.Element('book',
                             title=self.config.html_title,
                             name=self.config.project,
                             link="index.html",
                             version=self.config.version)
        tree = etree.ElementTree(root)
        # TOC
        chapters = etree.SubElement(root, 'chapters')
        tocdoc = self.env.get_and_resolve_doctree(
            self.config.master_doc, self, prune_toctrees=False)
        def write_toc(node, parent):
            # type: (nodes.Node, etree.Element) -> None
            # Recursively mirror the doctree TOC: containers recurse in place,
            # list items open a nested <sub>, references fill in link/name.
            if isinstance(node, addnodes.compact_paragraph) or \
               isinstance(node, nodes.bullet_list):
                for subnode in node:
                    write_toc(subnode, parent)
            elif isinstance(node, nodes.list_item):
                item = etree.SubElement(parent, 'sub')
                for subnode in node:
                    write_toc(subnode, item)
            elif isinstance(node, nodes.reference):
                parent.attrib['link'] = node['refuri']
                parent.attrib['name'] = node.astext()
        matcher = NodeMatcher(addnodes.compact_paragraph, toctree=Any)
        for node in tocdoc.traverse(matcher):  # type: addnodes.compact_paragraph
            write_toc(node, chapters)
        # Index
        functions = etree.SubElement(root, 'functions')
        index = IndexEntries(self.env).create_index(self)
        def write_index(title, refs, subitems):
            # type: (str, List[Any], Any) -> None
            # One <function> per target; ambiguous entries (several refs) get
            # a "[n] title" disambiguating prefix. Subitems are flattened by
            # prefixing the parent title (minus any trailing parenthetical).
            if len(refs) == 0:
                pass
            elif len(refs) == 1:
                etree.SubElement(functions, 'function',
                                 name=title, link=refs[0][1])
            else:
                for i, ref in enumerate(refs):
                    etree.SubElement(functions, 'function',
                                     name="[%d] %s" % (i, title),
                                     link=ref[1])
            if subitems:
                parent_title = re.sub(r'\s*\(.*\)\s*$', '', title)
                for subitem in subitems:
                    write_index("%s %s" % (parent_title, subitem[0]),
                                subitem[1], [])
        for (key, group) in index:
            for title, (refs, subitems, key) in group:
                write_index(title, refs, subitems)
        # Dump the XML file
        xmlfile = path.join(outdir, outname + '.devhelp.gz')
        with gzip.open(xmlfile, 'w') as f:
            tree.write(f, 'utf-8')
def setup(app: Sphinx) -> Dict[str, Any]:
    """Register the devhelp builder with Sphinx and return extension metadata."""
    app.setup_extension('sphinx.builders.html')
    app.add_builder(DevhelpBuilder)
    app.add_message_catalog(__name__, path.join(package_dir, 'locales'))
    # Default output basename is derived from the project name.
    app.add_config_value('devhelp_basename', lambda self: make_filename(self.project), None)
    metadata = {
        'version': __version__,
        'parallel_read_safe': True,
        'parallel_write_safe': True,
    }
    return metadata
| {
"content_hash": "566c60159f6e03898b8b0f0f9946815a",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 92,
"avg_line_length": 32.63461538461539,
"alnum_prop": 0.5704183853859752,
"repo_name": "lmregus/Portfolio",
"id": "6972378702b7e2b4259964a616870471ad5b67b1",
"size": "5091",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/design_patterns/env/lib/python3.7/site-packages/sphinxcontrib/devhelp/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "27682"
},
{
"name": "C++",
"bytes": "25458"
},
{
"name": "CSS",
"bytes": "12842"
},
{
"name": "HTML",
"bytes": "49171"
},
{
"name": "Java",
"bytes": "99711"
},
{
"name": "JavaScript",
"bytes": "827"
},
{
"name": "Python",
"bytes": "42857"
},
{
"name": "Shell",
"bytes": "5710"
}
],
"symlink_target": ""
} |
import importlib
from .settings import singleton
@singleton
class LazyLoader:
    """Import modules (or attributes of modules) on demand and cache them.

    Each import is stored under an alias; subsequent ``import_`` calls with
    the same alias return the cached object instead of re-importing.
    """

    def __init__(self):
        # alias -> imported module or attribute
        self.__import_dict = {}

    @property
    def import_dict(self):
        """The alias -> imported-object cache (mutable mapping)."""
        return self.__import_dict

    @import_dict.setter
    def import_dict(self, key, value):
        # NOTE(review): a property setter receives exactly one value, so this
        # three-argument signature can never be invoked through attribute
        # assignment (``obj.import_dict = x`` raises TypeError).  Kept
        # byte-compatible to preserve the public interface; confirm whether
        # it can be removed or turned into a regular method.
        self.__import_dict[key] = value

    def import_(self, name, alias=None, parent=None, return_=True):
        """Import ``name`` (or ``parent.name``) and cache it under ``alias``.

        :param name: module name, or attribute/submodule name if ``parent``
            is given.
        :param alias: cache key; defaults to ``name``.
        :param parent: optional package to import ``name`` from
            (``from parent import name``).
        :param return_: when True (default), return the imported object.
        """
        if alias is None:
            alias = name
        if alias in self.__import_dict and return_:
            return self.__import_dict[alias]
        if parent is not None:
            # Equivalent of ``from <parent> import <name>`` without exec():
            # the old exec()+locals() trick stops working under PEP 667
            # (Python 3.13) and needlessly executes generated code.
            module = importlib.import_module(parent)
            try:
                self.__import_dict[alias] = getattr(module, name)
            except AttributeError:
                # ``name`` is a submodule that the parent does not expose as
                # an attribute until it is imported explicitly.
                self.__import_dict[alias] = importlib.import_module(
                    "{}.{}".format(parent, name))
        else:
            self.__import_dict[alias] = importlib.import_module(name)
        if return_:
            return self.__import_dict[alias]

    def __getitem__(self, key):
        """Look up a previously imported object by its alias."""
        return self.__import_dict[key]
| {
"content_hash": "ed46b626f663ab60a4d10676fb24df54",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 71,
"avg_line_length": 26.696969696969695,
"alnum_prop": 0.6299659477866061,
"repo_name": "captain-pool/GSOC",
"id": "60a5cab4bd7ce53fafddbf4b7f870e2796164ea5",
"size": "881",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "E3_Distill_ESRGAN/libs/lazy_loader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "129354"
},
{
"name": "Shell",
"bytes": "4163"
},
{
"name": "Starlark",
"bytes": "2954"
}
],
"symlink_target": ""
} |
class RuleFinalNewline(object):
    """Lint rule: a non-empty file must end with a newline character."""

    NAME = "Final Newline"

    @staticmethod
    def get_description(line_numbers):
        """Return a human-readable description, pluralized for multiple hits."""
        result = "Missing final newline in file"
        if len(line_numbers) > 1:
            result += "s"
        return result

    @staticmethod
    def rule(path, lines):
        """Check *lines* of the file at *path*; return offending line numbers.

        Returns ``[len(lines)]`` (the last line) when the final newline is
        missing, else an empty list.
        """
        if len(lines) == 0:  # Empty files are not considered by the standard to be lacking a newline.
            return []
        # endswith() also copes with an empty final element, where the
        # original ``lines[-1][-1]`` indexing raised IndexError.
        if not lines[-1].endswith("\n"):
            return [len(lines)]
        return []
| {
"content_hash": "4b74f591ce1c4b980674ff3d3ccac0de",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 100,
"avg_line_length": 29.764705882352942,
"alnum_prop": 0.5592885375494071,
"repo_name": "imallett/QualityCpp",
"id": "3406278c5b9b4b3fac444939127d171fe05acdb0",
"size": "1111",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rules/final_newline.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "2150"
},
{
"name": "Python",
"bytes": "41368"
}
],
"symlink_target": ""
} |
def lint_command(tool_xml, lint_ctx):
    """Lint the ``<command>`` element of a tool XML document.

    :param tool_xml: an ``ElementTree`` for the tool definition.
    :param lint_ctx: object exposing ``error``/``warn``/``info`` collectors.

    Emits an error for zero or multiple command tags, a warning for TODO
    text or unknown attributes, and an info line describing the command.
    """
    root = tool_xml.getroot()
    commands = root.findall("command")
    if len(commands) > 1:
        lint_ctx.error("More than one command tag found, behavior undefined.")
        return
    if len(commands) == 0:
        lint_ctx.error("No command tag found, must specify a command template to execute.")
        return
    command = commands[0]
    # Inspect the template text itself: ``"TODO" in command`` would test
    # membership against the element's *children* (never a string match),
    # so the warning could never fire.
    if "TODO" in (command.text or ""):
        lint_ctx.warn("Command template contains TODO text.")
    command_attrib = command.attrib
    interpreter_type = None
    for key, value in command_attrib.items():
        if key == "interpreter":
            interpreter_type = value
        else:
            lint_ctx.warn("Unknown attribute [%s] encountered on command tag." % key)
    interpreter_info = ""
    if interpreter_type:
        interpreter_info = " with interpreter of type [%s]" % interpreter_type
    lint_ctx.info("Tool contains a command%s." % interpreter_info)
| {
"content_hash": "f0907ddf551f76de98466e19a46b5540",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 91,
"avg_line_length": 35.407407407407405,
"alnum_prop": 0.6338912133891214,
"repo_name": "ssorgatem/pulsar",
"id": "d22a482242ac262701da68b9fc3a0cc6886af732",
"size": "958",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "galaxy/tools/linters/command.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "241"
},
{
"name": "Makefile",
"bytes": "3661"
},
{
"name": "Python",
"bytes": "792577"
},
{
"name": "Shell",
"bytes": "12640"
}
],
"symlink_target": ""
} |
from datetime import datetime
from instagram_utils import extract_shortcode, get_images_near_some_other_image_via_shortcode
from os import environ, makedirs
from os.path import join
from shutil import copytree
import argparse
import json
# Fall back to None when the variable is unset instead of raising KeyError;
# the TOKENHELPMSG fallback below clearly expects a missing token to be falsy.
DEFAULT_TOKEN = environ.get('INSTAGRAM_TOKEN')
TOKENHELPMSG = "Default is %s" % DEFAULT_TOKEN if DEFAULT_TOKEN else "(no default set)"
# Search-window defaults: minutes before/after the target photo's timestamp,
# and search radius in meters.
DEFAULT_BEFORE_MIN = 30
DEFAULT_AFTER_MIN = 240
DEFAULT_DISTANCE_M = 500
def beliebe(shortcode, args):
    """Fetch Instagram images taken near the photo identified by *shortcode*.

    ``args`` is the parsed argparse namespace; its token, time-window and
    distance options are translated into keyword arguments for the search
    helper.  Returns whatever the helper returns (the nearby image records).
    """
    search_kwargs = {
        'access_token': args.token,
        'seconds_before': args.minutes_before * 60,
        'seconds_after': args.minutes_after * 60,
        'dist_m': args.distance_in_meters,
    }
    return get_images_near_some_other_image_via_shortcode(
        shortcode, **search_kwargs)
if __name__ == '__main__':
    # CLI entry point: fetch images taken near a target Instagram photo and
    # build a small static page (from ./template) to browse them.
    parser = argparse.ArgumentParser()
    parser.add_argument("shortcode", nargs = 1,
        help = "Instagram web URL/shortcode")
    parser.add_argument("--token", '-t', default = DEFAULT_TOKEN,
        help = "Instagram access token %s" % TOKENHELPMSG)
    parser.add_argument("--minutes-before", '-b', default = DEFAULT_BEFORE_MIN,
        type = int,
        help = "Limit search to photos X minutes-or-less before target photo's timestamp. Default is %s" % DEFAULT_BEFORE_MIN)
    parser.add_argument("--minutes-after", '-a', default = DEFAULT_AFTER_MIN,
        type = int,
        help = "Limit search to photos X minutes-or-less after target photo's timestamp. Default is %s" % DEFAULT_AFTER_MIN)
    # NOTE(review): the help text below reads "Limit search to photos ." —
    # looks garbled; confirm intended wording.
    parser.add_argument("--distance-in-meters", '-d', default = DEFAULT_DISTANCE_M,
        type = int,
        help = "Limit search to photos . Default is within X number of meters from target photo location. Default is %s" % DEFAULT_DISTANCE_M)
    args = parser.parse_args()
    # Accept either a bare shortcode or a full instagram.com URL.
    shortcode = extract_shortcode(args.shortcode[0])
    print("Fetching images near %s" % shortcode)
    nearby_images = beliebe(shortcode, args)
    # Timestamped output directory, e.g. ./pages/<code>--2016-01-01_120000
    pdir = "./pages/" + shortcode + '--' + datetime.now().strftime("%Y-%m-%d_%H%M%S")
    # save into directory
    # copytree() requires that ``pdir`` not exist yet; the timestamp suffix
    # makes collisions unlikely.
    copytree('./template', pdir)
    datadir = join(pdir, 'data')
    makedirs(datadir)
    with open(join(datadir, 'images.json'), 'w') as fd:
        json.dump(nearby_images, fd, indent = 2)
    print("""
Run:
    python3 -m http.server
In your browser, visit:
    http://localhost:8000/{page_path}
""".format(page_path = pdir[2:]))
| {
"content_hash": "ebcf5d8cd506af430f5ddf0c323537ce",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 142,
"avg_line_length": 37.294117647058826,
"alnum_prop": 0.6612776025236593,
"repo_name": "dannguyen/seeing-is-beliebing",
"id": "6898f555298b332408176f9ec2470e18d5fcbf28",
"size": "2536",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "beliebe.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "85"
},
{
"name": "HTML",
"bytes": "489"
},
{
"name": "JavaScript",
"bytes": "117351"
},
{
"name": "Python",
"bytes": "8553"
}
],
"symlink_target": ""
} |
"""Tests for views for REST APIs for frontpage"""
# pylint: disable=unused-argument
import pytest
from django.urls import reverse
from rest_framework import status
from channels.constants import POSTS_SORT_HOT, VALID_POST_SORT_TYPES
from channels.views.test_utils import (
default_post_response_data,
raise_error_on_submission_fetch,
raise_error_on_subreddit_fetch,
)
pytestmark = pytest.mark.betamax
def test_frontpage_empty(client, logged_in_profile):
    """test that frontpage is empty with no subscriptions"""
    response = client.get(reverse("frontpage"))
    assert response.status_code == status.HTTP_200_OK
    expected = {"posts": [], "pagination": {"sort": POSTS_SORT_HOT}}
    assert response.json() == expected
@pytest.mark.parametrize("missing_user", [True, False])
def test_frontpage(
    mocker, user_client, private_channel_and_contributor, reddit_factories, missing_user
):
    """View the front page"""
    # NOTE(review): ``missing_user`` is not referenced in the body —
    # presumably it varies the recorded betamax cassette; confirm.
    channel, user = private_channel_and_contributor
    # Create four posts; the frontpage should list them newest-first.
    first_post = reddit_factories.text_post("my post", user, channel=channel)
    second_post = reddit_factories.text_post("my 2nd post", user, channel=channel)
    third_post = reddit_factories.text_post("my 3rd post", user, channel=channel)
    fourth_post = reddit_factories.text_post("my 4th post", user, channel=channel)
    url = reverse("frontpage")
    # Guard against any live reddit fetches during the request.
    with raise_error_on_submission_fetch(mocker), raise_error_on_subreddit_fetch(
        mocker
    ):
        resp = user_client.get(url)
    assert resp.status_code == status.HTTP_200_OK
    assert resp.json() == {
        "posts": [
            default_post_response_data(channel, post, user)
            for post in [fourth_post, third_post, second_post, first_post]
        ],
        "pagination": {"sort": POSTS_SORT_HOT},
    }
@pytest.mark.parametrize("sort", VALID_POST_SORT_TYPES)
def test_frontpage_sorted(
    mocker, user_client, private_channel_and_contributor, reddit_factories, sort
):
    """View the front page with sorted options"""
    # note: these sort types are difficult to reproduce unique sort orders in the span of a test,
    # so we're just checking that the APIs don't error
    channel, user = private_channel_and_contributor
    first_post = reddit_factories.text_post("my post", user, channel=channel)
    second_post = reddit_factories.text_post("my 2nd post", user, channel=channel)
    third_post = reddit_factories.text_post("my 3rd post", user, channel=channel)
    fourth_post = reddit_factories.text_post("my 4th post", user, channel=channel)
    url = reverse("frontpage")
    # Guard against any live reddit fetches during the request.
    with raise_error_on_submission_fetch(mocker), raise_error_on_subreddit_fetch(
        mocker
    ):
        resp = user_client.get(url, {"sort": sort})
    assert resp.status_code == status.HTTP_200_OK
    # The expected order matches the creation order reversed (newest first);
    # the requested sort is echoed back in the pagination payload.
    assert resp.json() == {
        "posts": [
            default_post_response_data(channel, post, user)
            for post in [fourth_post, third_post, second_post, first_post]
        ],
        "pagination": {"sort": sort},
    }
@pytest.mark.parametrize(
    "params,expected",
    [
        # No cursor: first page, only an "after" cursor comes back.
        ({}, {"after": "t3_3", "after_count": 5}),
        # Middle pages: both "before" and "after" cursors are present.
        # Cursors look like reddit fullnames ("t3_<id>") — presumably
        # produced by the recorded cassette data.
        (
            {"after": "t3_3", "count": "5"},
            {"after": "t3_7", "after_count": 10, "before": "t3_e", "before_count": 6},
        ),
        (
            {"after": "t3_a", "count": "3"},
            {"after": "t3_b", "after_count": 8, "before": "t3_9", "before_count": 4},
        ),
        # Paging backwards from the second page lands on the first page again.
        ({"before": "t3_e", "count": "6"}, {"after": "t3_3", "after_count": 5}),
    ],
)
def test_frontpage_pagination(
    mocker, client, logged_in_profile, settings, params, expected
):  # pylint: disable=too-many-arguments
    """Test that post pagination works"""
    # Shrink the page size so pagination kicks in with few posts.
    settings.OPEN_DISCUSSIONS_CHANNEL_POST_LIMIT = 5
    url = reverse("frontpage")
    with raise_error_on_submission_fetch(mocker), raise_error_on_subreddit_fetch(
        mocker
    ):
        resp = client.get(url, params)
    # The default sort is always reported alongside the cursors.
    expected["sort"] = POSTS_SORT_HOT
    assert resp.status_code == status.HTTP_200_OK
    assert resp.json()["pagination"] == expected
def test_frontpage_anonymous(mocker, client, public_channel):
    """Anonymous users should be able to see the front page"""
    with raise_error_on_submission_fetch(mocker), raise_error_on_subreddit_fetch(
        mocker
    ):
        resp = client.get(reverse("frontpage"))
    assert resp.status_code == status.HTTP_200_OK
    payload = resp.json()
    assert payload["pagination"] == {"sort": POSTS_SORT_HOT}
    # Since the front page is shared between all channels it's hard to assert reproduceable results
    assert isinstance(payload["posts"], list) is True
| {
"content_hash": "5e7b66d1709383da446c7f5497dfe658",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 99,
"avg_line_length": 38.425,
"alnum_prop": 0.6484493602255476,
"repo_name": "mitodl/open-discussions",
"id": "f1fae90c39a7a8fedcbd569167f3d24dcce57cfd",
"size": "4611",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "channels/views/frontpage_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "1040"
},
{
"name": "HTML",
"bytes": "78316"
},
{
"name": "JavaScript",
"bytes": "1704037"
},
{
"name": "Procfile",
"bytes": "675"
},
{
"name": "Python",
"bytes": "2264549"
},
{
"name": "SCSS",
"bytes": "133442"
},
{
"name": "Shell",
"bytes": "11787"
},
{
"name": "TypeScript",
"bytes": "307134"
}
],
"symlink_target": ""
} |
from abc import ABC, abstractmethod
from typing import Dict, List, Optional, Union
from great_expectations.rule_based_profiler.domain import SemanticDomainTypes
class SemanticTypeFilter(ABC):
    """Interface for normalizing semantic-type arguments and exposing the
    inferred semantic domain type of each table column."""
    @abstractmethod
    def parse_semantic_domain_type_argument(
        self,
        semantic_types: Optional[
            Union[str, SemanticDomainTypes, List[Union[str, SemanticDomainTypes]]]
        ] = None,
    ) -> List[SemanticDomainTypes]:
        """Normalize ``semantic_types`` — a single string/enum member or a
        list of them — into a list of ``SemanticDomainTypes``."""
        pass
    @property
    @abstractmethod
    def table_column_name_to_inferred_semantic_domain_type_map(
        self,
    ) -> Dict[str, SemanticDomainTypes]:
        """Mapping from table column name to its inferred semantic domain type."""
        pass
| {
"content_hash": "1d24f85e151431880118f1f303d7076b",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 82,
"avg_line_length": 28.40909090909091,
"alnum_prop": 0.6864,
"repo_name": "great-expectations/great_expectations",
"id": "d54e79757d5bc783518dc8378ee310696147a9bf",
"size": "625",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "great_expectations/rule_based_profiler/semantic_type_filter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "23771"
},
{
"name": "Dockerfile",
"bytes": "2388"
},
{
"name": "HTML",
"bytes": "27311"
},
{
"name": "JavaScript",
"bytes": "45960"
},
{
"name": "Jinja",
"bytes": "66650"
},
{
"name": "Jupyter Notebook",
"bytes": "816323"
},
{
"name": "Lua",
"bytes": "3489"
},
{
"name": "Makefile",
"bytes": "657"
},
{
"name": "Python",
"bytes": "15728777"
},
{
"name": "Shell",
"bytes": "2930"
}
],
"symlink_target": ""
} |
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon.utils import validators
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.loadbalancers import utils
# Protocol and load-balancing-method choices offered by the forms below.
AVAILABLE_PROTOCOLS = ('HTTP', 'HTTPS', 'TCP')
AVAILABLE_METHODS = ('ROUND_ROBIN', 'LEAST_CONNECTIONS', 'SOURCE_IP')
class AddPoolAction(workflows.Action):
    """Form action collecting the attributes needed to create a LBaaS pool."""
    name = forms.CharField(max_length=80, label=_("Name"))
    description = forms.CharField(
        initial="", required=False,
        max_length=80, label=_("Description"))
    # provider is optional because some LBaaS implementation does
    # not support service-type extension.
    provider = forms.ChoiceField(label=_("Provider"), required=False)
    subnet_id = forms.ChoiceField(label=_("Subnet"))
    protocol = forms.ChoiceField(label=_("Protocol"))
    lb_method = forms.ChoiceField(label=_("Load Balancing Method"))
    admin_state_up = forms.BooleanField(label=_("Admin State"),
                                        initial=True, required=False)

    def __init__(self, request, *args, **kwargs):
        """Populate the subnet/protocol/method/provider choice fields."""
        super(AddPoolAction, self).__init__(request, *args, **kwargs)
        tenant_id = request.user.tenant_id
        subnet_id_choices = [('', _("Select a Subnet"))]
        try:
            networks = api.neutron.network_list_for_tenant(request, tenant_id)
        except Exception:
            exceptions.handle(request,
                              _('Unable to retrieve networks list.'))
            networks = []
        for n in networks:
            for s in n['subnets']:
                subnet_id_choices.append((s.id, s.cidr))
        self.fields['subnet_id'].choices = subnet_id_choices
        # extend() instead of a list comprehension executed purely for its
        # append() side effect.
        protocol_choices = [('', _("Select a Protocol"))]
        protocol_choices.extend((p, p) for p in AVAILABLE_PROTOCOLS)
        self.fields['protocol'].choices = protocol_choices
        lb_method_choices = [('', _("Select a Method"))]
        lb_method_choices.extend((m, m) for m in AVAILABLE_METHODS)
        self.fields['lb_method'].choices = lb_method_choices
        # provider choice
        try:
            if api.neutron.is_extension_supported(request, 'service-type'):
                provider_list = api.neutron.provider_list(request)
                providers = [p for p in provider_list
                             if p['service_type'] == 'LOADBALANCER']
            else:
                # None (as opposed to []) marks "extension unsupported".
                providers = None
        except Exception:
            exceptions.handle(request,
                              _('Unable to retrieve providers list.'))
            providers = []
        if providers:
            # Put the default provider first, labelled as such.
            default_providers = [p for p in providers if p.get('default')]
            if default_providers:
                default_provider = default_providers[0]['name']
            else:
                default_provider = None
            provider_choices = [(p['name'], p['name']) for p in providers
                                if p['name'] != default_provider]
            if default_provider:
                provider_choices.insert(
                    0, (default_provider,
                        _("%s (default)") % default_provider))
        else:
            # No usable provider: show an explanatory, read-only choice.
            if providers is None:
                msg = _("Provider for Load Balancer is not supported")
            else:
                msg = _("No provider is available")
            provider_choices = [('', msg)]
            self.fields['provider'].widget.attrs['readonly'] = True
        self.fields['provider'].choices = provider_choices

    class Meta:
        name = _("Add New Pool")
        permissions = ('openstack.services.network',)
        help_text = _("Create Pool for current project.\n\n"
                      "Assign a name and description for the pool. "
                      "Choose one subnet where all members of this "
                      "pool must be on. "
                      "Select the protocol and load balancing method "
                      "for this pool. "
                      "Admin State is UP (checked) by default.")
class AddPoolStep(workflows.Step):
    """Workflow step wrapping AddPoolAction; forwards the validated form
    fields into the workflow context."""
    action_class = AddPoolAction
    contributes = ("name", "description", "subnet_id", "provider",
                   "protocol", "lb_method", "admin_state_up")
    def contribute(self, data, context):
        context = super(AddPoolStep, self).contribute(data, context)
        # NOTE(review): implicitly returns None when ``data`` is falsy —
        # presumably intentional for horizon's workflow machinery; confirm.
        if data:
            return context
class AddPool(workflows.Workflow):
    """Workflow that creates a load-balancer pool."""
    slug = "addpool"
    name = _("Add Pool")
    finalize_button_name = _("Add")
    success_message = _('Added pool "%s".')
    failure_message = _('Unable to add pool "%s".')
    success_url = "horizon:project:loadbalancers:index"
    default_steps = (AddPoolStep,)
    def format_status_message(self, message):
        """Interpolate the pool name into the success/failure template."""
        return message % self.context.get('name')
    def handle(self, request, context):
        """Create the pool; report success as a boolean."""
        try:
            api.lbaas.pool_create(request, **context)
        except Exception:
            return False
        return True
class AddVipAction(workflows.Action):
    """Form action collecting the attributes needed to create a pool VIP."""
    name = forms.CharField(max_length=80, label=_("Name"))
    description = forms.CharField(
        initial="", required=False,
        max_length=80, label=_("Description"))
    floatip_address = forms.ChoiceField(
        label=_("VIP Address from Floating IPs"),
        widget=forms.Select(attrs={'disabled': 'disabled'}),
        required=False)
    other_address = forms.IPField(required=False,
                                  initial="",
                                  version=forms.IPv4,
                                  mask=False)
    protocol_port = forms.IntegerField(label=_("Protocol Port"), min_value=1,
                                       help_text=_("Enter an integer value "
                                                   "between 1 and 65535."),
                                       validators=[validators.validate_port_range])
    protocol = forms.ChoiceField(label=_("Protocol"))
    # NOTE(review): ``initial={}`` on a ChoiceField is unusual (falsy, like
    # "") — confirm whether "" was intended.
    session_persistence = forms.ChoiceField(
        required=False, initial={}, label=_("Session Persistence"),
        widget=forms.Select(attrs={
            'class': 'switchable',
            'data-slug': 'persistence'
        }))
    cookie_name = forms.CharField(
        initial="", required=False,
        max_length=80, label=_("Cookie Name"),
        help_text=_("Required for APP_COOKIE persistence;"
                    " Ignored otherwise."),
        widget=forms.TextInput(attrs={
            'class': 'switched',
            'data-switch-on': 'persistence',
            'data-persistence-app_cookie': 'APP_COOKIE',
        }))
    connection_limit = forms.IntegerField(
        required=False, min_value=-1, label=_("Connection Limit"),
        help_text=_("Maximum number of connections allowed "
                    "for the VIP or '-1' if the limit is not set"))
    admin_state_up = forms.BooleanField(
        label=_("Admin State"), initial=True, required=False)

    def __init__(self, request, *args, **kwargs):
        """Populate the protocol/persistence/floating-IP choice fields."""
        super(AddVipAction, self).__init__(request, *args, **kwargs)
        self.fields['other_address'].label = _("Specify a free IP address"
                                               " from %s") % args[0]['subnet']
        # extend() instead of a list comprehension executed purely for its
        # append() side effect.
        protocol_choices = [('', _("Select a Protocol"))]
        protocol_choices.extend((p, p) for p in AVAILABLE_PROTOCOLS)
        self.fields['protocol'].choices = protocol_choices
        session_persistence_choices = [('', _("No Session Persistence"))]
        for mode in ('SOURCE_IP', 'HTTP_COOKIE', 'APP_COOKIE'):
            session_persistence_choices.append((mode.lower(), mode))
        self.fields[
            'session_persistence'].choices = session_persistence_choices
        floatip_address_choices = [('', _("Currently Not Supported"))]
        self.fields['floatip_address'].choices = floatip_address_choices

    def clean(self):
        """Upper-case the persistence mode and require a cookie name for
        APP_COOKIE persistence."""
        cleaned_data = super(AddVipAction, self).clean()
        persistence = cleaned_data.get('session_persistence')
        if persistence:
            cleaned_data['session_persistence'] = persistence.upper()
        if (cleaned_data.get('session_persistence') == 'APP_COOKIE' and
                not cleaned_data.get('cookie_name')):
            msg = _('Cookie name is required for APP_COOKIE persistence.')
            self._errors['cookie_name'] = self.error_class([msg])
        return cleaned_data

    class Meta:
        name = _("Specify VIP")
        permissions = ('openstack.services.network',)
        help_text = _("Create a VIP for this pool. "
                      "Assign a name and description for the VIP. "
                      "Specify an IP address and port for the VIP. "
                      "Choose the protocol and session persistence "
                      "method for the VIP."
                      "Specify the max connections allowed. "
                      "Admin State is UP (checked) by default.")
class AddVipStep(workflows.Step):
    """Workflow step collecting VIP details for an existing pool."""
    action_class = AddVipAction
    depends_on = ("pool_id", "subnet")
    contributes = ("name", "description", "floatip_address",
                   "other_address", "protocol_port", "protocol",
                   "session_persistence", "cookie_name",
                   "connection_limit", "admin_state_up")
    def contribute(self, data, context):
        # Delegate entirely to the base implementation.
        return super(AddVipStep, self).contribute(data, context)
class AddVip(workflows.Workflow):
    """Workflow creating a VIP for an existing pool."""
    slug = "addvip"
    name = _("Add VIP")
    finalize_button_name = _("Add")
    success_message = _('Added VIP "%s".')
    failure_message = _('Unable to add VIP "%s".')
    success_url = "horizon:project:loadbalancers:index"
    default_steps = (AddVipStep,)
    def format_status_message(self, message):
        """Interpolate the VIP name into the success/failure template."""
        name = self.context.get('name')
        return message % name
    def handle(self, request, context):
        """Create the VIP described by ``context``; return True on success.

        The address must come from exactly one of the floating-IP choice
        or the free-form field; the subnet is taken from the target pool.
        """
        if context['other_address'] == '':
            context['address'] = context['floatip_address']
        else:
            if not context['floatip_address'] == '':
                # Both address fields were filled in — reject.
                self.failure_message = _('Only one address can be specified. '
                                         'Unable to add VIP "%s".')
                return False
            else:
                context['address'] = context['other_address']
        try:
            # The VIP must live on the same subnet as its pool.
            pool = api.lbaas.pool_get(request, context['pool_id'])
            context['subnet_id'] = pool['subnet_id']
        except Exception:
            context['subnet_id'] = None
            self.failure_message = _('Unable to retrieve the specified pool. '
                                     'Unable to add VIP "%s".')
            return False
        if context['session_persistence']:
            # Normalize the persistence choice into the dict shape the API
            # expects; APP_COOKIE additionally carries the cookie name.
            stype = context['session_persistence']
            if stype == 'APP_COOKIE':
                cookie = context['cookie_name']
                context['session_persistence'] = {'type': stype,
                                                  'cookie_name': cookie}
            else:
                context['session_persistence'] = {'type': stype}
        else:
            context['session_persistence'] = {}
        try:
            api.lbaas.vip_create(request, **context)
            return True
        except Exception:
            return False
class AddMemberAction(workflows.Action):
    """Form action for adding one or more instances to a pool as members."""
    pool_id = forms.ChoiceField(label=_("Pool"))
    members = forms.MultipleChoiceField(
        label=_("Member(s)"),
        required=True,
        initial=["default"],
        widget=forms.CheckboxSelectMultiple(),
        error_messages={'required':
                        _('At least one member must be specified')},
        help_text=_("Select members for this pool "))
    weight = forms.IntegerField(
        max_value=256, min_value=1, label=_("Weight"), required=False,
        help_text=_("Relative part of requests this pool member serves "
                    "compared to others. \nThe same weight will be applied to "
                    "all the selected members and can be modified later. "
                    "Weight must be in the range 1 to 256.")
    )
    protocol_port = forms.IntegerField(
        label=_("Protocol Port"), min_value=1,
        help_text=_("Enter an integer value between 1 and 65535. "
                    "The same port will be used for all the selected "
                    "members and can be modified later."),
        validators=[validators.validate_port_range]
    )
    admin_state_up = forms.BooleanField(label=_("Admin State"),
                                        initial=True, required=False)
    def __init__(self, request, *args, **kwargs):
        """Populate the pool and instance (member candidate) choice lists."""
        super(AddMemberAction, self).__init__(request, *args, **kwargs)
        pool_id_choices = [('', _("Select a Pool"))]
        try:
            tenant_id = self.request.user.tenant_id
            pools = api.lbaas.pool_list(request, tenant_id=tenant_id)
        except Exception:
            pools = []
            exceptions.handle(request,
                              _('Unable to retrieve pools list.'))
        pools = sorted(pools,
                       key=lambda pool: pool.name)
        for p in pools:
            pool_id_choices.append((p.id, p.name))
        self.fields['pool_id'].choices = pool_id_choices
        members_choices = []
        try:
            servers, has_more = api.nova.server_list(request)
        except Exception:
            servers = []
            exceptions.handle(request,
                              _('Unable to retrieve instances list.'))
        if len(servers) == 0:
            # No instances: repurpose the members label as an explanatory
            # message and relax the other required fields.
            # NOTE(review): ``members.required`` is (re)set to True here
            # while pool_id/protocol_port become optional — looks
            # inconsistent; confirm whether False was intended.
            self.fields['members'].label = _(
                "No servers available. To add a member, you "
                "need at least one running instance.")
            self.fields['members'].required = True
            self.fields['members'].help_text = _("Select members "
                                                 "for this pool ")
            self.fields['pool_id'].required = False
            self.fields['protocol_port'].required = False
            return
        for m in servers:
            members_choices.append((m.id, m.name))
        self.fields['members'].choices = sorted(
            members_choices,
            key=lambda member: member[1])
    class Meta:
        name = _("Add New Member")
        permissions = ('openstack.services.network',)
        help_text = _("Add member(s) to the selected pool.\n\n"
                      "Choose one or more listed instances to be "
                      "added to the pool as member(s). "
                      "Assign a numeric weight for the selected member(s). "
                      "Specify the port number the selected member(s) "
                      "operate(s) on; e.g., 80. \n\n"
                      "There can only be one port associated with "
                      "each instance.")
class AddMemberStep(workflows.Step):
    """Workflow step wrapping AddMemberAction."""
    action_class = AddMemberAction
    contributes = ("pool_id", "members", "protocol_port", "weight",
                   "admin_state_up")
    def contribute(self, data, context):
        # Delegate entirely to the base implementation.
        return super(AddMemberStep, self).contribute(data, context)
class AddMember(workflows.Workflow):
    """Workflow that adds the selected instances to a pool as members."""
    slug = "addmember"
    name = _("Add Member")
    finalize_button_name = _("Add")
    success_message = _('Added member(s).')
    failure_message = _('Unable to add member(s).')
    success_url = "horizon:project:loadbalancers:index"
    default_steps = (AddMemberStep,)
    def handle(self, request, context):
        """Create one pool member per selected instance.

        Each instance's neutron port is looked up to obtain the fixed IP
        used as the member address. Returns True on overall success.
        """
        for m in context['members']:
            params = {'device_id': m}
            try:
                plist = api.neutron.port_list(request, **params)
            except Exception:
                return False
            if plist:
                context['address'] = plist[0].fixed_ips[0]['ip_address']
            # NOTE(review): if an instance has no ports, 'address' is either
            # missing (first iteration) or left over from the previous
            # instance — confirm whether portless instances should be
            # skipped instead.
            try:
                context['member_id'] = api.lbaas.member_create(
                    request, **context).id
            except Exception:
                return False
        return True
class AddMonitorAction(workflows.Action):
    """Form action collecting the attributes of a health monitor template.

    The HTTP-specific fields (method, URL, expected codes) are shown only
    when the monitor type is http/https (driven by the switchable/switched
    widget attributes) and are validated accordingly in clean().
    """
    type = forms.ChoiceField(
        label=_("Type"),
        choices=[('ping', _('PING')),
                 ('tcp', _('TCP')),
                 ('http', _('HTTP')),
                 ('https', _('HTTPS'))],
        widget=forms.Select(attrs={
            'class': 'switchable',
            'data-slug': 'type'
        }))
    delay = forms.IntegerField(
        min_value=1,
        label=_("Delay"),
        help_text=_("The minimum time in seconds between regular checks "
                    "of a member"))
    timeout = forms.IntegerField(
        min_value=1,
        label=_("Timeout"),
        help_text=_("The maximum time in seconds for a monitor to wait "
                    "for a reply"))
    max_retries = forms.IntegerField(
        max_value=10, min_value=1,
        label=_("Max Retries (1~10)"),
        help_text=_("Number of permissible failures before changing "
                    "the status of member to inactive"))
    http_method = forms.ChoiceField(
        initial="GET",
        required=False,
        choices=[('GET', _('GET'))],
        label=_("HTTP Method"),
        help_text=_("HTTP method used to check health status of a member"),
        widget=forms.Select(attrs={
            'class': 'switched',
            'data-switch-on': 'type',
            'data-type-http': _('HTTP Method'),
            'data-type-https': _('HTTP Method')
        }))
    url_path = forms.CharField(
        initial="/",
        required=False,
        max_length=80,
        label=_("URL"),
        widget=forms.TextInput(attrs={
            'class': 'switched',
            'data-switch-on': 'type',
            'data-type-http': _('URL'),
            'data-type-https': _('URL')
        }))
    expected_codes = forms.RegexField(
        initial="200",
        required=False,
        max_length=80,
        regex=r'^(\d{3}(\s*,\s*\d{3})*)$|^(\d{3}-\d{3})$',
        label=_("Expected HTTP Status Codes"),
        help_text=_("Expected code may be a single value (e.g. 200), "
                    "a list of values (e.g. 200, 202), "
                    "or range of values (e.g. 200-204)"),
        widget=forms.TextInput(attrs={
            'class': 'switched',
            'data-switch-on': 'type',
            'data-type-http': _('Expected HTTP Status Codes'),
            'data-type-https': _('Expected HTTP Status Codes')
        }))
    admin_state_up = forms.BooleanField(label=_("Admin State"),
                                        initial=True, required=False)
    def __init__(self, request, *args, **kwargs):
        # No extra setup needed; kept for symmetry with the other actions.
        super(AddMonitorAction, self).__init__(request, *args, **kwargs)
    def clean(self):
        """Require method/URL/expected-codes when the monitor type is HTTP(S)."""
        cleaned_data = super(AddMonitorAction, self).clean()
        type_opt = cleaned_data.get('type')
        if type_opt in ['http', 'https']:
            http_method_opt = cleaned_data.get('http_method')
            url_path = cleaned_data.get('url_path')
            expected_codes = cleaned_data.get('expected_codes')
            if not http_method_opt:
                msg = _('Please choose a HTTP method')
                self._errors['http_method'] = self.error_class([msg])
            if not url_path:
                msg = _('Please specify an URL')
                self._errors['url_path'] = self.error_class([msg])
            if not expected_codes:
                msg = _('Please enter a single value (e.g. 200), '
                        'a list of values (e.g. 200, 202), '
                        'or range of values (e.g. 200-204)')
                self._errors['expected_codes'] = self.error_class([msg])
        return cleaned_data
    class Meta:
        name = _("Add New Monitor")
        permissions = ('openstack.services.network',)
        help_text = _("Create a monitor template.\n\n"
                      "Select type of monitoring. "
                      "Specify delay, timeout, and retry limits "
                      "required by the monitor. "
                      "Specify method, URL path, and expected "
                      "HTTP codes upon success.")
class AddMonitorStep(workflows.Step):
    """Workflow step wrapping AddMonitorAction."""
    action_class = AddMonitorAction
    contributes = ("type", "delay", "timeout", "max_retries",
                   "http_method", "url_path", "expected_codes",
                   "admin_state_up")
    def contribute(self, data, context):
        context = super(AddMonitorStep, self).contribute(data, context)
        # NOTE(review): implicitly returns None when ``data`` is falsy —
        # presumably intentional for horizon's workflow machinery; confirm.
        if data:
            return context
class AddMonitor(workflows.Workflow):
    """Workflow that creates a health monitor template."""
    slug = "addmonitor"
    name = _("Add Monitor")
    finalize_button_name = _("Add")
    success_message = _('Added monitor')
    failure_message = _('Unable to add monitor')
    success_url = "horizon:project:loadbalancers:index"
    default_steps = (AddMonitorStep,)
    def handle(self, request, context):
        """Create the monitor, stashing its id in the workflow context."""
        try:
            context['monitor_id'] = api.lbaas.pool_health_monitor_create(
                request, **context).get('id')
        except Exception:
            exceptions.handle(request, _("Unable to add monitor."))
            return False
        return True
class AddPMAssociationAction(workflows.Action):
    """Form action for picking a health monitor to attach to a pool."""
    monitor_id = forms.ChoiceField(label=_("Monitor"))
    def __init__(self, request, *args, **kwargs):
        super(AddPMAssociationAction, self).__init__(request, *args, **kwargs)
    def populate_monitor_id_choices(self, request, context):
        """Build the monitor dropdown, omitting monitors already attached."""
        self.fields['monitor_id'].label = (
            _("Select a monitor template for %s") % context['pool_name'])
        choices = [('', _("Select a Monitor"))]
        try:
            tenant_id = self.request.user.tenant_id
            for monitor in api.lbaas.pool_health_monitor_list(
                    request, tenant_id=tenant_id):
                if monitor.id not in context['pool_monitors']:
                    choices.append(
                        (monitor.id, utils.get_monitor_display_name(monitor)))
        except Exception:
            exceptions.handle(request,
                              _('Unable to retrieve monitors list.'))
        self.fields['monitor_id'].choices = choices
        return choices
    class Meta:
        name = _("Association Details")
        permissions = ('openstack.services.network',)
        help_text = _("Associate a health monitor with target pool.")
class AddPMAssociationStep(workflows.Step):
    """Workflow step wrapping AddPMAssociationAction."""
    action_class = AddPMAssociationAction
    depends_on = ("pool_id", "pool_name", "pool_monitors")
    contributes = ("monitor_id",)

    def contribute(self, data, context):
        """Merge the selected monitor id into the workflow context."""
        context = super(AddPMAssociationStep, self).contribute(data, context)
        if not data:
            # No form data submitted: contribute nothing (returns None).
            return None
        return context
class AddPMAssociation(workflows.Workflow):
    """Workflow that associates an existing health monitor with a pool."""
    slug = "addassociation"
    name = _("Associate Monitor")
    finalize_button_name = _("Associate")
    success_message = _('Associated monitor.')
    failure_message = _('Unable to associate monitor.')
    success_url = "horizon:project:loadbalancers:index"
    default_steps = (AddPMAssociationStep,)

    def handle(self, request, context):
        """Create the pool/monitor association; report errors via horizon."""
        try:
            association = api.lbaas.pool_monitor_association_create(
                request, **context)
            context['monitor_id'] = association
            return True
        except Exception:
            exceptions.handle(request, _("Unable to associate monitor."))
            return False
class DeletePMAssociationAction(workflows.Action):
    """Workflow action for picking a health monitor to detach from a pool.

    Note: the previous ``__init__`` only delegated to ``super().__init__``
    with unchanged arguments, so it has been removed (redundant override).
    """
    monitor_id = forms.ChoiceField(label=_("Monitor"))

    def populate_monitor_id_choices(self, request, context):
        """Build the monitor choice list from monitors currently on the pool.

        :param request: current request, used for API calls and error display
        :param context: workflow context; reads 'pool_name' and
            'pool_monitors' (ids already associated with the pool)
        :return: list of (id, display_name) choice tuples
        """
        self.fields['monitor_id'].label = (_("Select a health monitor of %s") %
                                           context['pool_name'])
        monitor_id_choices = [('', _("Select a Monitor"))]
        try:
            monitors = api.lbaas.pool_health_monitor_list(request)
            for m in monitors:
                # Offer only monitors that are associated with this pool.
                if m.id in context['pool_monitors']:
                    display_name = utils.get_monitor_display_name(m)
                    monitor_id_choices.append((m.id, display_name))
        except Exception:
            exceptions.handle(request,
                              _('Unable to retrieve monitors list.'))
        self.fields['monitor_id'].choices = monitor_id_choices
        return monitor_id_choices

    class Meta:
        name = _("Association Details")
        permissions = ('openstack.services.network',)
        help_text = _("Disassociate a health monitor from target pool. ")
class DeletePMAssociationStep(workflows.Step):
    """Workflow step wrapping DeletePMAssociationAction."""
    action_class = DeletePMAssociationAction
    depends_on = ("pool_id", "pool_name", "pool_monitors")
    contributes = ("monitor_id",)

    def contribute(self, data, context):
        """Merge the selected monitor id into the workflow context."""
        context = super(DeletePMAssociationStep, self).contribute(
            data, context)
        if not data:
            # No form data submitted: contribute nothing (returns None).
            return None
        return context
class DeletePMAssociation(workflows.Workflow):
    """Workflow that removes a health monitor association from a pool."""
    slug = "deleteassociation"
    name = _("Disassociate Monitor")
    finalize_button_name = _("Disassociate")
    success_message = _('Disassociated monitor.')
    failure_message = _('Unable to disassociate monitor.')
    success_url = "horizon:project:loadbalancers:index"
    default_steps = (DeletePMAssociationStep,)

    def handle(self, request, context):
        """Delete the pool/monitor association; report errors via horizon."""
        try:
            result = api.lbaas.pool_monitor_association_delete(
                request, **context)
            context['monitor_id'] = result
            return True
        except Exception:
            exceptions.handle(request, _("Unable to disassociate monitor."))
            return False
| {
"content_hash": "e2d5ec879137c9f03899034d42ab39c7",
"timestamp": "",
"source": "github",
"line_count": 652,
"max_line_length": 79,
"avg_line_length": 39.605828220858896,
"alnum_prop": 0.5554738024241955,
"repo_name": "spandanb/horizon",
"id": "90cfc88a4b41795594afac8d96867cbb02556c03",
"size": "26445",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/project/loadbalancers/workflows.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
"""
Mako based Configuration Generator
"""
import logging
import re
from mako.exceptions import CompileException, SyntaxException
from mako.template import Template
logger = logging.getLogger("confgen")
class TemplateSyntaxException(Exception):
    """Raised when rendering of the Mako template fails.

    BUG FIX: this previously derived from ``BaseException``, which meant
    ``except Exception`` handlers in callers would NOT catch it (only
    KeyboardInterrupt/SystemExit-style exceptions should subclass
    BaseException directly). It now derives from ``Exception``.
    """
    pass
class MakoConfigGenerator:
    """Config generator that utilizes the Mako template engine.

    Variables written as ``${ name }`` are extracted from the template
    string; values assigned via :meth:`set_variable_value` are substituted
    when :meth:`get_rendered_result` is called.
    """
    # regular expression matching "${ name }" variables; the identifier is
    # captured in the "name" group
    _variable_name_regex = r"(\$\{[ ]*(?P<name>[a-zA-Z0-9_]+)[ ]*\})"
    # template content (class-level default; set per instance in __init__)
    _template_string = None

    @property
    def template_string(self):
        """The raw Mako template source."""
        return self._template_string

    @template_string.setter
    def template_string(self, value):
        self._template_string = value
        # re-extract variable names whenever the template changes
        self._parse_variable_from_template_string()

    @property
    def template_variables(self):
        """Sorted list of variable names found in the template."""
        return sorted(self._template_variable_dict.keys())

    def __init__(self, template_string=""):
        """:param template_string: Mako template source (must be str)
        :raises ValueError: if template_string is not a str
        """
        if type(template_string) is not str:
            raise ValueError("template string must be a string type")
        # BUG FIX: _template_variable_dict used to be a class-level dict,
        # i.e. a mutable default shared between instances until the first
        # parse ran; initialize it per instance instead.
        self._template_variable_dict = dict()
        # The property setter already triggers a parse, so the explicit
        # second call to _parse_variable_from_template_string() that used
        # to follow was redundant and has been removed.
        self.template_string = template_string

    def _parse_variable_from_template_string(self):
        """Populate the variable dict with names found in the template."""
        self._template_variable_dict = dict()
        if self.template_string:
            for var in re.findall(self._variable_name_regex,
                                  self.template_string):
                logger.debug("found variable %s" % var[1])
                self.add_variable(var[1])

    def add_variable(self, variable):
        """Create a variable with no value.

        :param variable: variable name
        :return:
        """
        self.set_variable_value(variable, "")

    def set_variable_value(self, variable, value=""):
        """Change the value of the given variable, creating it if needed.

        :param variable: variable name
        :param value: value to substitute when rendering
        :return:
        """
        self._template_variable_dict[variable] = value

    def get_variable_value(self, variable):
        """Get the value of a variable.

        :param variable: variable name
        :return: the stored value
        :raises KeyError: if the variable is unknown
        """
        return self._template_variable_dict[variable]

    def get_rendered_result(self, remove_empty_lines=True):
        """Render the template with the stored variable values.

        :param remove_empty_lines: True if blank lines should be removed
        :return: rendered configuration as a string
        :raises TemplateSyntaxException: if rendering fails for any reason
        """
        try:
            result = Template(self.template_string).render(
                **self._template_variable_dict)
        except SyntaxException as ex:
            msg = "Template Syntax error: %s" % str(ex)
            logger.error(msg, exc_info=True)
            raise TemplateSyntaxException(msg)
        except CompileException as ex:
            msg = "Template Compile error: %s" % str(ex)
            logger.error(msg, exc_info=True)
            raise TemplateSyntaxException(msg)
        except AttributeError as ex:
            msg = "Template Attribute error: %s" % str(ex)
            logger.error(msg, exc_info=True)
            raise TemplateSyntaxException(msg)
        except Exception as ex:
            # BUG FIX: the catch-all branch previously reused the
            # "Template Attribute error" message (copy/paste); use a
            # generic message so logs identify the failure class correctly.
            msg = "Template error: %s" % str(ex)
            logger.error(msg, exc_info=True)
            raise TemplateSyntaxException(msg)
        if remove_empty_lines:
            # BUG FIX: the old manual loop appended a trailing "\n" whenever
            # the template contained blank lines (its line counter only
            # advanced on non-empty lines); joining the non-empty lines
            # yields consistent output with no trailing newline.
            result = "\n".join(
                line for line in result.splitlines() if line != "")
        return result
| {
"content_hash": "a2ce984db83621665aa69fcbc96c3225",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 105,
"avg_line_length": 29.6,
"alnum_prop": 0.5977130977130977,
"repo_name": "tobymccann/flask-base",
"id": "190c9721adcb9523d4ccd302ed8b5efcec5952e1",
"size": "3848",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/utils/confgen.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5757"
},
{
"name": "HTML",
"bytes": "118028"
},
{
"name": "JavaScript",
"bytes": "9422"
},
{
"name": "Python",
"bytes": "112456"
}
],
"symlink_target": ""
} |
from flask import Flask
from redis import Redis
import os
import socket
app = Flask(__name__)
redis = Redis(host=os.environ.get('REDIS_HOST', '127.0.0.1'), port=6379)
@app.route('/')
def hello():
    """Increment the shared hit counter and report it with this host's name."""
    redis.incr('hits')
    hits = redis.get('hits')
    host = socket.gethostname()
    # NOTE(review): on Python 3 the Redis client presumably returns bytes,
    # so the rendered count would display as b'5' -- preserved as-is.
    return 'Hello Container World! I have been seen %s times and my hostname is %s.\n' % (hits, host)
if __name__ == "__main__":
    # Bind to all interfaces so the app is reachable from outside the
    # container; debug=True enables the reloader (not for production).
    app.run(host="0.0.0.0", port=5000, debug=True)
| {
"content_hash": "46074115da93247b1aac2c350b9ec5d2",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 129,
"avg_line_length": 25.41176470588235,
"alnum_prop": 0.6504629629629629,
"repo_name": "xiaopeng163/docker-k8s-lab",
"id": "23728cc953a1b8a9081f892fd625b2a2dcdbe402",
"size": "432",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/docker/flask-redis/app.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "628"
},
{
"name": "Python",
"bytes": "1774"
},
{
"name": "Ruby",
"bytes": "7020"
},
{
"name": "Shell",
"bytes": "2250"
}
],
"symlink_target": ""
} |
from oslo_serialization import jsonutils as json
from oslo_versionedobjects import fields
class Json(fields.FieldType):
    """Field type that serializes values to and from JSON strings."""

    def coerce(self, obj, attr, value):
        # Strings are treated as serialized JSON; any other value is
        # passed through unchanged.
        if not isinstance(value, str):
            return value
        return json.loads(value)

    def from_primitive(self, obj, attr, value):
        # A primitive value deserializes exactly like a coerced one.
        return self.coerce(obj, attr, value)

    def to_primitive(self, obj, attr, value):
        return json.dumps(value)
class JsonField(fields.AutoTypedField):
    # Auto-typed field whose values are (de)serialized via the Json type.
    AUTO_TYPE = Json()
class ListField(fields.AutoTypedField):
    # Auto-typed field holding a list of untyped elements.
    AUTO_TYPE = fields.List(fields.FieldType())
| {
"content_hash": "60f2221dbf063086ce766f8a5567a3bd",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 48,
"avg_line_length": 25.5,
"alnum_prop": 0.6748366013071896,
"repo_name": "openstack/heat",
"id": "19680f6d96b54f09e3faf804b3fd2d13c028c54b",
"size": "1185",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heat/objects/fields.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "9145593"
},
{
"name": "Shell",
"bytes": "65832"
}
],
"symlink_target": ""
} |
from telemetry.page.actions.all_page_actions import *
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class Typical25Page(page_module.Page):
    """One page of the typical-25 set, exercised with a smooth scroll."""

    def __init__(self, url, page_set):
        super(Typical25Page, self).__init__(url=url, page_set=page_set)
        self.user_agent_type = 'desktop'
        self.archive_data_file = 'data/typical_25.json'

    def RunSmoothness(self, action_runner):
        """Scroll the page inside a tracked smooth-gesture interaction."""
        scroll = action_runner.BeginGestureInteraction(
            'ScrollAction', is_smooth=True)
        action_runner.ScrollPage()
        scroll.End()
class Typical25PageSet(page_set_module.PageSet):
    """ Pages designed to represent the median, not highly optimized web """

    def __init__(self):
        super(Typical25PageSet, self).__init__(
            user_agent_type='desktop',
            archive_data_file='data/typical_25.json',
            bucket=page_set_module.PARTNER_BUCKET)
        # URLs with their original selection rationale where recorded.
        page_urls = [
            # Why: Alexa games #48
            'http://www.nick.com/games',
            # Why: Alexa sports #45
            'http://www.rei.com/',
            # Why: Alexa sports #50
            'http://www.fifa.com/',
            # Why: Alexa shopping #41
            'http://www.gamestop.com/ps3',
            # Why: Alexa shopping #25
            'http://www.barnesandnoble.com/u/books-bestselling-books/379003057/',
            # Why: Alexa news #55
            ('http://www.economist.com/news/science-and-technology/21573529-small-'
             'models-cosmic-phenomena-are-shedding-light-real-thing-how-build'),
            # Why: Alexa news #67
            'http://www.theonion.com',
            'http://arstechnica.com/',
            # Why: Alexa home #10
            'http://allrecipes.com/Recipe/Pull-Apart-Hot-Cross-Buns/Detail.aspx',
            'http://www.html5rocks.com/en/',
            'http://www.mlb.com/',
            # pylint: disable=C0301
            'http://gawker.com/5939683/based-on-a-true-story-is-a-rotten-lie-i-hope-you-never-believe',
            'http://www.imdb.com/title/tt0910970/',
            'http://www.flickr.com/search/?q=monkeys&f=hp',
            'http://money.cnn.com/',
            'http://www.nationalgeographic.com/',
            'http://premierleague.com',
            'http://www.osubeavers.com/',
            'http://walgreens.com',
            'http://colorado.edu',
            ('http://www.ticketmaster.com/JAY-Z-and-Justin-Timberlake-tickets/artist/'
             '1837448?brand=none&tm_link=tm_homeA_rc_name2'),
            # pylint: disable=C0301
            'http://www.theverge.com/2013/3/5/4061684/inside-ted-the-smartest-bubble-in-the-world',
            'http://www.airbnb.com/',
            'http://www.ign.com/',
            # Why: Alexa health #25
            'http://www.fda.gov',
        ]
        for page_url in page_urls:
            self.AddPage(Typical25Page(page_url, self))
| {
"content_hash": "57c0795873659b40d114d5999b8d47fc",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 97,
"avg_line_length": 36.40277777777778,
"alnum_prop": 0.6409767264402899,
"repo_name": "chromium2014/src",
"id": "adc55f1feaae870bf0be7d6156a87f3621addad0",
"size": "2813",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/perf/page_sets/typical_25.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "853"
},
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "1889381"
},
{
"name": "Awk",
"bytes": "8660"
},
{
"name": "C",
"bytes": "39993418"
},
{
"name": "C#",
"bytes": "1132"
},
{
"name": "C++",
"bytes": "220757674"
},
{
"name": "CSS",
"bytes": "973910"
},
{
"name": "Java",
"bytes": "6583410"
},
{
"name": "JavaScript",
"bytes": "20967999"
},
{
"name": "Mercury",
"bytes": "9480"
},
{
"name": "Objective-C",
"bytes": "943237"
},
{
"name": "Objective-C++",
"bytes": "7190130"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "Perl",
"bytes": "674461"
},
{
"name": "Python",
"bytes": "10430892"
},
{
"name": "Rebol",
"bytes": "262"
},
{
"name": "Shell",
"bytes": "1337040"
},
{
"name": "Standard ML",
"bytes": "3705"
},
{
"name": "Tcl",
"bytes": "277091"
},
{
"name": "XSLT",
"bytes": "13493"
},
{
"name": "nesC",
"bytes": "15206"
}
],
"symlink_target": ""
} |
from tempest_lib import decorators
from tempest_lib import exceptions as lib_exc
import testtools
from tempest.api.compute.security_groups import base
from tempest.common.utils import data_utils
from tempest import config
from tempest import test
CONF = config.CONF
class SecurityGroupsNegativeTestJSON(base.BaseSecurityGroupsTest):
    """Negative tests for the compute security groups API.

    Each test feeds invalid or non-existent input to the API and asserts
    the expected client exception is raised.
    """

    @classmethod
    def setup_clients(cls):
        super(SecurityGroupsNegativeTestJSON, cls).setup_clients()
        cls.client = cls.security_groups_client

    @classmethod
    def resource_setup(cls):
        super(SecurityGroupsNegativeTestJSON, cls).resource_setup()
        cls.neutron_available = CONF.service_available.neutron

    def _generate_a_non_existent_security_group_id(self):
        """Return a security group id that does not currently exist.

        Neutron uses UUIDs for security group ids; nova-network uses ints.
        """
        # IDIOM FIX: replaced index-based append loop with a comprehension.
        security_group_id = [sg['id']
                             for sg in self.client.list_security_groups()]
        # Draw random candidates until one misses the existing ids.
        # IDIOM FIX: previously rand_int_id was always called and then
        # discarded when neutron was available; branch first instead.
        while True:
            if self.neutron_available:
                non_exist_id = data_utils.rand_uuid()
            else:
                non_exist_id = data_utils.rand_int_id(start=999)
            if non_exist_id not in security_group_id:
                return non_exist_id

    @test.attr(type=['negative'])
    @test.idempotent_id('673eaec1-9b3e-48ed-bdf1-2786c1b9661c')
    @test.services('network')
    def test_security_group_get_nonexistent_group(self):
        # Negative test: Should not be able to GET the details
        # of a non-existent Security Group
        non_exist_id = self._generate_a_non_existent_security_group_id()
        self.assertRaises(lib_exc.NotFound, self.client.show_security_group,
                          non_exist_id)

    @decorators.skip_because(bug="1161411",
                             condition=CONF.service_available.neutron)
    @test.attr(type=['negative'])
    @test.idempotent_id('1759c3cb-b0fc-44b7-86ce-c99236be911d')
    @test.services('network')
    def test_security_group_create_with_invalid_group_name(self):
        # Negative test: Security Group should not be created with group name
        # as an empty string/with white spaces/chars more than 255
        s_description = data_utils.rand_name('description')
        # Create Security Group with empty string as group name
        self.assertRaises(lib_exc.BadRequest,
                          self.client.create_security_group, "", s_description)
        # Create Security Group with white space in group name
        self.assertRaises(lib_exc.BadRequest,
                          self.client.create_security_group, " ",
                          s_description)
        # Create Security Group with group name longer than 255 chars
        s_name = 'securitygroup-'.ljust(260, '0')
        self.assertRaises(lib_exc.BadRequest,
                          self.client.create_security_group, s_name,
                          s_description)

    @decorators.skip_because(bug="1161411",
                             condition=CONF.service_available.neutron)
    @test.attr(type=['negative'])
    @test.idempotent_id('777b6f14-aca9-4758-9e84-38783cfa58bc')
    @test.services('network')
    def test_security_group_create_with_invalid_group_description(self):
        # Negative test: Security Group should not be created with description
        # longer than 255 chars. Empty description is allowed by the API
        # reference, however.
        s_name = data_utils.rand_name('securitygroup')
        # Create Security Group with group description longer than 255 chars
        s_description = 'description-'.ljust(260, '0')
        self.assertRaises(lib_exc.BadRequest,
                          self.client.create_security_group, s_name,
                          s_description)

    @test.idempotent_id('9fdb4abc-6b66-4b27-b89c-eb215a956168')
    @testtools.skipIf(CONF.service_available.neutron,
                      "Neutron allows duplicate names for security groups")
    @test.attr(type=['negative'])
    @test.services('network')
    def test_security_group_create_with_duplicate_name(self):
        # Negative test: Security Group with duplicate name should not
        # be created
        s_name = data_utils.rand_name('securitygroup')
        s_description = data_utils.rand_name('description')
        self.create_security_group(s_name, s_description)
        # Now try the Security Group with the same 'Name'
        self.assertRaises(lib_exc.BadRequest,
                          self.client.create_security_group, s_name,
                          s_description)

    @test.attr(type=['negative'])
    @test.idempotent_id('36a1629f-c6da-4a26-b8b8-55e7e5d5cd58')
    @test.services('network')
    def test_delete_the_default_security_group(self):
        # Negative test: Deletion of the "default" Security Group should fail
        # IDIOM FIX: iterate the listing directly instead of by index.
        default_security_group_id = None
        for sg in self.client.list_security_groups():
            if sg['name'] == 'default':
                default_security_group_id = sg['id']
                break
        # Deleting the "default" Security Group
        self.assertRaises(lib_exc.BadRequest,
                          self.client.delete_security_group,
                          default_security_group_id)

    @test.attr(type=['negative'])
    @test.idempotent_id('6727c00b-214c-4f9e-9a52-017ac3e98411')
    @test.services('network')
    def test_delete_nonexistent_security_group(self):
        # Negative test: Deletion of a non-existent Security Group should fail
        non_exist_id = self._generate_a_non_existent_security_group_id()
        self.assertRaises(lib_exc.NotFound,
                          self.client.delete_security_group, non_exist_id)

    @test.attr(type=['negative'])
    @test.idempotent_id('1438f330-8fa4-4aeb-8a94-37c250106d7f')
    @test.services('network')
    def test_delete_security_group_without_passing_id(self):
        # Negative test: Deletion of a Security Group without passing its ID
        # should fail
        self.assertRaises(lib_exc.NotFound,
                          self.client.delete_security_group, '')

    @test.idempotent_id('00579617-fe04-4e1c-9d08-ca7467d2e34b')
    @testtools.skipIf(CONF.service_available.neutron,
                      "Neutron does not check the security group ID")
    @test.attr(type=['negative'])
    @test.services('network')
    def test_update_security_group_with_invalid_sg_id(self):
        # Update security_group with an invalid sg_id should fail
        s_name = data_utils.rand_name('sg')
        s_description = data_utils.rand_name('description')
        # Create a non-int sg_id
        sg_id_invalid = data_utils.rand_name('sg')
        self.assertRaises(lib_exc.BadRequest,
                          self.client.update_security_group, sg_id_invalid,
                          name=s_name, description=s_description)

    @test.idempotent_id('cda8d8b4-59f8-4087-821d-20cf5a03b3b1')
    @testtools.skipIf(CONF.service_available.neutron,
                      "Neutron does not check the security group name")
    @test.attr(type=['negative'])
    @test.services('network')
    def test_update_security_group_with_invalid_sg_name(self):
        # Update security_group with an invalid sg_name should fail
        securitygroup = self.create_security_group()
        self.assertIn('id', securitygroup)
        securitygroup_id = securitygroup['id']
        # Update Security Group with group name longer than 255 chars
        s_new_name = 'securitygroup-'.ljust(260, '0')
        self.assertRaises(lib_exc.BadRequest,
                          self.client.update_security_group,
                          securitygroup_id, name=s_new_name)

    @test.idempotent_id('97d12b1c-a610-4194-93f1-ba859e718b45')
    @testtools.skipIf(CONF.service_available.neutron,
                      "Neutron does not check the security group description")
    @test.attr(type=['negative'])
    @test.services('network')
    def test_update_security_group_with_invalid_sg_des(self):
        # Update security_group with an invalid sg_des should fail
        securitygroup = self.create_security_group()
        self.assertIn('id', securitygroup)
        securitygroup_id = securitygroup['id']
        # Update Security Group with group description longer than 255 chars
        s_new_des = 'des-'.ljust(260, '0')
        self.assertRaises(lib_exc.BadRequest,
                          self.client.update_security_group,
                          securitygroup_id, description=s_new_des)

    @test.attr(type=['negative'])
    @test.idempotent_id('27edee9c-873d-4da6-a68a-3c256efebe8f')
    @test.services('network')
    def test_update_non_existent_security_group(self):
        # Updating a non-existent Security Group should fail
        non_exist_id = self._generate_a_non_existent_security_group_id()
        s_name = data_utils.rand_name('sg')
        s_description = data_utils.rand_name('description')
        self.assertRaises(lib_exc.NotFound,
                          self.client.update_security_group,
                          non_exist_id, name=s_name,
                          description=s_description)
| {
"content_hash": "9051903047d918d991509ca89db71c22",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 79,
"avg_line_length": 46.88717948717949,
"alnum_prop": 0.6367713004484304,
"repo_name": "redhat-cip/tempest",
"id": "d8cbe3d17da33b0555f7878d30a489a5f2034593",
"size": "9787",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tempest/api/compute/security_groups/test_security_groups_negative.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2691544"
},
{
"name": "Shell",
"bytes": "8578"
}
],
"symlink_target": ""
} |
"""Authentication module for using Google Compute service accounts."""
import json
import urllib2
from boto.auth_handler import AuthHandler
from boto.auth_handler import NotReadyToAuthenticate
# Base URL of the Compute Engine metadata server (link-local address,
# reachable only from inside a GCE VM).
META_DATA_SERVER_BASE_URL=(
    'http://169.254.169.254/computeMetadata/v1')
# Per-service-account metadata endpoints; '%s' is the account name.
SERVICE_ACCOUNT_SCOPES_URL=(META_DATA_SERVER_BASE_URL +
    '/instance/service-accounts/%s/scopes?alt=json')
SERVICE_ACCOUNT_TOKEN_URL=(META_DATA_SERVER_BASE_URL +
    '/instance/service-accounts/%s/token?alt=json')
# OAuth2 scopes that grant access to Google Cloud Storage; the handler
# intersects these with the scopes actually granted to the VM.
GS_SCOPES = set([
    'https://www.googleapis.com/auth/devstorage.read_only',
    'https://www.googleapis.com/auth/devstorage.read_write',
    'https://www.googleapis.com/auth/devstorage.full_control',
])
class ComputeAuth(AuthHandler):
    """Google Compute service account auth handler.

    What happens is that the boto library reads the system config file
    (/etc/boto.cfg) and looks at a config value called 'plugin_directory'. It
    then loads the python files in that and find classes derived from
    boto.auth_handler.AuthHandler.
    """

    # boto matches these capability tags when selecting an auth handler.
    capability = ['google-oauth2', 's3']

    def __init__(self, path, config, provider):
        # Activate only for the 'google' provider with a configured service
        # account; raising NotReadyToAuthenticate tells boto to try the
        # next handler instead.
        self.service_account = config.get('GoogleCompute', 'service_account', '')
        if provider.name == 'google' and self.service_account:
            self.scopes = self.__GetGSScopes()
            if not self.scopes:
                raise NotReadyToAuthenticate()
        else:
            raise NotReadyToAuthenticate()

    def __GetJSONMetadataValue(self, url):
        # Fetch and decode a JSON document from the metadata server.
        # Returns None on any network or I/O failure (callers treat None
        # as "metadata unavailable").
        try:
            request = urllib2.Request(url)
            # Header required by the v1 metadata server (per GCE docs);
            # unredirected so it is not dropped on redirects.
            request.add_unredirected_header('Metadata-Flavor', 'Google')
            data = urllib2.urlopen(request).read()
            return json.loads(data)
        except (urllib2.URLError, urllib2.HTTPError, IOError), e:
            return None

    def __GetGSScopes(self):
        """Return all Google Storage scopes available on this VM."""
        scopes = self.__GetJSONMetadataValue(
            SERVICE_ACCOUNT_SCOPES_URL % self.service_account)
        if scopes:
            # Keep only the scopes relevant to Google Storage.
            return list(GS_SCOPES.intersection(set(scopes)))
        return None

    def __GetAccessToken(self):
        """Return an oauth2 access token for Google Storage."""
        token_info = self.__GetJSONMetadataValue(
            SERVICE_ACCOUNT_TOKEN_URL % self.service_account)
        if token_info:
            return token_info['access_token']
        return None

    def add_auth(self, http_request):
        # Fetch a fresh token per request and attach it as the OAuth
        # Authorization header.
        http_request.headers['Authorization'] = (
            'OAuth %s' % self.__GetAccessToken())
| {
"content_hash": "7fad65ae7fe3d496202bd9a554482889",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 77,
"avg_line_length": 33.859154929577464,
"alnum_prop": 0.699667221297837,
"repo_name": "feoff3/compute-image-packages",
"id": "aad5e7560366467885dd497674c2fcbdb3ddd7ce",
"size": "2997",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "google-startup-scripts/usr/share/google/boto/boto_plugins/compute_auth.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "197388"
},
{
"name": "Shell",
"bytes": "61167"
}
],
"symlink_target": ""
} |
import os, sys, logging, json, boto3;
sys.path.append(os.path.dirname(__file__));
from aws_helpers import aws_session, get_account;
from botocore.config import Config
logging.basicConfig(format="%(asctime)s %(levelname)s %(message)s", level="INFO");
if __name__ == "__main__":
    # Parse the command line as "<token> <next-token>" pairs.
    # NOTE(review): every token (including option values and argv[0])
    # becomes a key mapping to its successor; lookups below only use
    # "--flag" keys, so well-formed "--flag value" input behaves as
    # intended. Parser behavior preserved from the original.
    params = dict()
    for i in range(0, len(sys.argv) - 1):
        params[sys.argv[i]] = sys.argv[i + 1]
    logger = logging.getLogger(__file__)
    account = params["--account"] if "--account" in params else get_account(boto3.Session())
    role = params["--role"] if "--role" in params else ""
    region = params["--region"] if "--region" in params else "us-east-1"
    timeout = int(params["--ttl"]) if "--ttl" in params else 30
    session = boto3.Session()
    debug = False if "--debug" in params and int(params["--debug"]) < 1 else True
    policy_name = params["--policy"] if "--policy" in params else ""
    # BUG FIX: the presence check used the key "--perm" while the value was
    # read from "--perms", so supplied permissions were always ignored.
    # Expected format: {"<permission>": "<resource>"}
    permissions = json.loads(params["--perms"]) if "--perms" in params else {}
    if role != "":
        assert account != "", "Account cannot be empty..."
        session = aws_session("arn:aws:iam::{0}:role/{1}".format(account, role))
    try:
        assert policy_name != "", "Policy name cannot be empty..."
        client = session.client("iam", region, config=Config(read_timeout=timeout, connect_timeout=timeout))
        policies = client.list_policies()["Policies"]
        assert any(x["PolicyName"] == policy_name for x in policies), "Policy ({0}) does not exist...".format(policy_name)
        policy = next(x for x in policies if x["PolicyName"] == policy_name)
        full_policy = client.get_policy(PolicyArn=policy["Arn"])
        policy_versions = client.list_policy_versions(PolicyArn=policy["Arn"])["Versions"]
        latest_version = next(x for x in policy_versions)
        latest_version = client.get_policy_version(VersionId=latest_version["VersionId"], PolicyArn=policy["Arn"])
        statements = latest_version["PolicyVersion"]["Document"]["Statement"]
        for perm, resource in permissions.items():
            logger.info("Checking for: {0}; {1}".format(perm, resource))
            found = False
            for stmt in statements:
                # Only statements covering the same resource are relevant.
                if stmt["Resource"] != resource:
                    continue
                # BUG FIX: "found" was previously only set when the
                # permission was being added, so an already-present
                # permission still caused a duplicate statement below.
                found = True
                if isinstance(stmt["Action"], list):
                    if perm not in stmt["Action"]:
                        logger.info("Adding {0} to actions...".format(perm))
                        stmt["Action"].append(perm)
                elif stmt["Action"] != perm:
                    # Promote the single action string to a list.
                    stmt["Action"] = [stmt["Action"], perm]
            if not found:
                logger.info("Permission ({0}) not found for matching resource {1}. Adding new statement to policy...".format(perm, resource))
                statements.append({"Effect": "Allow", "Action": [perm], "Resource": resource})
        if not debug:
            # BUG FIX: serialize the document as JSON; str.format() of a
            # dict produced single-quoted Python repr that IAM rejects.
            # NOTE(review): boto3's IAM client documents create_policy_version
            # (not update_policy) for changing a policy document -- confirm
            # this call against the boto3 API before relying on it.
            client.update_policy(PolicyId=full_policy["PolicyId"], Content=json.dumps(latest_version["PolicyVersion"]["Document"]))
        else:
            logger.info(latest_version["PolicyVersion"]["Document"])
    except Exception as ex:
        logger.error(ex)
| {
"content_hash": "8c39f0d503a900112c119aad9d6f8f5c",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 130,
"avg_line_length": 56.43859649122807,
"alnum_prop": 0.6011812247435498,
"repo_name": "agancsos/python",
"id": "ecd3ab0d301b49e9771a6804de66ea0948f9fd7c",
"size": "3720",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aws/aws_update_policy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1882241"
},
{
"name": "Shell",
"bytes": "1865"
}
],
"symlink_target": ""
} |
import pytest
import sys
from web3.middleware import make_stalecheck_middleware
from web3.middleware.stalecheck import (
_isfresh,
StaleBlockchain,
)
from web3.utils.datastructures import AttributeDict
# unittest.mock only exists from Python 3.3 on; import it conditionally
# and skip the whole module on older interpreters.
if sys.version_info >= (3, 3):
    from unittest.mock import Mock, patch

pytestmark = pytest.mark.skipif(sys.version_info < (3, 3), reason="needs Mock library from 3.3")
@pytest.fixture
def now():
    # Arbitrary fixed "current time" in seconds, used to stub time.time().
    return 3141592653
@pytest.fixture
def allowable_delay():
    # Three days in seconds -- the staleness threshold under test.
    return 3 * 24 * 60 * 60
@pytest.fixture
def request_middleware(allowable_delay):
    """Stalecheck middleware initialized with mocked transport and web3."""
    mocked_request = Mock()
    mocked_web3 = Mock()
    middleware = make_stalecheck_middleware(allowable_delay)
    initialized = middleware(mocked_request, mocked_web3)
    # expose the mocks as attributes for easier assertions later
    initialized.web3 = mocked_web3
    initialized.make_request = mocked_request
    return initialized
def stub_block(timestamp):
    """Return a minimal block-like mapping carrying the given timestamp."""
    block_fields = {
        'timestamp': timestamp,
        'number': 123,
    }
    return AttributeDict(block_fields)
def test_is_not_fresh_with_no_block():
    # A missing block (None) must never be considered fresh.
    assert not _isfresh(None, 1)
def test_is_not_fresh(now):
    """A block one second older than the allowance counts as stale."""
    with patch('time.time', return_value=now):
        allowed = 2 * 86400
        too_old = stub_block(now - allowed - 1)
        assert not _isfresh(too_old, allowed)
def test_is_fresh(now):
    """A block exactly at the allowance boundary still counts as fresh."""
    with patch('time.time', return_value=now):
        allowed = 2 * 86400
        boundary_block = stub_block(now - allowed)
        assert _isfresh(boundary_block, allowed)
def test_stalecheck_pass(request_middleware):
    """When the chain is fresh, requests pass straight through."""
    with patch('web3.middleware.stalecheck._isfresh', return_value=True):
        method = object()
        params = object()
        request_middleware(method, params)
        request_middleware.make_request.assert_called_once_with(method, params)
def test_stalecheck_fail(request_middleware, now):
    """When even the freshly fetched block is stale, raise StaleBlockchain."""
    request_middleware.web3.eth.getBlock.return_value = stub_block(now)
    with patch('web3.middleware.stalecheck._isfresh', return_value=False):
        with pytest.raises(StaleBlockchain):
            request_middleware('', [])
@pytest.mark.parametrize(
    'rpc_method',
    [
        'eth_getBlockByNumber',
    ]
)
def test_stalecheck_ignores_get_by_block_methods(request_middleware, rpc_method):
    """Block-fetching RPC methods must bypass the freshness check.

    This is especially critical for getBlock('latest'), which would
    otherwise cause infinite recursion.
    """
    with patch('web3.middleware.stalecheck._isfresh', side_effect=[False, True]):
        request_middleware(rpc_method, [])
        assert not request_middleware.web3.eth.getBlock.called
def test_stalecheck_calls_isfresh_with_empty_cache(request_middleware, allowable_delay):
    """First request checks the empty cache, then the freshly fetched block."""
    with patch('web3.middleware.stalecheck._isfresh', side_effect=[False, True]) as freshspy:
        latest = object()
        request_middleware.web3.eth.getBlock.return_value = latest
        request_middleware('', [])
        first_call, second_call = freshspy.call_args_list
        assert first_call[0] == (None, allowable_delay)
        assert second_call[0] == (latest, allowable_delay)
def test_stalecheck_adds_block_to_cache(request_middleware, allowable_delay):
    """A fetched block is cached: the second request skips the live fetch."""
    with patch('web3.middleware.stalecheck._isfresh', side_effect=[False, True, True]) as freshspy:
        latest = object()
        request_middleware.web3.eth.getBlock.return_value = latest
        # first request: cache miss, then fresh live block
        request_middleware('', [])
        assert freshspy.call_count == 2
        first_call, second_call = freshspy.call_args_list
        assert first_call == ((None, allowable_delay), )
        assert second_call == ((latest, allowable_delay), )
        # second request: served from the cached block
        request_middleware('', [])
        assert freshspy.call_count == 3
        assert freshspy.call_args == ((latest, allowable_delay), )
| {
"content_hash": "25dc824a5a566d82c268931e4e8258d6",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 99,
"avg_line_length": 31.646551724137932,
"alnum_prop": 0.6774720784527377,
"repo_name": "pipermerriam/web3.py",
"id": "55af4d8b565d5e3c7546661a7d46a0a6908cd4d2",
"size": "3672",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/core/middleware/test_stalecheck.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "999"
},
{
"name": "Python",
"bytes": "619517"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, print_function, division
import unittest
import sys
import numpy
import scipy.linalg
import theano
from theano import gof, tensor, scalar
from theano.tests import unittest_tools as utt
class Solve(gof.Op):
    """
    Find the solution x to the linear equation A x = b.

    A is a 2d matrix and b is a 1d or 2d matrix; the solve itself is
    delegated to ``scipy.linalg.solve``.
    """
    # TODO: Add class options to use the performance-enhancing flags
    # sym_pos, lower, overwrite_a, overwrite_b
    # TODO: Add C code that calls the underlying LAPACK routines
    # and keeps a memory workspace from call to call as a non-default Op
    # output
    def __eq__(self, other):
        # All Solve instances behave identically: equality is by type only.
        return type(self) == type(other)
    def __hash__(self):
        return hash(type(self))
    def make_node(self, A, b):
        """Validate the operand shapes and build the Apply node."""
        A_var = tensor.as_tensor_variable(A)
        b_var = tensor.as_tensor_variable(b)
        if A_var.broadcastable != (False, False):
            raise TypeError("A must be a matrix", A_var.type)
        if b_var.broadcastable not in ((False,), (True, False), (False, False)):
            raise TypeError("b must be a matrix or vector", b_var.type)
        # Output dtype follows the usual upcast rule; its shape mirrors b.
        out_dtype = scalar.upcast(A_var.dtype, b_var.dtype)
        out_type = tensor.TensorType(broadcastable=b_var.broadcastable,
                                     dtype=out_dtype)
        return gof.Apply(op=self, inputs=[A_var, b_var], outputs=[out_type()])
    def perform(self, node, inp, out):
        A, b = inp
        output, = out
        result = scipy.linalg.solve(A, b)
        if result.dtype != node.outputs[0].dtype:
            # scipy may return a wider dtype than the graph promised.
            print("WARNING: Solve.perform() required cast.", file=sys.stderr)
            result = theano._asarray(result, dtype=node.outputs[0].dtype)
        output[0] = result
# Shared singleton instance; all Solve Ops compare equal (see __eq__).
solve = Solve()
# TODO: test dtype conversion
# TODO: test that invalid types are rejected by make_node
# TODO: test that each valid type for A and b works correctly
class T_solve(unittest.TestCase):
    """Numerical sanity check for the scipy-backed solve."""
    def setUp(self):
        # Seed through the test utilities so failures are reproducible.
        self.rng = numpy.random.RandomState(utt.fetch_seed(666))
    def test0(self):
        lhs = self.rng.randn(5, 5)
        rhs = numpy.arange(5, dtype=float)
        solution = scipy.linalg.solve(lhs, rhs)
        reconstructed = numpy.dot(lhs, solution)
        rel_err = tensor.numeric_grad.abs_rel_err(reconstructed, rhs)
        self.assertTrue(numpy.all(rel_err < 1.0e-5),
                        (rel_err, reconstructed, rhs))
| {
"content_hash": "92b3da8c5dded0108c8540d28964b541",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 79,
"avg_line_length": 31.346666666666668,
"alnum_prop": 0.6201616333475117,
"repo_name": "JazzeYoung/VeryDeepAutoEncoder",
"id": "d6eafa68b1e64fa82db91e0df49398a034be379b",
"size": "2351",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "theano/sandbox/solve.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "260790"
},
{
"name": "C++",
"bytes": "323987"
},
{
"name": "CSS",
"bytes": "1750"
},
{
"name": "Cuda",
"bytes": "2767955"
},
{
"name": "HTML",
"bytes": "4611"
},
{
"name": "Jupyter Notebook",
"bytes": "4603376"
},
{
"name": "Makefile",
"bytes": "116"
},
{
"name": "Python",
"bytes": "16514506"
},
{
"name": "Shell",
"bytes": "16447"
}
],
"symlink_target": ""
} |
"""
profiling.viewer
~~~~~~~~~~~~~~~~
A text user interface application which inspects statistics. To run it
easily do:
.. sourcecode:: console
$ profiling view SOURCE
::
viewer = StatisticsViewer()
loop = viewer.loop()
loop.run()
"""
from __future__ import absolute_import
from collections import deque
import urwid
from urwid import connect_signal as on
from . import sortkeys
__all__ = ['StatisticsTable', 'StatisticsViewer', 'fmt']
class Formatter(object):
    """Turns raw numbers into urwid text markup for the statistics table.

    ``_markup`` and ``_make_text`` are class-body-only factories used to
    derive the ``markup_*``/``make_*_text`` methods; they are deleted at
    the bottom of the class body and never exist on instances.
    """
    def _markup(get_string, get_attr=None):
        # Derive a method returning either a plain string or an
        # ``(attr, string)`` markup pair understood by urwid.
        def markup(self, *args, **kwargs):
            text = get_string(self, *args, **kwargs)
            if get_attr is None:
                return text
            return (get_attr(self, *args, **kwargs), text)
        return markup
    # Common urwid.Text options for right-aligned numeric cells.
    _numeric = {'align': 'right', 'wrap': 'clip'}
    def _make_text(get_markup, **text_kwargs):
        # Derive a method wrapping the corresponding markup in urwid.Text.
        def make_text(self, *args, **kwargs):
            return urwid.Text(get_markup(self, *args, **kwargs),
                              **text_kwargs)
        return make_text
    # -- percent -----------------------------------------------------------
    def format_percent(self, ratio, denom=1, unit=False):
        """Render ratio/denom as a percentage, 4~5 chars wide with unit.

        Examples: 0.01 -> '1.00%', 0.1 -> '10.0%', 1 -> '100%'.
        """
        try:
            ratio /= float(denom)
        except ZeroDivisionError:
            ratio = 0
        rounded = round(ratio, 2)
        if rounded >= 1:
            precision = 0
        elif rounded >= 0.1:
            precision = 1
        else:
            precision = 2
        text = ('{:.' + str(precision) + 'f}').format(ratio * 100)
        return text + '%' if unit else text
    def attr_ratio(self, ratio, denom=1, unit=False):
        """Pick the risk palette attribute for the given ratio."""
        try:
            ratio /= float(denom)
        except ZeroDivisionError:
            ratio = 0
        if ratio > 0.9:
            return 'danger'
        if ratio > 0.7:
            return 'caution'
        if ratio > 0.3:
            return 'warning'
        if ratio > 0.1:
            return 'notice'
        if ratio <= 0:
            return 'zero'
        # 0 < ratio <= 0.1 falls through: default attribute (None).
    markup_percent = _markup(format_percent, attr_ratio)
    make_percent_text = _make_text(markup_percent, **_numeric)
    # -- int ---------------------------------------------------------------
    def format_int(self, num, units='kMGTPEZY'):
        """Render an integer compactly, 1~6 chars.

        Examples: 999 -> '999', 1000 -> '1.0k', 1000000 -> '1.0M';
        'o/f'/'u/f' once the unit suffixes run out.
        """
        suffix = None
        suffix_iter = iter(units)
        while abs(round(num, 1)) >= 1e3:
            num /= 1e3
            try:
                suffix = next(suffix_iter)
            except StopIteration:
                # Ran out of suffixes: overflow or underflow.
                return 'o/f' if num > 0 else 'u/f'
        if suffix is None:
            return '{:.0f}'.format(num)
        return '{:.1f}{}'.format(num, suffix)
    def attr_int(self, num):
        return None if num else 'zero'
    markup_int = _markup(format_int, attr_int)
    make_int_text = _make_text(markup_int, **_numeric)
    # -- int or n/a --------------------------------------------------------
    def format_int_or_na(self, num):
        """Like :meth:`format_int`, but renders zero as 'n/a'."""
        if num == 0:
            return 'n/a'
        return self.format_int(num)
    markup_int_or_na = _markup(format_int_or_na, attr_int)
    make_int_or_na_text = _make_text(markup_int_or_na, **_numeric)
    # -- time --------------------------------------------------------------
    def format_time(self, sec):
        """Render seconds at a human scale, 1~6 chars in most cases.

        Examples: 0.000123 -> '123us', 0.123456 -> '123ms',
        12.34567 -> '12.3sec', 123.4567 -> '2min3s', 6120 -> '102min'.
        """
        if sec == 0:
            return '0'
        if sec < 1e-3:
            # 1us ~ 999us
            return '{:.0f}us'.format(sec * 1e6)
        if sec < 1:
            # 1ms ~ 999ms
            return '{:.0f}ms'.format(sec * 1e3)
        if sec < 60:
            # 1.0sec ~ 59.9sec
            return '{:.1f}sec'.format(sec)
        if sec < 600:
            # 1min0s ~ 9min59s
            return '{:.0f}min{:.0f}s'.format(sec // 60, sec % 60)
        return '{:.0f}min'.format(sec // 60)
    def attr_time(self, sec):
        if sec == 0:
            return 'zero'
        elif sec < 1e-3:
            return 'usec'
        elif sec < 1:
            return 'msec'
        elif sec < 60:
            return 'sec'
        else:
            return 'min'
    markup_time = _markup(format_time, attr_time)
    make_time_text = _make_text(markup_time, **_numeric)
    # -- stats -------------------------------------------------------------
    def markup_stats(self, stats):
        """Markup a stats row as ``name (module:lineno)``, or location only."""
        if stats.name:
            loc = ('({0}:{1})'
                   ''.format(stats.module or stats.filename, stats.lineno))
            return [('name', stats.name), ' ', ('loc', loc)]
        return ('loc', stats.module or stats.filename)
    make_stat_text = _make_text(markup_stats, wrap='clip')
    # The factories are implementation scaffolding only; drop them.
    del _markup
    del _make_text
# Shared module-level formatter instance used by the widgets below.
fmt = Formatter()
class StatisticWidget(urwid.TreeWidget):
    """A tree row for a single statistic.

    Emits ``expanded``/``collapsed`` signals when its state changes so the
    owning table can remember which rows are open across refreshes.
    """
    signals = ['expanded', 'collapsed']
    icon_chars = ('+', '-', ' ')  # collapsed, expanded, leaf
    def __init__(self, node):
        super(StatisticWidget, self).__init__(node)
        # Wrap so the focused row gets the viewer's focus attributes.
        self._w = urwid.AttrWrap(self._w, None, StatisticsViewer.focus_map)
    def selectable(self):
        return True
    @property
    def expanded(self):
        return self._expanded
    @expanded.setter
    def expanded(self, expanded):
        # During TreeWidget.__init__ the backing ``_expanded`` attribute
        # does not exist yet, so the getter raises AttributeError and
        # hasattr() is False; skip signal emission for that very first set.
        in_init = not hasattr(self, 'expanded')
        self._expanded = expanded
        if in_init:
            return
        if expanded:
            urwid.emit_signal(self, 'expanded')
        else:
            urwid.emit_signal(self, 'collapsed')
    def get_mark(self):
        """Gets an expanded, collapsed, or leaf icon."""
        if self.is_leaf:
            char = self.icon_chars[2]
        else:
            char = self.icon_chars[int(self.expanded)]
        return urwid.SelectableIcon(('mark', char), 0)
    def load_inner_widget(self):
        # The row's cells are produced by the table, not the widget.
        node = self.get_node()
        return node.table.make_row(node)
    def get_indented_widget(self):
        # [mark icon | row cells], left-padded by tree depth.
        icon = self.get_mark()
        widget = self.get_inner_widget()
        node = self.get_node()
        widget = urwid.Columns([('fixed', 1, icon), widget], 1)
        indent = (node.get_depth() - 1)
        widget = urwid.Padding(widget, left=indent)
        return widget
    def update_mark(self):
        """Refreshes the +/- icon in place after an expand/collapse."""
        widget = self._w.base_widget
        try:
            widget.widget_list[0] = self.get_mark()
        except (TypeError, AttributeError):
            # Widget layout has no replaceable icon slot; nothing to do.
            return
    def update_expanded_icon(self):
        # urwid hook; delegate to our own mark refresh.
        self.update_mark()
    def expand(self):
        self.expanded = True
        self.update_mark()
    def collapse(self):
        self.expanded = False
        self.update_mark()
    def keypress(self, size, key):
        # Translate activate/left/right into urwid's +/- expand keys.
        command = self._command_map[key]
        if command == urwid.ACTIVATE:
            key = '-' if self.expanded else '+'
        elif command == urwid.CURSOR_RIGHT:
            key = '+'
        elif self.expanded and command == urwid.CURSOR_LEFT:
            key = '-'
        return super(StatisticWidget, self).keypress(size, key)
class EmptyWidget(urwid.Widget):
    """A widget that renders nothing but a fixed number of blank rows."""
    def __init__(self, rows=0):
        super(EmptyWidget, self).__init__()
        self._rows = rows
    def rows(self, size, focus=False):
        # Height is fixed, independent of the available size.
        return self._rows
    def render(self, size, focus=False):
        width = size[0]
        return urwid.SolidCanvas(' ', width, self.rows(size, focus))
class StatisticsWidget(StatisticWidget):
    """Tree widget for the root statistics node; it shows nothing itself."""
    def get_mark(self):
        raise TypeError('Statistics widget has no mark')
    def load_inner_widget(self):
        return EmptyWidget()
    def get_indented_widget(self):
        # No mark and no indentation for the invisible root row.
        return self.get_inner_widget()
    def update(self):
        # Nothing visible to refresh.
        pass
    def unexpand(self):
        pass
class StatisticNodeBase(urwid.TreeNode):
    """Base tree node that keeps a back-reference to its owning table."""
    def __init__(self, stats=None, parent=None, key=None, depth=None,
                 table=None):
        super(StatisticNodeBase, self).__init__(stats, parent, key, depth)
        self.table = table
    def get_focus(self):
        focus_pair = super(StatisticNodeBase, self).get_focus()
        if self.table is not None:
            # Keep the table's walker in sync with the focused node.
            self.table.walker.set_focus(self)
        return focus_pair
    def get_widget(self, reload=False):
        if reload or self._widget is None:
            self._widget = self.load_widget()
            self.setup_widget(self._widget)
        return self._widget
    def load_widget(self):
        return self._widget_class(self)
    def setup_widget(self, widget):
        if self.table is None:
            return
        # Restore the expansion state the table remembers for this stat.
        if hash(self.get_value()) in self.table._expanded_stat_hashes:
            widget.expand()
class NullStatisticWidget(StatisticWidget):
    """Placeholder row shown when no statistics are available yet."""
    def __init__(self, node):
        # Bypass StatisticWidget.__init__ (no focus wrapping needed).
        urwid.TreeWidget.__init__(self, node)
    def get_inner_widget(self):
        text = urwid.Text(('weak', '- Not Available -'), align='center')
        filled = urwid.Filler(text)
        return urwid.BoxAdapter(filled, 3)
class NullStatisticNode(StatisticNodeBase):
    # Node used before any statistics exist; renders '- Not Available -'.
    _widget_class = NullStatisticWidget
class LeafStatisticNode(StatisticNodeBase):
    # Node for a statistic with no children; plain (non-parent) tree row.
    _widget_class = StatisticWidget
class StatisticNode(StatisticNodeBase, urwid.ParentNode):
    """A parent tree node holding one statistic and its child statistics."""
    def deep_usage(self):
        """Ratio of this statistic's deep time to the total CPU time."""
        stats = self.get_value()
        table = self.get_root()
        try:
            # NOTE(review): get_root() returns the root *node*, while
            # ``cpu_time`` is set on the table (see set_result); if the
            # root node has no ``cpu_time`` this always falls into the
            # except branch and returns 0.0 -- verify intent.
            return stats.deep_time / table.cpu_time
        except AttributeError:
            return 0.0
    def load_widget(self):
        # The root gets the invisible StatisticsWidget; every other row
        # gets a normal StatisticWidget. Both start collapsed.
        if self.is_root():
            widget_class = StatisticsWidget
        else:
            widget_class = StatisticWidget
        widget = widget_class(self)
        widget.collapse()
        return widget
    def setup_widget(self, widget):
        super(StatisticNode, self).setup_widget(widget)
        if self.get_depth() == 0:
            # just expand the root node
            widget.expand()
            return
        table = self.table
        if table is None:
            return
        # Let the table track this row's expansion state via signals.
        on(widget, 'expanded', table._widget_expanded, widget)
        on(widget, 'collapsed', table._widget_collapsed, widget)
    def load_child_keys(self):
        # Child "keys" are the child statistics themselves, in table order.
        stats = self.get_value()
        if stats is None:
            return ()
        return stats.sorted(self.table.order)
    def load_child_node(self, stats):
        depth = self.get_depth() + 1
        # Statistics with children become parent nodes; others are leaves.
        node_class = StatisticNode if len(stats) else LeafStatisticNode
        return node_class(stats, self, stats, depth, self.table)
class StatisticsListBox(urwid.TreeListBox):
    """Tree list box that reports focus changes through an urwid signal."""
    signals = ['focus_changed']
    def change_focus(self, *args, **kwargs):
        super(StatisticsListBox, self).change_focus(*args, **kwargs)
        new_focus = self.get_focus()
        urwid.emit_signal(self, 'focus_changed', new_focus)
class StatisticsWalker(urwid.TreeWalker):
    """Tree walker that reports focus changes through an urwid signal."""
    signals = ['focus_changed']
    def set_focus(self, focus):
        super(StatisticsWalker, self).set_focus(focus)
        urwid.emit_signal(self, 'focus_changed', focus)
class StatisticsTable(urwid.WidgetWrap):
    """The tree table of profiling statistics.

    Layout: an urwid.Frame whose header is a Pile of [summary header row,
    column headings] and whose body is the statistics tree list box.
    """
    #: The column declarations. Define it with a list of (name, align, width,
    #: order) tuples.
    columns = [('FUNCTION', 'left', ('weight', 1), sortkeys.by_function)]
    #: The initial order.
    order = sortkeys.by_function
    # Current result metadata; populated via set_result() / subclasses.
    title = None
    stats = None
    time = None
    def __init__(self, viewer):
        # Hashes of statistics whose rows are expanded; used to restore
        # expansion state when the tree is rebuilt on refresh.
        self._expanded_stat_hashes = set()
        self.walker = StatisticsWalker(NullStatisticNode())
        on(self.walker, 'focus_changed', self._walker_focus_changed)
        tbody = StatisticsListBox(self.walker)
        thead = urwid.AttrMap(self.make_columns([
            urwid.Text(name, align, 'clip')
            for name, align, __, __ in self.columns
        ]), None)
        header = urwid.Columns([])
        widget = urwid.Frame(tbody, urwid.Pile([header, thead]))
        super(StatisticsTable, self).__init__(widget)
        self.viewer = viewer
        self.update_frame()
    def make_row(self, node):
        """Builds the row widget (one cell per column) for a tree node."""
        stats = node.get_value()
        return self.make_columns(self.make_cells(node, stats))
    def make_cells(self, node, stats):
        # Generator of cell widgets; subclasses extend it with more cells.
        yield fmt.make_stat_text(stats)
    @classmethod
    def make_columns(cls, column_widgets):
        """Wraps cell widgets in urwid.Columns using the declared widths."""
        widget_list = []
        widths = (width for __, __, width, __ in cls.columns)
        for width, widget in zip(widths, column_widgets):
            widget_list.append(width + (widget,))
        return urwid.Columns(widget_list, 1)
    @property
    def tbody(self):
        # The frame body: the statistics tree list box.
        return self._w.body
    @tbody.setter
    def tbody(self, body):
        self._w.body = body
    @property
    def thead(self):
        # Second row of the header pile: the column headings.
        return self._w.header.contents[1][0]
    @thead.setter
    def thead(self, thead):
        self._w.header.contents[1] = (thead, ('pack', None))
    @property
    def header(self):
        # First row of the header pile: CPU usage and title/time info.
        return self._w.header.contents[0][0]
    @header.setter
    def header(self, header):
        self._w.header.contents[0] = (header, ('pack', None))
    @property
    def footer(self):
        return self._w.footer
    @footer.setter
    def footer(self, footer):
        self._w.footer = footer
    def get_focus(self):
        return self.tbody.get_focus()
    def set_focus(self, focus):
        self.tbody.set_focus(focus)
    def get_path(self):
        """Gets the path to the focused statistic. Each step is a hash of
        statistic object.
        """
        path = deque()
        __, node = self.get_focus()
        while not node.is_root():
            stats = node.get_value()
            path.appendleft(hash(stats))
            node = node.get_parent()
        return path
    def find_node(self, node, path):
        """Finds a node by the given path from the given node."""
        for hash_value in path:
            if isinstance(node, LeafStatisticNode):
                # Leaves have no children; stop descending.
                break
            for stats in node.get_child_keys():
                if hash(stats) == hash_value:
                    node = node.get_child_node(stats)
                    break
            else:
                # No child matched this step; give up at current depth.
                break
        return node
    def get_stats(self):
        return self.stats
    def set_result(self, stats, cpu_time=0.0, wall_time=0.0,
                   title=None, at=None):
        """Stores a new profiling result; redraws unless paused."""
        self.stats = stats
        self.cpu_time = cpu_time
        self.wall_time = wall_time
        self.title = title
        # NOTE(review): this stores ``at`` but update_frame() reads
        # ``self.time`` (class attribute, always None here) -- confirm
        # whether ``time`` should be assigned from ``at``.
        self.at = at
        if not self.viewer.paused:
            self.refresh()
    def sort_stats(self, order=sortkeys.by_deep_time):
        """Sets the active sort key and rebuilds the tree."""
        assert callable(order)
        self.order = order
        self.refresh()
    def shift_order(self, delta):
        """Cycles the sort order to the next/previous sortable column."""
        orders = [order for __, __, __, order in self.columns if order]
        x = orders.index(self.order)
        order = orders[(x + delta) % len(orders)]
        self.sort_stats(order)
    def refresh(self):
        """Rebuilds the tree from current stats, preserving focus path."""
        stats = self.get_stats()
        node = StatisticNode(stats, table=self)
        path = self.get_path()
        node = self.find_node(node, path)
        self.set_focus(node)
    def update_frame(self, focus=None):
        """Redraws the heading attributes and the summary header row."""
        # set thead attr
        if self.viewer.paused:
            thead_attr = 'thead.paused'
        elif not self.viewer.active:
            thead_attr = 'thead.inactive'
        else:
            thead_attr = 'thead'
        self.thead.set_attr_map({None: thead_attr})
        # set sorting column in thead attr
        for x, (__, __, __, order) in enumerate(self.columns):
            attr = thead_attr + '.sorted' if order is self.order else None
            widget = self.thead.base_widget.contents[x][0]
            text, __ = widget.get_text()
            widget.set_text((attr, text))
        if self.viewer.paused:
            return
        # update header
        stats = self.get_stats()
        if stats is None:
            return
        title = self.title
        time = self.time
        if title or time:
            if time is not None:
                time_string = '{:%H:%M:%S}'.format(time)
            if title and time:
                markup = [('weak', title), ' ', time_string]
            elif title:
                markup = title
            else:
                markup = time_string
            meta_info = urwid.Text(markup, align='right')
        else:
            meta_info = None
        fraction_string = '({0}/{1})'.format(
            fmt.format_time(self.cpu_time),
            fmt.format_time(self.wall_time))
        try:
            cpu_usage = self.cpu_time / self.wall_time
        except ZeroDivisionError:
            cpu_usage = 0.0
        cpu_info = urwid.Text([
            'CPU ', fmt.markup_percent(cpu_usage, unit=True),
            ' ', ('weak', fraction_string)])
        # set header columns
        col_opts = ('weight', 1, False)
        self.header.contents = \
            [(w, col_opts) for w in [cpu_info, meta_info] if w]
    def focus_hotspot(self, size):
        """Expands down the first-child chain and focuses the deepest row."""
        widget, __ = self.tbody.get_focus()
        while widget:
            node = widget.get_node()
            widget.expand()
            widget = widget.first_child()
        self.tbody.change_focus(size, node)
    def defocus(self):
        # Move the focus back to the (invisible) root row.
        __, node = self.get_focus()
        self.set_focus(node.get_root())
    def keypress(self, size, key):
        """Table-level key bindings; returns True when a key is consumed."""
        base = super(StatisticsTable, self)
        command = self._command_map[key]
        if key == ']':
            self.shift_order(+1)
            return True
        elif key == '[':
            self.shift_order(-1)
            return True
        elif key == '>':
            self.focus_hotspot(size)
            return True
        elif command == self._command_map['esc']:
            self.defocus()
            return True
        elif command == self._command_map['right']:
            # On an expanded row, descend into the heaviest (first) child.
            widget, node = self.tbody.get_focus()
            if widget.expanded:
                heavy_widget = widget.first_child()
                if heavy_widget is not None:
                    heavy_node = heavy_widget.get_node()
                    self.tbody.change_focus(size, heavy_node)
                return True
        elif command == self._command_map['left']:
            # On a collapsed row, climb back to the parent.
            widget, node = self.tbody.get_focus()
            if not widget.expanded:
                parent_node = node.get_parent()
                if not parent_node.is_root():
                    self.tbody.change_focus(size, parent_node)
                return True
        elif command == self._command_map[' ']:
            if self.viewer.paused:
                self.viewer.resume()
            else:
                self.viewer.pause()
            return True
        return base.keypress(size, key)
    # signal handlers
    def _walker_focus_changed(self, focus):
        self.update_frame(focus)
    def _widget_expanded(self, widget):
        stats = widget.get_node().get_value()
        self._expanded_stat_hashes.add(hash(stats))
    def _widget_collapsed(self, widget):
        stats = widget.get_node().get_value()
        self._expanded_stat_hashes.discard(hash(stats))
class StatisticsViewer(object):
    """Top-level urwid application wrapping a StatisticsTable."""
    weak_color = 'light green'
    # (attr name, foreground, background[, mono]) palette entries.
    palette = [
        ('weak', weak_color, ''),
        ('focus', 'standout', '', 'standout'),
        # ui
        ('thead', 'dark cyan, standout', '', 'standout'),
        ('thead.paused', 'dark red, standout', '', 'standout'),
        ('thead.inactive', 'brown, standout', '', 'standout'),
        ('mark', 'dark magenta', ''),
        # risk
        ('danger', 'dark red', '', 'blink'),
        ('caution', 'light red', '', 'blink'),
        ('warning', 'brown', '', 'blink'),
        ('notice', 'dark green', '', 'blink'),
        # clock
        ('min', 'dark red', ''),
        ('sec', 'brown', ''),
        ('msec', '', ''),
        ('usec', weak_color, ''),
        # etc
        ('zero', weak_color, ''),
        ('name', 'bold', ''),
        ('loc', 'dark blue', ''),
    ]
    # add thead.*.sorted palette entries
    # (class-body loop: iterates a copy because it appends to the list)
    for entry in palette[:]:
        attr = entry[0]
        if attr is not None and attr.startswith('thead'):
            fg, bg, mono = entry[1:4]
            palette.append((attr + '.sorted', fg + ', underline',
                            bg, mono + ', underline'))
    # Map every palette attr (and the default) to 'focus' for focused rows.
    focus_map = {None: 'focus'}
    focus_map.update((x[0], 'focus') for x in palette)
    #: Whether the viewer is active.
    active = False
    #: Whether the viewer is paused.
    paused = False
    def unhandled_input(self, key):
        # 'q'/'Q' quits the application.
        if key in ('q', 'Q'):
            raise urwid.ExitMainLoop()
    def __init__(self):
        self.table = StatisticsTable(self)
        self.widget = urwid.Padding(self.table, right=1)
    def loop(self, *args, **kwargs):
        """Builds the urwid main loop for this viewer."""
        kwargs.setdefault('unhandled_input', self.unhandled_input)
        loop = urwid.MainLoop(self.widget, self.palette, *args, **kwargs)
        return loop
    def set_profiler_class(self, profiler_class):
        """Swaps in the table class declared by the profiler, if different."""
        table_class = profiler_class.table_class
        if type(self.table) is table_class:  # don't use isinstance().
            return
        self.table = table_class(self)
        self.widget.original_widget = self.table
    def set_result(self, stats, cpu_time=0.0, wall_time=0.0,
                   title=None, at=None):
        """Forwards a result to the table, or buffers it while paused."""
        if self.paused:
            self._pending = (stats, cpu_time, wall_time, title, at)
        else:
            self.table.set_result(stats, cpu_time, wall_time, title, at)
    def activate(self):
        self.active = True
        self.table.update_frame()
    def inactivate(self):
        self.active = False
        self.table.update_frame()
    def pause(self):
        self.paused = True
        self.table.update_frame()
    def resume(self):
        """Unpauses and applies any result buffered while paused."""
        self.paused = False
        try:
            stats, cpu_time, wall_time, title, at = self._pending
        except AttributeError:
            # Nothing arrived while paused; just redraw.
            self.table.update_frame()
        else:
            del self._pending
            self.table.set_result(stats, cpu_time, wall_time, title, at)
    def use_vim_command_map(self):
        # h/j/k/l as arrow keys.
        urwid.command_map['h'] = urwid.command_map['left']
        urwid.command_map['j'] = urwid.command_map['down']
        urwid.command_map['k'] = urwid.command_map['up']
        urwid.command_map['l'] = urwid.command_map['right']
    def use_game_command_map(self):
        # w/a/s/d as arrow keys.
        urwid.command_map['a'] = urwid.command_map['left']
        urwid.command_map['s'] = urwid.command_map['down']
        urwid.command_map['w'] = urwid.command_map['up']
        urwid.command_map['d'] = urwid.command_map['right']
| {
"content_hash": "9f683c551e186a10e089b153565a5f3c",
"timestamp": "",
"source": "github",
"line_count": 790,
"max_line_length": 78,
"avg_line_length": 28.99367088607595,
"alnum_prop": 0.5408862693734993,
"repo_name": "sublee/profiling",
"id": "19f692e4f1a898d9e753bbf566c64a204f29c14c",
"size": "22929",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "profiling/viewer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1705"
},
{
"name": "Python",
"bytes": "118175"
}
],
"symlink_target": ""
} |
from typing import MutableMapping, MutableSequence
from google.protobuf import timestamp_pb2 # type: ignore
import proto # type: ignore
from google.cloud.batch_v1.types import job as gcb_job
from google.cloud.batch_v1.types import task
# Registers this module's message classes under the batch v1 proto package.
__protobuf__ = proto.module(
    package="google.cloud.batch.v1",
    manifest={
        "CreateJobRequest",
        "GetJobRequest",
        "DeleteJobRequest",
        "ListJobsRequest",
        "ListJobsResponse",
        "ListTasksRequest",
        "ListTasksResponse",
        "GetTaskRequest",
        "OperationMetadata",
    },
)
class CreateJobRequest(proto.Message):
    r"""CreateJob Request.
    Attributes:
        parent (str):
            Required. The parent resource name where the
            Job will be created. Pattern:
            "projects/{project}/locations/{location}".
        job_id (str):
            ID used to uniquely identify the Job within its parent
            scope. This field should contain at most 63 characters and
            must start with lowercase characters. Only lowercase
            characters, numbers and '-' are accepted. The '-' character
            cannot be the first or the last one. A system generated ID
            will be used if the field is not set.
            The job.name field in the request will be ignored and the
            created resource name of the Job will be
            "{parent}/jobs/{job_id}".
        job (google.cloud.batch_v1.types.Job):
            Required. The Job to create.
        request_id (str):
            Optional. An optional request ID to identify
            requests. Specify a unique request ID so that if
            you must retry your request, the server will
            know to ignore the request if it has already
            been completed. The server will guarantee that
            for at least 60 minutes since the first request.
            For example, consider a situation where you make
            an initial request and t he request times out.
            If you make the request again with the same
            request ID, the server can check if original
            operation with the same request ID was received,
            and if so, will ignore the second request. This
            prevents clients from accidentally creating
            duplicate commitments.
            The request ID must be a valid UUID with the
            exception that zero UUID is not supported
            (00000000-0000-0000-0000-000000000000).
    """
    # Generated protobuf wrapper: field numbers are wire-format contract,
    # do not renumber.
    parent: str = proto.Field(
        proto.STRING,
        number=1,
    )
    job_id: str = proto.Field(
        proto.STRING,
        number=2,
    )
    job: gcb_job.Job = proto.Field(
        proto.MESSAGE,
        number=3,
        message=gcb_job.Job,
    )
    request_id: str = proto.Field(
        proto.STRING,
        number=4,
    )
class GetJobRequest(proto.Message):
    r"""GetJob Request.
    Attributes:
        name (str):
            Required. Job name.
    """
    # Generated protobuf wrapper: field number is wire-format contract.
    name: str = proto.Field(
        proto.STRING,
        number=1,
    )
class DeleteJobRequest(proto.Message):
    r"""DeleteJob Request.
    Attributes:
        name (str):
            Job name.
        reason (str):
            Optional. Reason for this deletion.
        request_id (str):
            Optional. An optional request ID to identify
            requests. Specify a unique request ID so that if
            you must retry your request, the server will
            know to ignore the request if it has already
            been completed. The server will guarantee that
            for at least 60 minutes after the first request.
            For example, consider a situation where you make
            an initial request and t he request times out.
            If you make the request again with the same
            request ID, the server can check if original
            operation with the same request ID was received,
            and if so, will ignore the second request. This
            prevents clients from accidentally creating
            duplicate commitments.
            The request ID must be a valid UUID with the
            exception that zero UUID is not supported
            (00000000-0000-0000-0000-000000000000).
    """
    # Generated protobuf wrapper. Field number 3 is intentionally absent
    # here (request_id is 4, per the upstream proto definition).
    name: str = proto.Field(
        proto.STRING,
        number=1,
    )
    reason: str = proto.Field(
        proto.STRING,
        number=2,
    )
    request_id: str = proto.Field(
        proto.STRING,
        number=4,
    )
class ListJobsRequest(proto.Message):
    r"""ListJob Request.
    Attributes:
        parent (str):
            Parent path.
        filter (str):
            List filter.
        page_size (int):
            Page size.
        page_token (str):
            Page token.
    """
    # Generated protobuf wrapper. Note the non-sequential numbers
    # (filter=4) come from the upstream proto; do not renumber.
    parent: str = proto.Field(
        proto.STRING,
        number=1,
    )
    filter: str = proto.Field(
        proto.STRING,
        number=4,
    )
    page_size: int = proto.Field(
        proto.INT32,
        number=2,
    )
    page_token: str = proto.Field(
        proto.STRING,
        number=3,
    )
class ListJobsResponse(proto.Message):
    r"""ListJob Response.
    Attributes:
        jobs (MutableSequence[google.cloud.batch_v1.types.Job]):
            Jobs.
        next_page_token (str):
            Next page token.
        unreachable (MutableSequence[str]):
            Locations that could not be reached.
    """
    @property
    def raw_page(self):
        # Pagination hook used by the client library's page iterators.
        return self
    jobs: MutableSequence[gcb_job.Job] = proto.RepeatedField(
        proto.MESSAGE,
        number=1,
        message=gcb_job.Job,
    )
    next_page_token: str = proto.Field(
        proto.STRING,
        number=2,
    )
    unreachable: MutableSequence[str] = proto.RepeatedField(
        proto.STRING,
        number=3,
    )
class ListTasksRequest(proto.Message):
    r"""ListTasks Request.
    Attributes:
        parent (str):
            Required. Name of a TaskGroup from which Tasks are being
            requested. Pattern:
            "projects/{project}/locations/{location}/jobs/{job}/taskGroups/{task_group}".
        filter (str):
            Task filter, null filter matches all Tasks.
            Filter string should be of the format
            State=TaskStatus.State e.g. State=RUNNING
        page_size (int):
            Page size.
        page_token (str):
            Page token.
    """
    # Generated protobuf wrapper: field numbers are wire-format contract.
    parent: str = proto.Field(
        proto.STRING,
        number=1,
    )
    filter: str = proto.Field(
        proto.STRING,
        number=2,
    )
    page_size: int = proto.Field(
        proto.INT32,
        number=3,
    )
    page_token: str = proto.Field(
        proto.STRING,
        number=4,
    )
class ListTasksResponse(proto.Message):
    r"""ListTasks Response.
    Attributes:
        tasks (MutableSequence[google.cloud.batch_v1.types.Task]):
            Tasks.
        next_page_token (str):
            Next page token.
        unreachable (MutableSequence[str]):
            Locations that could not be reached.
    """
    @property
    def raw_page(self):
        # Pagination hook used by the client library's page iterators.
        return self
    tasks: MutableSequence[task.Task] = proto.RepeatedField(
        proto.MESSAGE,
        number=1,
        message=task.Task,
    )
    next_page_token: str = proto.Field(
        proto.STRING,
        number=2,
    )
    unreachable: MutableSequence[str] = proto.RepeatedField(
        proto.STRING,
        number=3,
    )
class GetTaskRequest(proto.Message):
    r"""Request for a single Task by name.
    Attributes:
        name (str):
            Required. Task name.
    """
    # Generated protobuf wrapper: field number is wire-format contract.
    name: str = proto.Field(
        proto.STRING,
        number=1,
    )
class OperationMetadata(proto.Message):
    r"""Represents the metadata of the long-running operation.
    Attributes:
        create_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The time the operation was
            created.
        end_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The time the operation finished
            running.
        target (str):
            Output only. Server-defined resource path for
            the target of the operation.
        verb (str):
            Output only. Name of the verb executed by the
            operation.
        status_message (str):
            Output only. Human-readable status of the
            operation, if any.
        requested_cancellation (bool):
            Output only. Identifies whether the user has requested
            cancellation of the operation. Operations that have
            successfully been cancelled have [Operation.error][] value
            with a [google.rpc.Status.code][google.rpc.Status.code] of
            1, corresponding to ``Code.CANCELLED``.
        api_version (str):
            Output only. API version used to start the
            operation.
    """
    # Generated protobuf wrapper: field numbers are wire-format contract.
    create_time: timestamp_pb2.Timestamp = proto.Field(
        proto.MESSAGE,
        number=1,
        message=timestamp_pb2.Timestamp,
    )
    end_time: timestamp_pb2.Timestamp = proto.Field(
        proto.MESSAGE,
        number=2,
        message=timestamp_pb2.Timestamp,
    )
    target: str = proto.Field(
        proto.STRING,
        number=3,
    )
    verb: str = proto.Field(
        proto.STRING,
        number=4,
    )
    status_message: str = proto.Field(
        proto.STRING,
        number=5,
    )
    requested_cancellation: bool = proto.Field(
        proto.BOOL,
        number=6,
    )
    api_version: str = proto.Field(
        proto.STRING,
        number=7,
    )
# Public API: exactly the messages declared in the proto manifest, sorted.
__all__ = tuple(sorted(__protobuf__.manifest))
| {
"content_hash": "1d2293972809d10d5f6dece678408a59",
"timestamp": "",
"source": "github",
"line_count": 347,
"max_line_length": 89,
"avg_line_length": 27.72622478386167,
"alnum_prop": 0.582371894813429,
"repo_name": "googleapis/python-batch",
"id": "a31b3b8c3928cb35d0919270e187a5ee6c252734",
"size": "10221",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/batch_v1/types/batch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "1240465"
},
{
"name": "Shell",
"bytes": "30657"
}
],
"symlink_target": ""
} |
import os, sys
import argparse
import gzip
import tarfile  # BUG FIX: used below but was never imported (NameError)
try:
    import cPickle as pickle  # Python 2: faster C implementation
except ImportError:
    import pickle

##__________________________________________________________________||
# Inspect a gzipped pickle produced by the task runner: unpack the
# bundled python_modules archive (if present) so the pickle's classes
# can be imported, then load and print the payload.
parser = argparse.ArgumentParser()
parser.add_argument('pickle', help = 'path to a pickle file')
args = parser.parse_args()

##__________________________________________________________________||
pickle_path = os.path.abspath(args.pickle)

##__________________________________________________________________||
# Work in the pickle's directory so the sibling archive and module dir
# are found by the relative paths below.
os.chdir(os.path.dirname(pickle_path))

##__________________________________________________________________||
dirname = 'python_modules'
tarname = dirname + '.tar.gz'
if os.path.exists(tarname) and not os.path.exists(dirname):
    tar = tarfile.open(tarname)
    tar.extractall()
    tar.close()
sys.path.insert(0, dirname)

##__________________________________________________________________||
f = gzip.open(os.path.basename(pickle_path), 'rb')
package = pickle.load(f)
print(package)

##__________________________________________________________________||
| {
"content_hash": "eba0f28d4cf5f4bc02bc123d5c2be8b3",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 70,
"avg_line_length": 28.2972972972973,
"alnum_prop": 0.42406876790830944,
"repo_name": "alphatwirl/alphatwirl",
"id": "3cfe5f91918dec7c8974c5d03da815210a9b4e4d",
"size": "1105",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alphatwirl/concurrently/example_load.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "3603"
},
{
"name": "Python",
"bytes": "775977"
},
{
"name": "R",
"bytes": "1222"
},
{
"name": "Shell",
"bytes": "28"
}
],
"symlink_target": ""
} |
from core import *
from differentiator import runSimulation
# NOTE(review): the header below says 1*101*101 gridblocks, but the grid
# actually built here is 1*151*151 -- the comment looks stale; verify.
# 1*101*101 gridblocks
# radius 2,500 ft
# Firstly, we can specify the dimension of the cartesian grid
nz, ny, nx = 1, 151, 151
dims = (nz, ny, nx)
g = Grid(dims)
# Then, we can specify the whole reservoir dimension
Lz, Ly, Lx = 75, 2500, 2500
resDimension = (Lz, Ly, Lx)
# Build the fluid and rock model
# See to this later!
f = Fluid(refRho=62.428, refPres=14.7, compress=3.5*1e-6, mu=10)
r = Rock(refPoro=0.18, refPres=14.7, compress=0, perm=0.015)
# rho is in lbm/ft^3
# refPres is in psi
# compress is in psi^-1
# mu is in cP
# perm is in D (Darcy)
# We contain all these informations in a Reservoir object
res = Reservoir(grid=g, fluid=f, rock=r, resDim=resDimension)
# By default, the moment we declare a Node object, a no-flow Neumann
# condition has already been imposed if the Node is a boundary Node.
# But we can specify another condition with another value as follows
bc = BoundaryCondition()
#res.addBoundaryCondition(bc, x='before')
# Set the initial pressure array
res.setInitPressure(6000)
# Set a sink of -150 at the grid's center block (0, 75, 75).
# NOTE(review): ``np`` is presumably re-exported by ``from core import *``;
# confirm, since numpy is not imported here directly.
res.grid.nodes[np.ravel_multi_index((0, 75, 75), res.grid.dims)].setSrc(-150)
# Finally, run the simulation!
runSimulation(res, dt=0.1, nTime=10*3+2)
#runSimulation2(res, dt=15, nTime=30)
| {
"content_hash": "9b2ea96d57f312129fafc3023e385126",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 77,
"avg_line_length": 24.867924528301888,
"alnum_prop": 0.716236722306525,
"repo_name": "benjdewantara/fdressim",
"id": "b5fde3e63bfb9943c69e5c2183b7ead739acd687",
"size": "1454",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example-5.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Matlab",
"bytes": "627768"
},
{
"name": "Python",
"bytes": "42431"
}
],
"symlink_target": ""
} |
import weakref
class weakattr(object):
    """
    A data descriptor that holds its values only by weak reference.

    The attribute silently 'disappears' from an instance once its value is
    no longer referenced anywhere else, which makes it convenient for
    breaking cyclic references.
    """

    __slots__ = ["dict", "errmsg"]

    def __init__(self, name=None):
        # One weak-value mapping per descriptor: id(instance) -> value.
        self.dict = weakref.WeakValueDictionary()
        # Pre-render the AttributeError text; the remaining '%r' placeholder
        # is filled with the owning instance at raise time.
        if not name:
            self.errmsg = "%r has no such attribute"
        else:
            self.errmsg = "%%r has no attribute named %r" % (name,)

    def __repr__(self):
        return "<weakattr at 0x%08X>" % (id(self),)

    def __set__(self, obj, value):
        self.dict[id(obj)] = value

    def __delete__(self, obj):
        try:
            del self.dict[id(obj)]
        except KeyError:
            raise AttributeError(self.errmsg % (obj,))

    def __get__(self, obj, cls):
        # Class-level access exposes the descriptor itself.
        if obj is None:
            return self
        try:
            return self.dict[id(obj)]
        except KeyError:
            raise AttributeError(self.errmsg % (obj,))
#
# example
#
>>> class x(object):
... next = weakattr()
... def __init__(self):
... self.next = self
... def __del__(self):
... print "g'bye"
...
>>>
>>> y = x()
>>> y.next
<__main__.x object at 0x009EFA50>
>>> del y
>>> gc.collect()
g'bye
0
| {
"content_hash": "c959b132a0884bc641aa53cbf33b680a",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 79,
"avg_line_length": 23.727272727272727,
"alnum_prop": 0.518007662835249,
"repo_name": "ActiveState/code",
"id": "8b5ec7d8da822e962951ec4d2f8afa13e601b09b",
"size": "1305",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/496771_weakattr/recipe-496771.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
} |
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import Required, Length, Email, Regexp, EqualTo
from wtforms import ValidationError
from ..models import User
class LoginForm(FlaskForm):
    """Sign-in form: e-mail + password with an optional "remember me" flag."""

    # Length cap of 64 presumably matches the User model's column size --
    # TODO confirm against the model definition.
    email = StringField('Email', validators=[Required(), Length(1, 64), Email()])
    password = PasswordField('Password', validators=[Required()])
    remember_me = BooleanField('Keep me logged in')
    submit = SubmitField('Log In')
class RegistrationForm(FlaskForm):
    """Sign-up form with uniqueness checks for e-mail and username."""

    email = StringField('Email', validators=[Required(), Length(1, 64), Email()])
    # Usernames must start with a letter; only letters, digits, dots and
    # underscores are allowed after that.
    username = StringField('Username', validators=[Required(), Length(1, 64), Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0,
                                                                                     'Usernames must have only letters,'
                                                                                     ' numbers, dots or underscores')])
    # EqualTo cross-checks this field against the confirmation field below.
    password = PasswordField('Password', validators=[Required(), EqualTo('password2', message='Password must match.')])
    password2 = PasswordField('Confirm password', validators=[Required()])
    submit = SubmitField('Register')

    def validate_email(self, field):
        """Inline validator (validate_<fieldname> convention): reject
        addresses that are already registered."""
        if User.query.filter_by(email=field.data).first():
            raise ValidationError('Email already registered.')

    def validate_username(self, field):
        """Inline validator: reject usernames that are already in use."""
        if User.query.filter_by(username=field.data).first():
            raise ValidationError('Username already in use.')
| {
"content_hash": "b1979bf9d699ccc796acd186db1e68a0",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 120,
"avg_line_length": 51.266666666666666,
"alnum_prop": 0.6306892067620286,
"repo_name": "bluedai180/PythonExercise",
"id": "e6af37d02cd15e59d23350f9aabb7981588f16e1",
"size": "1538",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Exercise/Flask/app/auth/forms.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3259"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "100215"
}
],
"symlink_target": ""
} |
import os
import re
import codecs
from setuptools import setup
# Resolve all paths relative to this setup.py so builds work from any cwd.
directory_name = os.path.dirname(__file__)

# Extract __version__ from the package's __init__.py by regex instead of
# importing it (importing could pull in the package's dependencies at
# build time).
with codecs.open(os.path.join(directory_name, 'pytest_testbook', '__init__.py'), encoding='utf-8') as fd:
    VERSION = re.compile(r".*__version__ = '(.*?)'", re.S).match(fd.read()).group(1)
def read(fname):
    """Return the UTF-8 decoded contents of *fname*, relative to setup.py.

    Fix: the handle is now closed deterministically via a context manager;
    the original ``codecs.open(...).read()`` left closing to the garbage
    collector.
    """
    file_path = os.path.join(directory_name, fname)
    with codecs.open(file_path, encoding='utf-8') as handle:
        return handle.read()
# Package metadata.  The 'pytest11' entry point is how pytest discovers and
# loads this plugin once the package is installed.
setup(
    name='pytest-testbook',
    version=VERSION,
    author='Ernesto D. Luzon Jr.',
    author_email='raise_a_bug_in_myrepo@github.com',
    maintainer='Ernesto D. Luzon Jr.',
    maintainer_email='please_raise_a_bug_in_myrepo@github.com',
    license='MIT',
    url='https://github.com/ldiary/pytest-testbook',
    description='A plugin to run tests written in Jupyter notebook',
    long_description=read('README.rst'),
    packages=["pytest_testbook"],
    install_requires=[
        'marigoso',
        'jupyter',
    ],
    classifiers=[
        "Development Status :: 3 - Alpha",
        'Framework :: Pytest',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Testing',
        'Topic :: Software Development :: Quality Assurance',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: CPython',
        'Operating System :: OS Independent',
        'License :: OSI Approved :: MIT License',
    ],
    entry_points={
        'pytest11': [
            'testbook = pytest_testbook.plugin',
        ],
    },
)
| {
"content_hash": "2eea5496e199bfb80bc1978c80c5896b",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 105,
"avg_line_length": 32.166666666666664,
"alnum_prop": 0.6191709844559585,
"repo_name": "ldiary/pytest-testbook",
"id": "b064086b3ddce3fef43d60c3a077c411fbe8fd17",
"size": "1591",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9699"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
import codecs
import tdigest
def long_description():
    """Return the UTF-8 contents of README.rst for the PyPI long description."""
    with codecs.open('README.rst', encoding='utf8') as readme:
        return readme.read()
# Package metadata.  Version, description, author and licence are pulled
# from the tdigest package itself so they are defined in one place.
setup(
    name='tdigest',
    version=tdigest.__version__,
    description=tdigest.__doc__.strip(),
    long_description=long_description(),
    #download_url='https://github.com/trademob/python-tdigest',
    author=tdigest.__author__,
    #author_email='',
    license=tdigest.__licence__,
    packages=find_packages(),
    install_requires=[],
    tests_require=[
        'sure',
        'numpy',
        'nose',
    ],
    test_suite='nose.collector',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.1',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: BSD License',
        'Topic :: Scientific/Engineering :: Information Analysis',
    ],
)
| {
"content_hash": "e85ae721bd9b2681fec409fd2897dba0",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 66,
"avg_line_length": 30.90909090909091,
"alnum_prop": 0.5911764705882353,
"repo_name": "trademob/t-digest",
"id": "f34b76daaf48435b181a69ffa9ab3111a1253219",
"size": "1360",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16133"
}
],
"symlink_target": ""
} |
import os
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
import cdj
HERE = os.path.abspath(os.path.dirname(__file__))
class PyTest(TestCommand):
    """``python setup.py test`` command that delegates to pytest.

    Registered below via ``cmdclass={'test': PyTest}``; runs the suite in
    tests/ and exits with pytest's return code.
    """

    def finalize_options(self):
        # The attribute names are dictated by the setuptools test command.
        TestCommand.finalize_options(self)
        self.test_args = ['tests/']
        self.test_suite = True

    def run_tests(self):
        # Imported here rather than at module level -- presumably so that
        # setup.py can be parsed without pytest installed; verify.
        import pytest
        errcode = pytest.main(self.test_args)
        sys.exit(errcode)
def get_long_description(filename):
    """Return the text of *filename*, resolved relative to this setup.py.

    Returns "" when the file is missing, so ``setup()`` still works from a
    distribution that omits the README.
    """
    path = os.path.join(HERE, filename)
    if not os.path.exists(path):
        return ""
    # Fix: close the handle deterministically; the original
    # `open(path).read()` leaked it until garbage collection.
    # NOTE(review): still reads with the locale's default encoding, exactly
    # as before -- consider io.open(path, encoding='utf-8') for portability.
    with open(path) as description_file:
        return description_file.read()
# Package metadata.  The `cdjumper` console script is the server entry point.
setup(
    name='cdj',
    version=cdj.__version__,
    description='Call-do jumper, a mini RPC tool.',
    long_description=get_long_description('README.md'),
    author='Magine',
    author_email='con@loli.lu',
    license='MIT LICENSE',
    keywords=['rpc', 'call', 'do', 'jumper', 'cdj'],
    classifiers=[
        'Programming Language :: Python',
        'Natural Language :: English',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        # Fix: 'License :: MIT Approved :: The MIT License' is not a valid
        # trove classifier (PyPI rejects uploads with unknown classifiers);
        # the canonical spelling is used instead.
        'License :: OSI Approved :: MIT License',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    url='',
    packages=['cdj'],
    install_requires=[],
    tests_require=['pytest'],
    cmdclass={'test': PyTest},
    entry_points={
        'console_scripts': [
            'cdjumper = cdj.server:serve',
        ],
    },
)
| {
"content_hash": "e51b6aac017ac2b19899aed26af6d587",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 71,
"avg_line_length": 25.56140350877193,
"alnum_prop": 0.608098833218943,
"repo_name": "Ma233/cdj",
"id": "b7502a8cc90c3e70c5fc96fd18e66588caa5be21",
"size": "1457",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "104"
},
{
"name": "Python",
"bytes": "4107"
}
],
"symlink_target": ""
} |
""" Classes representing perfetto trace protobuf messages.
This module makes use of neither python-protobuf library nor python classes
compiled from .proto definitions, because currently there's no way to
deploy those to all the places where telemetry is run.
TODO(crbug.com/944078): Remove this module after the python-protobuf library
is deployed to all the bots.
Definitions of perfetto messages can be found here:
https://android.googlesource.com/platform/external/perfetto/+/refs/heads/master/protos/perfetto/trace/
"""
import encoder
import wire_format
class TracePacket(object):
    """In-memory stand-in for the perfetto TracePacket proto message.

    Only the fields telemetry needs are modeled.  Unset (None) fields are
    omitted from the encoded output, matching proto semantics.
    """

    def __init__(self):
        self.clock_snapshot = None              # field 6, submessage
        self.timestamp = None                   # field 8, uint64
        self.timestamp_clock_id = None          # field 58, uint32
        self.interned_data = None               # field 12, submessage
        self.thread_descriptor = None           # field 44, submessage
        self.incremental_state_cleared = None   # field 41, bool
        self.chrome_event = None                # field 5, submessage
        self.track_event = None                 # field 11, submessage
        self.trusted_packet_sequence_id = None  # field 10, uint32
        self.chrome_benchmark_metadata = None   # field 48, submessage

    @staticmethod
    def _append_submessage(parts, field_number, submessage):
        """Append tag, varint length and payload of one length-delimited
        field to *parts* (deduplicates the repeated 4-line pattern of the
        original implementation; byte output is unchanged)."""
        tag = encoder.TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
        data = submessage.encode()
        length = encoder._VarintBytes(len(data))
        parts += [tag, length, data]

    def encode(self):
        """Serialize all set fields to protobuf wire format and return bytes.

        Field emission order is preserved from the original code.
        """
        parts = []
        if self.chrome_event is not None:
            self._append_submessage(parts, 5, self.chrome_event)
        if self.clock_snapshot is not None:
            self._append_submessage(parts, 6, self.clock_snapshot)
        if self.timestamp is not None:
            writer = encoder.UInt64Encoder(8, False, False)
            writer(parts.append, self.timestamp)
        if self.trusted_packet_sequence_id is not None:
            writer = encoder.UInt32Encoder(10, False, False)
            writer(parts.append, self.trusted_packet_sequence_id)
        if self.track_event is not None:
            self._append_submessage(parts, 11, self.track_event)
        if self.interned_data is not None:
            self._append_submessage(parts, 12, self.interned_data)
        if self.incremental_state_cleared is not None:
            writer = encoder.BoolEncoder(41, False, False)
            writer(parts.append, self.incremental_state_cleared)
        if self.thread_descriptor is not None:
            self._append_submessage(parts, 44, self.thread_descriptor)
        if self.chrome_benchmark_metadata is not None:
            self._append_submessage(parts, 48, self.chrome_benchmark_metadata)
        if self.timestamp_clock_id is not None:
            writer = encoder.UInt32Encoder(58, False, False)
            writer(parts.append, self.timestamp_clock_id)
        return b"".join(parts)
class InternedData(object):
    """Interned strings referenced by later packets in the same sequence."""

    def __init__(self):
        self.event_category = None     # field 1, submessage
        self.legacy_event_name = None  # field 2, submessage

    def encode(self):
        """Serialize the set submessage fields to protobuf wire format."""
        chunks = []
        for field_number, submessage in ((1, self.event_category),
                                         (2, self.legacy_event_name)):
            if submessage is None:
                continue
            tag = encoder.TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
            payload = submessage.encode()
            chunks += [tag, encoder._VarintBytes(len(payload)), payload]
        return b"".join(chunks)
class EventCategory(object):
    """Interned event category: numeric id (iid) paired with a name."""

    def __init__(self):
        self.iid = None   # field 1, uint32 (mandatory)
        self.name = None  # field 2, string (mandatory)

    def encode(self):
        """Serialize to protobuf wire format; both fields are required."""
        if self.iid is None or self.name is None:
            raise RuntimeError("Missing mandatory fields.")
        chunks = []
        encoder.UInt32Encoder(1, False, False)(chunks.append, self.iid)
        encoder.StringEncoder(2, False, False)(chunks.append, self.name)
        return b"".join(chunks)
LegacyEventName = EventCategory
class ThreadDescriptor(object):
    """Identifies the process/thread that emitted subsequent events."""

    def __init__(self):
        self.pid = None  # field 1, uint32 (mandatory)
        self.tid = None  # field 2, uint32 (mandatory)

    def encode(self):
        """Serialize to protobuf wire format; both fields are required."""
        if self.pid is None or self.tid is None:
            raise RuntimeError("Missing mandatory fields.")
        chunks = []
        encoder.UInt32Encoder(1, False, False)(chunks.append, self.pid)
        encoder.UInt32Encoder(2, False, False)(chunks.append, self.tid)
        return b"".join(chunks)
class ChromeEventBundle(object):
    """Container for ChromeMetadata entries (repeated field 2)."""

    def __init__(self):
        self.metadata = []  # list of ChromeMetadata-like objects

    def encode(self):
        """Serialize every metadata entry to protobuf wire format."""
        chunks = []
        # The tag is loop-invariant, so compute it once.
        tag = encoder.TagBytes(2, wire_format.WIRETYPE_LENGTH_DELIMITED)
        for entry in self.metadata:
            payload = entry.encode()
            chunks += [tag, encoder._VarintBytes(len(payload)), payload]
        return b"".join(chunks)
class TrackEvent(object):
    """A single trace event on a track."""

    def __init__(self):
        self.legacy_event = None       # field 6, submessage
        self.category_iids = None      # field 3, repeated uint32
        self.debug_annotations = []    # field 4, repeated submessage

    def encode(self):
        """Serialize the set fields to protobuf wire format."""
        chunks = []
        if self.category_iids is not None:
            category_writer = encoder.UInt32Encoder(3, is_repeated=True, is_packed=False)
            category_writer(chunks.append, self.category_iids)
        annotation_tag = encoder.TagBytes(4, wire_format.WIRETYPE_LENGTH_DELIMITED)
        for annotation in self.debug_annotations:
            payload = annotation.encode()
            chunks += [annotation_tag, encoder._VarintBytes(len(payload)), payload]
        if self.legacy_event is not None:
            legacy_tag = encoder.TagBytes(6, wire_format.WIRETYPE_LENGTH_DELIMITED)
            payload = self.legacy_event.encode()
            chunks += [legacy_tag, encoder._VarintBytes(len(payload)), payload]
        return b"".join(chunks)
class LegacyEvent(object):
    """Legacy event information attached to a TrackEvent."""

    def __init__(self):
        self.phase = None     # field 2, int32
        self.name_iid = None  # field 1, uint32 (interned name id)

    def encode(self):
        """Serialize the set fields to protobuf wire format."""
        chunks = []
        if self.name_iid is not None:
            encoder.UInt32Encoder(1, False, False)(chunks.append, self.name_iid)
        if self.phase is not None:
            encoder.Int32Encoder(2, False, False)(chunks.append, self.phase)
        return b"".join(chunks)
class ChromeBenchmarkMetadata(object):
    """Metadata about the telemetry benchmark run that produced the trace."""

    def __init__(self):
        self.benchmark_start_time_us = None  # field 1, int64 (microseconds)
        self.story_run_time_us = None        # field 2, int64 (microseconds)
        self.benchmark_name = None           # field 3, string
        self.benchmark_description = None    # field 4, string
        self.story_name = None               # field 6, string
        self.story_tags = None               # field 7, repeated string
        self.story_run_index = None          # field 8, int32
        self.label = None                    # field 5, string

    def encode(self):
        """Serialize all set fields to protobuf wire format (bytes)."""
        parts = []
        if self.benchmark_start_time_us is not None:
            writer = encoder.Int64Encoder(1, False, False)
            writer(parts.append, self.benchmark_start_time_us)
        if self.story_run_time_us is not None:
            writer = encoder.Int64Encoder(2, False, False)
            writer(parts.append, self.story_run_time_us)
        if self.benchmark_name is not None:
            writer = encoder.StringEncoder(3, False, False)
            writer(parts.append, self.benchmark_name)
        if self.benchmark_description is not None:
            writer = encoder.StringEncoder(4, False, False)
            writer(parts.append, self.benchmark_description)
        if self.label is not None:
            writer = encoder.StringEncoder(5, False, False)
            writer(parts.append, self.label)
        if self.story_name is not None:
            writer = encoder.StringEncoder(6, False, False)
            writer(parts.append, self.story_name)
        if self.story_tags is not None:
            # Repeated, unpacked: one tag per element on the wire.
            writer = encoder.StringEncoder(7, is_repeated=True, is_packed=False)
            writer(parts.append, self.story_tags)
        if self.story_run_index is not None:
            writer = encoder.Int32Encoder(8, False, False)
            writer(parts.append, self.story_run_index)
        return b"".join(parts)
def write_trace_packet(output, trace_packet):
    """Append *trace_packet* to *output* as field 1 of a perfetto Trace.

    The enclosing Trace message is just a stream of length-delimited
    TracePacket submessages, so packets can be written one at a time.
    """
    tag = encoder.TagBytes(1, wire_format.WIRETYPE_LENGTH_DELIMITED)
    output.write(tag)
    binary_data = trace_packet.encode()
    encoder._EncodeVarint(output.write, len(binary_data))
    output.write(binary_data)
class DebugAnnotation(object):
    """A named annotation carrying exactly one typed value."""

    def __init__(self):
        self.name = None          # field 10, string (mandatory)
        self.int_value = None     # field 4, int64
        self.double_value = None  # field 5, double
        self.string_value = None  # field 6, string

    def encode(self):
        """Serialize to protobuf wire format.

        Raises RuntimeError unless a name and exactly one value are set.
        """
        if self.name is None:
            raise RuntimeError("DebugAnnotation must have a name.")
        values_set = sum(
            1 for value in (self.string_value, self.int_value, self.double_value)
            if value is not None)
        if values_set != 1:
            raise RuntimeError("DebugAnnotation must have exactly one value.")
        chunks = []
        encoder.StringEncoder(10, False, False)(chunks.append, self.name)
        if self.int_value is not None:
            encoder.Int64Encoder(4, False, False)(chunks.append, self.int_value)
        if self.double_value is not None:
            encoder.DoubleEncoder(5, False, False)(chunks.append, self.double_value)
        if self.string_value is not None:
            encoder.StringEncoder(6, False, False)(chunks.append, self.string_value)
        return b"".join(chunks)
class ChromeMetadata(object):
    """A single name/value metadata pair for a ChromeEventBundle."""

    def __init__(self):
        self.name = None          # field 1, string (mandatory)
        self.string_value = None  # field 2, string (mandatory)

    def encode(self):
        """Serialize to protobuf wire format; both fields are required."""
        if self.name is None or self.string_value is None:
            raise RuntimeError("ChromeMetadata must have a name and a value.")
        chunks = []
        encoder.StringEncoder(1, False, False)(chunks.append, self.name)
        encoder.StringEncoder(2, False, False)(chunks.append, self.string_value)
        return b"".join(chunks)
class Clock(object):
    """One (clock_id, timestamp) reading inside a ClockSnapshot."""

    def __init__(self):
        self.clock_id = None   # field 1, uint32 (mandatory)
        self.timestamp = None  # field 2, uint64 (mandatory)

    def encode(self):
        """Serialize to protobuf wire format; both fields are required."""
        if self.clock_id is None or self.timestamp is None:
            raise RuntimeError("Clock must have a clock_id and a timestamp.")
        chunks = []
        encoder.UInt32Encoder(1, False, False)(chunks.append, self.clock_id)
        encoder.UInt64Encoder(2, False, False)(chunks.append, self.timestamp)
        return b"".join(chunks)
class ClockSnapshot(object):
    """Simultaneous readings of several clocks (repeated field 1)."""

    def __init__(self):
        self.clocks = []  # list of Clock objects

    def encode(self):
        """Serialize all clocks; a snapshot needs at least two of them."""
        if len(self.clocks) < 2:
            raise RuntimeError("ClockSnapshot must have at least two clocks.")
        chunks = []
        tag = encoder.TagBytes(1, wire_format.WIRETYPE_LENGTH_DELIMITED)
        for clock in self.clocks:
            payload = clock.encode()
            chunks += [tag, encoder._VarintBytes(len(payload)), payload]
        return b"".join(chunks)
| {
"content_hash": "0889f9bb396e6996ed55f47c65858f72",
"timestamp": "",
"source": "github",
"line_count": 327,
"max_line_length": 102,
"avg_line_length": 31.91131498470948,
"alnum_prop": 0.674556780067082,
"repo_name": "endlessm/chromium-browser",
"id": "5be4a8e1d771ed40398864709c7bf1a6206689b6",
"size": "10598",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "third_party/catapult/common/py_trace_event/py_trace_event/trace_event_impl/perfetto_proto_classes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from similarity_analyzer import SimilarityAnalyzer
# import datetime for the timestamp in response checked queue
from datetime import datetime, timedelta
# import time to be able to sleep
import time
# import twythonaccess to be able to send tweets
import twythonaccess
# import setup to be able to read the persona
import setup
# import error messenger
from error_messenger import send_error_message
# The Coordinate class will coordinate all actions (wow so much info)
# It will be concurrently accessed at four different threads
# Each of its methods will only be accessed at one thread at a time
# The communication between threads is made via the class' properties (e.g. the tweet lists)
# The Coordinate class will coordinate all actions (wow so much info)
# It will be concurrently accessed at four different threads
# Each of its methods will only be accessed at one thread at a time
# The communication between threads is made via the class' properties (e.g. the tweet lists)
class Coordinator():
    """Central hub wiring the tweet streamer, similarity analyzer, response
    checker and tweet sender together.

    Each ``*_loop`` method runs forever on its own thread; threads
    communicate only through the queue attributes below.

    NOTE(review): the queues are class attributes (shared across all
    instances) -- fine while exactly one Coordinator exists; confirm.
    ``self.similarity_analyzer`` is used in similarity_analysis_loop but
    assigned elsewhere; it must be set before the loops are started.
    """

    # The queue of tweets to be analyzed for similarity
    # This should always be kept under, say, 100 elements
    # The above measure is to ensure the waiting time for similarity analysis is short,
    # i.e. we don't want a bottle neck waiting for similarity analysis
    similarity_analyzer_queue = []
    # The queue of tweets to be sent
    # This should be kept under, say, 2 elements
    # This is to ensure the response isn't all too delayed, but still somewhat delayed
    # The data is a tuple, on the following form: (reply_text, base_tweet, similarity_ratio)
    send_tweet_queue = []
    # The queue of tweets to be response checked
    # It takes around 1 minute to process each tweet
    # And a waiting time of around 5 hours should be suitable
    # Thus, the limit to this queue should be 300 elements
    # The elements are constituted by a tuple: (timestamp, tweet)
    # They should not be processed if less than 2 hours have passed
    response_checker_queue = []
    # The threshold for sending a tweet should initially be set to 0.5
    # The threshold is increased whenever a match is made, and vice versa
    # Thus, the bot will only get more accurate over time
    similarity_threshold = 0.5

    # Getting new tweets from streamer
    # Just add them to the similarity queue, after filtering out some tweets
    def new_tweet(self, tweet):
        """Filter an incoming status dict and enqueue it for analysis."""
        # filter out retweets
        if tweet["text"].startswith("RT"):
            return
        if "retweeted_status" in tweet:
            return
        # don't reply to replies – they have too much context going on
        if tweet["in_reply_to_status_id"] != None:
            return
        # if the user is protected, then return
        if tweet["user"]["protected"]:
            return
        # filter out tweets containing urls – once again, we don't really know what's going on
        if tweet["entities"]["urls"]:
            return
        # add to the similarity analyzer queue, if its length is less than 100 elements
        if len(self.similarity_analyzer_queue) < 100:
            print("new tweet: " + tweet["text"])
            self.similarity_analyzer_queue.append(tweet)

    # The last sent response, to not send the same response twice
    last_sent_response = ""

    # This loop is run in its own thread, indefinitely
    # It takes the first element from the queue, analyzes it,
    # and appends to both the send tweet list and the response checker list
    def similarity_analysis_loop(self):
        """Analyze queued tweets, maybe queue a reply, and adapt the
        similarity threshold up/down based on match success."""
        # add error handling
        while True:
            try:
                # sleep for periods of 10 seconds until there is a tweet in the queue
                while len(self.similarity_analyzer_queue) == 0:
                    time.sleep(10)
                # Take the first element from the similarity analyzer queue
                tweet = self.similarity_analyzer_queue.pop(0)
                # analyze the tweet
                # the analyzer will return the best match tweet text, along with the similarity ratio between the tweet and its match
                # the max length of the response text has to be 140 - 1 - length of screen name - 1, for the "@screen_name " prefix
                best_match_response, similarity_ratio = self.similarity_analyzer.analyze_tweet(tweet["text"], max_length = 138 - len(tweet["user"]["screen_name"]))
                print("found similar response with similarity ratio of " + str(similarity_ratio) + ": " + best_match_response)
                # check if the similarity ratio is greater than or equal to the threshold, or not
                if similarity_ratio >= self.similarity_threshold:
                    if self.last_sent_response != best_match_response:
                        # yay, we can send this tweet
                        # the send tweet queue should never be longer than 1 element
                        if len(self.send_tweet_queue) < 1:
                            self.send_tweet_queue.append((best_match_response, tweet, similarity_ratio))
                        else:
                            # if any tweet has a ratio lower than the current threshold,
                            # then replace that tweet with this one
                            # this means that even though this similarity ratio may be higher than any similarity ratio in the send tweet queue,
                            # don't replace them if they are not beneath the threshold. this is for more unpredictability, and more humanness.
                            # if this ratio is greater than the to be sent one, but less than 0.7, then exchange them. NOPE
                            for index, (to_be_sent_response, to_be_sent_to_tweet, to_be_sent_ratio) in enumerate(self.send_tweet_queue):
                                if to_be_sent_ratio < self.similarity_threshold:
                                    self.send_tweet_queue[index] = (best_match_response, tweet, similarity_ratio)
                                    break
                    # Increase the threshold, in an effort to increase the accuracy of the tweets
                    # Increase it by 0.01 (if smaller than 0.9)
                    self.similarity_threshold = min(0.9, self.similarity_threshold + 0.01)
                else:
                    # Decrease the threshold, so as to be able to finally send some tweets
                    # Never go below 0.2
                    self.similarity_threshold = max(0.2, self.similarity_threshold - 0.01)
                print("new threshold: " + str(self.similarity_threshold))
                # if the response checked queue has fewer than 300 elements, add this tweet, along with the current timestamp
                if len(self.response_checker_queue) < 300:
                    self.response_checker_queue.append((datetime.utcnow(), tweet))
            except Exception as exception:
                # print the exception and then sleep for 2 hours
                # the sleep will reset all rate limiting
                print(exception)
                print("will sleep for 2 hours to avoid exception in similarity analysis loop")
                send_error_message(exception, "similarity_analysis_loop")
                time.sleep(2 * 60 * 60)
                print("finished sleep after exception in similarity analysis loop. will now start anew")

    # This function should run in its own thread, indefinitely
    # It gets tweets from the queue, and processes them to find the best response
    # If a good enough response is found, then the response and the base tweet is appended to the responses.txt
    def response_checker_loop(self):
        """Two hours after a tweet was seen, harvest the best human reply to
        it and append the (reply, base) pair to responses.txt."""
        while True:
            try:
                # wait until there is a tweet in the queue
                while len(self.response_checker_queue) == 0:
                    time.sleep(10)
                # take the first element
                # it is a tuple, formatted (timestamp, tweet)
                timestamp, tweet = self.response_checker_queue.pop(0)
                # sleep until two hours since the tweet was sent have passed
                time.sleep(max(0, (timestamp + timedelta(hours=2) - datetime.utcnow()).total_seconds()))
                print("response checking tweet: " + tweet["text"])
                # great
                # now, lets find the replies
                # 180 calls like this one are allowed per 15 minute window
                possible_replies = twythonaccess.authorize(twitter_app = twythonaccess.TwitterApp.response_checker).search(q = "@" + tweet["user"]["screen_name"], count = 100, result_type = "recent", since_id = tweet["id"], include_entities = False)["statuses"]
                # now go through each reply, and find real replies
                real_replies = []
                for possible_reply in possible_replies:
                    if possible_reply["in_reply_to_status_id"] == tweet["id"]:
                        # yay, we found a real reply
                        real_replies.append(possible_reply)
                if not real_replies:
                    # well, to spare any api calls, simply return prematurely here
                    # wait for 8 seconds to satisfy api limits on search
                    time.sleep(8)
                    continue
                # now that we (potentially) have the real replies, find the best one
                # initialize it with None, because we might not find a suitable reply
                best_reply = None
                if setup.FAVOR_RESPONSES_LIKED_BY_THE_RESPONDEE:
                    # just choose the first tweet that seems to be liked by the respondee
                    # first get the 200 most recently liked tweets by the respondee
                    # this api call is rate limited at once per minute
                    recently_liked = twythonaccess.authorize(twythonaccess.TwitterApp.response_checker).get_favorites(user_id = tweet["user"]["id"], count = 200, since_id = tweet["id"], include_entities = False)
                    # now, we just have to check whether any of these tweets coincide with a tweet in the real_replies
                    for real_reply in real_replies:
                        for liked in recently_liked:
                            if real_reply["id"] == liked["id"]:
                                # yay! we found a reply that was liked by the original tweet author
                                # if the user has liked many replies, we don't care about that
                                best_reply = real_reply
                                break
                        else:
                            continue
                        break
                else:
                    # determine the tweet to add based on the like and retweet count
                    best_reply_like_and_retweet_count = 0
                    for real_reply in real_replies:
                        super_count = real_reply["favorite_count"] + real_reply["retweet_count"]
                        if super_count > best_reply_like_and_retweet_count:
                            best_reply = real_reply
                            best_reply_like_and_retweet_count = super_count
                # check whether the best reply is a tweet or not
                if best_reply != None:
                    print("did find best reply: " + best_reply["text"])
                    # yay, we have a decent reply!
                    reply_text = best_reply["text"]
                    base_text = tweet["text"]
                    # now, remove the mentions at the start of the reply text
                    while reply_text.startswith("@"):
                        # remove the first word
                        reply_text = reply_text.split(" ", 1)[1]
                    # encode all newlines as explcitly written newlines, so that the tweets fit on one line each
                    reply_text = reply_text.replace("\n", "\\n")
                    base_text = base_text.replace("\n", "\\n")
                    # now, append the reply text and the base text to the responses.txt file
                    # the reply text should be written first, and the base text afterwards
                    # we assume that the responses.txt file is correctly formatted (i.e. preserving the always-even-lines invariant)
                    with open("responses.txt", "a") as responses_file:
                        responses_file.write(reply_text + "\n")
                        responses_file.write(base_text + "\n")
                # now, sleep for 70 seconds (to avoid rate limiting on get_favorites)
                time.sleep(70)
            except Exception as exception:
                print("oh, some error in response checker loop")
                print(exception)
                send_error_message(exception, "response_checker_loop")
                print("will wait for 2 hours")
                time.sleep(2 * 60 * 60)
                print("has slept in response checker loop, will now start anew")

    # This function is run in its own thread, indefinitely
    # It takes tweets from the send_tweet_queue, and sends them
    # It waits for 1 minute between each sent tweet, in an effort not to get rate limited
    # Apparently, 1 minute is too short a wait
    # Twitter has no strict rules on this, but try 15 minutes
    # 10 minutes do not work
    def send_tweet_loop(self):
        """Pop queued replies and tweet them, pacing sends 15 minutes apart."""
        while True:
            try:
                # sleep until there is a tweet in the queue
                while len(self.send_tweet_queue) == 0:
                    time.sleep(30)
                # take the first element
                # it is a tuple, as defined above
                reply_text, base_tweet, similarity_ratio = self.send_tweet_queue.pop(0)
                self.last_sent_response = reply_text
                # add @screen_name to the reply text
                reply_text = "@" + base_tweet["user"]["screen_name"] + " " + reply_text
                # send the tweet
                twythonaccess.send_tweet(reply_text, twitter_app = twythonaccess.TwitterApp.send_tweet, in_reply_to_status_id = base_tweet["id"])
                # sleep for 15 minutes
                time.sleep(15 * 60)
            except Exception as exception:
                print("oh, some error in send tweet loop")
                print(exception)
                print("will wait for 2 hours")
                send_error_message(exception, "send_tweet_loop")
                time.sleep(2 * 60 * 60)
                print("has slept in send tweet loop, will now start anew")
| {
"content_hash": "b68638529941154f0f7af998da387514",
"timestamp": "",
"source": "github",
"line_count": 257,
"max_line_length": 261,
"avg_line_length": 55.719844357976655,
"alnum_prop": 0.5839385474860335,
"repo_name": "ArVID220u/TheHumanBot",
"id": "b40408890548046c865134607eff3304bfff8c33",
"size": "14363",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "coordinator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "41530"
}
],
"symlink_target": ""
} |
from unittest import TestCase
class TestStrategy(TestCase):
    """Placeholder test case for the strategy module."""

    def test_place_order(self):
        # Deliberately failing stub: marks this test as not yet written (TODO).
        self.fail()
| {
"content_hash": "95ef2f15cc3f70c7a6c9ed52a4ff0f7a",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 31,
"avg_line_length": 19,
"alnum_prop": 0.7017543859649122,
"repo_name": "michaelchu/kaleidoscope",
"id": "9011cd00a7d9309c20affa5200aa1567da02f7c5",
"size": "114",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_strategy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "84660"
}
],
"symlink_target": ""
} |
"""Sanitizer for body fields sent via GCP API.
The sanitizer removes fields specified from the body.
Context
-------
In some cases where GCP operation requires modification of existing resources (such
as instances or instance templates) we need to sanitize body of the resources returned
via GCP APIs. This is in the case when we retrieve information from GCP first,
modify the body and either update the existing resource or create a new one with the
modified body. Usually when you retrieve resource from GCP you get some extra fields which
are Output-only, and we need to delete those fields if we want to use
the body as input for subsequent create/insert type operation.
Field specification
-------------------
Specification of fields is an array of strings which denote names of fields to be removed.
The field can be either direct field name to remove from the body or the full
specification of the path you should delete - separated with '.'
>>> FIELDS_TO_SANITIZE = [
>>> "kind",
>>> "properties.disks.kind",
>>> "properties.metadata.kind",
>>> ]
>>> body = {
>>> "kind": "compute#instanceTemplate",
>>> "name": "instance",
>>> "properties": {
>>> "disks": [
>>> {
>>> "name": "a",
>>> "kind": "compute#attachedDisk",
>>> "type": "PERSISTENT",
>>> "mode": "READ_WRITE",
>>> },
>>> {
>>> "name": "b",
>>> "kind": "compute#attachedDisk",
>>> "type": "PERSISTENT",
>>> "mode": "READ_WRITE",
>>> }
>>> ],
>>> "metadata": {
>>> "kind": "compute#metadata",
>>> "fingerprint": "GDPUYxlwHe4="
>>> },
>>> }
>>> }
>>> sanitizer=GcpBodyFieldSanitizer(FIELDS_TO_SANITIZE)
>>> SANITIZED_BODY = sanitizer.sanitize(body)
>>> json.dumps(SANITIZED_BODY, indent=2)
{
"name": "instance",
"properties": {
"disks": [
{
"name": "a",
"type": "PERSISTENT",
"mode": "READ_WRITE",
},
{
"name": "b",
"type": "PERSISTENT",
"mode": "READ_WRITE",
}
],
"metadata": {
"fingerprint": "GDPUYxlwHe4="
},
}
}
Note that the components of the path can be either dictionaries or arrays of dictionaries.
In case they are dictionaries, subsequent component names key of the field, in case of
arrays - the sanitizer iterates through all dictionaries in the array and searches
components in all elements of the array.
"""
from typing import List
from airflow import LoggingMixin, AirflowException
class GcpFieldSanitizerException(AirflowException):
    """Thrown when the sanitizer finds an unexpected field type in the path
    (other than dict or array).
    """
    # The previous __init__ only forwarded its single argument to super();
    # inheriting AirflowException's constructor is equivalent and simpler.
class GcpBodyFieldSanitizer(LoggingMixin):
    """Sanitizes the body according to specification.

    :param sanitize_specs: array of strings that specifies which fields to remove
    :type sanitize_specs: list[str]
    """
    def __init__(self, sanitize_specs):
        # type: (List[str]) -> None
        super().__init__()
        self._sanitize_specs = sanitize_specs

    def _sanitize(self, dictionary, remaining_field_spec, current_path):
        """Delete the field described by remaining_field_spec from dictionary.

        remaining_field_spec is a '.'-separated path; each recursion step
        consumes one component. current_path is only used for log messages.
        """
        field_split = remaining_field_spec.split(".", 1)
        if len(field_split) == 1:
            # Leaf component: delete the key here if present.
            field_name = field_split[0]
            if field_name in dictionary:
                self.log.info("Deleted %s [%s]", field_name, current_path)
                del dictionary[field_name]
            else:
                self.log.debug(
                    "The field %s is missing in %s at the path %s.", field_name, dictionary, current_path
                )
        else:
            field_name = field_split[0]
            remaining_path = field_split[1]
            child = dictionary.get(field_name)
            if child is None:
                self.log.debug(
                    "The field %s is missing in %s at the path %s. ", field_name, dictionary, current_path
                )
            elif isinstance(child, dict):
                self._sanitize(child, remaining_path, "{}.{}".format(
                    current_path, field_name))
            elif isinstance(child, list):
                for index, elem in enumerate(child):
                    if not isinstance(elem, dict):
                        # log.warning: warn() is a deprecated alias.
                        self.log.warning(
                            "The field %s element at index %s is of wrong type. "
                            "It should be dict and is %s. Skipping it.",
                            current_path, index, elem)
                        # Bug fix: actually skip non-dict elements as the
                        # message promises; previously _sanitize was still
                        # called on them.
                        continue
                    self._sanitize(elem, remaining_path, "{}.{}[{}]".format(
                        current_path, field_name, index))
            else:
                self.log.warning(
                    "The field %s is of wrong type. It should be dict or list and it is %s. Skipping it.",
                    current_path, child
                )

    def sanitize(self, body):
        """Apply every sanitize spec to body, modifying it in place."""
        for elem in self._sanitize_specs:
            self._sanitize(body, elem, "")
| {
"content_hash": "f2993cf42ef4c0df13c7b9bf7a2699af",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 106,
"avg_line_length": 35.317567567567565,
"alnum_prop": 0.550411325808303,
"repo_name": "r39132/airflow",
"id": "a449613174bb8490d09660ea2a411d89e97a104d",
"size": "6038",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airflow/contrib/utils/gcp_field_sanitizer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12126"
},
{
"name": "Dockerfile",
"bytes": "4111"
},
{
"name": "HTML",
"bytes": "128531"
},
{
"name": "JavaScript",
"bytes": "22118"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "5928206"
},
{
"name": "Shell",
"bytes": "41869"
}
],
"symlink_target": ""
} |
"""
Copyright 2013 Steven Diamond
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
from collections import namedtuple

# Constraints with linear expressions.
# constr_id is used to recover dual variables.

# Equality constraint: expr == 0
LinEqConstr = namedtuple("LinEqConstr", "expr constr_id size")

# Inequality constraint: expr <= 0
LinLeqConstr = namedtuple("LinLeqConstr", "expr constr_id size")
| {
"content_hash": "8e4ddef8e9b0f00b8cd8d9a1dade43e7",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 68,
"avg_line_length": 36.064516129032256,
"alnum_prop": 0.6457960644007156,
"repo_name": "riadnassiffe/Simulator",
"id": "63832d7c70fdd9a5b771de1093f250a188c0b507",
"size": "1118",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "src/tools/ecos/cvxpy/cvxpy/lin_ops/lin_constraints.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "66812"
}
],
"symlink_target": ""
} |
import logging
from pajbot.managers.db import DBManager
from pajbot.models.command import Command
from pajbot.models.command import CommandExample
from pajbot.models.user import User
from pajbot.modules import BaseModule
from pajbot.modules.basic import BasicCommandsModule
log = logging.getLogger(__name__)
class PointsResetModule(BaseModule):
    """Chat command module that zeroes out a user's negative point balance."""

    ID = __name__.split(".")[-1]
    NAME = "Points Reset"
    DESCRIPTION = "Reset points from a user with negative points."
    CATEGORY = "Feature"
    PARENT_MODULE = BasicCommandsModule

    @staticmethod
    def points_reset(bot, source, message, **options):
        # Command body is the target username; bail out on empty input.
        if not message:
            return
        target_name = message.split(" ")[0]
        if len(target_name) < 2:
            return

        with DBManager.create_session_scope() as db_session:
            victim = User.find_by_user_input(db_session, target_name)
            if victim is None:
                bot.whisper(source, "This user does not exist FailFish")
                return
            if victim.points >= 0:
                bot.whisper(source, f"{victim} doesn't have negative points FailFish")
                return
            if victim.points <= -1:
                previous = victim.points
                victim.points = 0
                bot.whisper(source, f"You changed the points for {victim} from {previous} to {victim.points} points")

    def load_commands(self, **options):
        reset_command = Command.raw_command(
            self.points_reset,
            command="pointsreset",
            delay_all=0,
            delay_user=5,
            level=500,
            description="Reset points from a user with negative points.",
            can_execute_with_whisper=1,
            examples=[
                CommandExample(
                    None,
                    "Reset points from a user with negative points.",
                    chat="user:!pointsreset pajtest\n"
                    "bot>user:You changed the points for pajtest from -10000 to 0 points",
                    description="",
                ).parse()
            ],
        )
        self.commands["pointsreset"] = reset_command
| {
"content_hash": "0a211e5b49702ab28d3ba159cfde5593",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 119,
"avg_line_length": 34.111111111111114,
"alnum_prop": 0.5802698929734761,
"repo_name": "pajlada/pajbot",
"id": "f6abd0315b7336421f2ee0fc29144f21ec9b9c4e",
"size": "2149",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pajbot/modules/basic/pointsreset.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11288"
},
{
"name": "HTML",
"bytes": "129576"
},
{
"name": "JavaScript",
"bytes": "202450"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "987601"
},
{
"name": "Shell",
"bytes": "589"
}
],
"symlink_target": ""
} |
"""Test the module cluster centroids."""
from collections import Counter
import pytest
import numpy as np
from scipy import sparse
from sklearn.cluster import KMeans
from sklearn.datasets import make_classification
from imblearn.under_sampling import ClusterCentroids
# Deterministic seed shared by every resampler instance in this module.
RND_SEED = 0
# Small 2-D toy dataset: 10 samples, 2 features.
X = np.array(
    [
        [0.04352327, -0.20515826],
        [0.92923648, 0.76103773],
        [0.20792588, 1.49407907],
        [0.47104475, 0.44386323],
        [0.22950086, 0.33367433],
        [0.15490546, 0.3130677],
        [0.09125309, -0.85409574],
        [0.12372842, 0.6536186],
        [0.13347175, 0.12167502],
        [0.094035, -2.55298982],
    ]
)
# Binary labels: 7 samples of class 1 (majority), 3 of class 0 (minority).
Y = np.array([1, 0, 1, 0, 1, 1, 1, 1, 0, 1])
# Relative tolerance for floating-point comparisons.
R_TOL = 1e-4
@pytest.mark.parametrize(
    "X, expected_voting", [(X, "soft"), (sparse.csr_matrix(X), "hard")]
)
def test_fit_resample_check_voting(X, expected_voting):
    # Dense input should auto-select soft voting; sparse input hard voting.
    sampler = ClusterCentroids(random_state=RND_SEED)
    sampler.fit_resample(X, Y)
    assert sampler.voting_ == expected_voting
def test_fit_resample_auto():
    # "auto" balances down to the minority class size (3 per class here).
    sampler = ClusterCentroids(sampling_strategy="auto", random_state=RND_SEED)
    X_res, y_res = sampler.fit_resample(X, Y)
    assert X_res.shape == (6, 2)
    assert y_res.shape == (6,)
def test_fit_resample_half():
    # Explicit per-class sample counts: 3 of class 0 plus 6 of class 1.
    sampler = ClusterCentroids(sampling_strategy={0: 3, 1: 6}, random_state=RND_SEED)
    X_res, y_res = sampler.fit_resample(X, Y)
    assert X_res.shape == (9, 2)
    assert y_res.shape == (9,)
def test_multiclass_fit_resample():
    # Turn two class-1 samples into a third class and resample.
    y_multi = Y.copy()
    y_multi[5] = 2
    y_multi[6] = 2
    sampler = ClusterCentroids(random_state=RND_SEED)
    _, y_res = sampler.fit_resample(X, y_multi)
    counts = Counter(y_res)
    # Every class is reduced to the size of the smallest one.
    for klass in (0, 1, 2):
        assert counts[klass] == 2
def test_fit_resample_object():
    # A user-supplied KMeans estimator must be accepted.
    kmeans = KMeans(random_state=RND_SEED)
    sampler = ClusterCentroids(
        sampling_strategy="auto", random_state=RND_SEED, estimator=kmeans,
    )
    X_res, y_res = sampler.fit_resample(X, Y)
    assert X_res.shape == (6, 2)
    assert y_res.shape == (6,)
def test_fit_hard_voting():
    kmeans = KMeans(random_state=RND_SEED)
    sampler = ClusterCentroids(
        sampling_strategy="auto",
        random_state=RND_SEED,
        estimator=kmeans,
        voting="hard",
    )
    X_res, y_res = sampler.fit_resample(X, Y)
    assert X_res.shape == (6, 2)
    assert y_res.shape == (6,)
    # With hard voting every resampled point must be an original sample.
    for row in X_res:
        assert np.any(np.all(row == X, axis=1))
@pytest.mark.parametrize(
    "cluster_centroids_params, err_msg",
    [
        ({"estimator": "rnd"}, "has to be a KMeans clustering"),
        ({"voting": "unknown"}, "needs to be one of"),
    ],
)
def test_fit_resample_error(cluster_centroids_params, err_msg):
    # Invalid constructor arguments surface as ValueError at fit time.
    sampler = ClusterCentroids(**cluster_centroids_params)
    with pytest.raises(ValueError, match=err_msg):
        sampler.fit_resample(X, Y)
def test_cluster_centroids_n_jobs():
    # check that we deprecate the `n_jobs` parameter.
    sampler = ClusterCentroids(n_jobs=1)
    with pytest.warns(FutureWarning) as record:
        sampler.fit_resample(X, Y)
    # Exactly one deprecation warning, mentioning the parameter.
    assert len(record) == 1
    assert "'n_jobs' was deprecated" in record[0].message.args[0]
def test_cluster_centroids_hard_target_class():
    # check that the samples selected by hard voting correspond to the
    # targeted class
    # non-regression test for:
    # https://github.com/scikit-learn-contrib/imbalanced-learn/issues/738
    X_imb, y_imb = make_classification(
        n_samples=1000,
        n_features=2,
        n_informative=1,
        n_redundant=0,
        n_repeated=0,
        n_clusters_per_class=1,
        weights=[0.3, 0.7],
        class_sep=0.01,
        random_state=0,
    )
    sampler = ClusterCentroids(voting="hard", random_state=0)
    X_res, y_res = sampler.fit_resample(X_imb, y_imb)

    X_minority = X_imb[np.flatnonzero(y_imb == 0)]
    X_res_majority = X_res[np.flatnonzero(y_res == 1)]
    # No resampled majority sample may coincide with a minority sample.
    leaked = any(
        np.all(np.isclose(candidate, minority_sample))
        for candidate in X_res_majority
        for minority_sample in X_minority
    )
    assert not leaked
| {
"content_hash": "b160750bd4720aba545292904d3b1b78",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 86,
"avg_line_length": 29.611842105263158,
"alnum_prop": 0.6411908464785603,
"repo_name": "fmfn/UnbalancedDataset",
"id": "8148e2fdb018ba327c78b27fd8a67a834ad7512a",
"size": "4501",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "imblearn/under_sampling/_prototype_generation/tests/test_cluster_centroids.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "235"
},
{
"name": "Makefile",
"bytes": "667"
},
{
"name": "Python",
"bytes": "278128"
},
{
"name": "Shell",
"bytes": "4610"
}
],
"symlink_target": ""
} |
"""
This module contains APIs to facilitate Imc backup and import
"""
import time
from ..imcexception import ImcValidationException
def backup_create(handle, remote_host, remote_file, protocol, username, password,
                  passphrase, timeout_in_sec=600, entity="CMC", **kwargs):
    """
    backup_create helps create and download Imc backups.

    Args:
        handle (ImcHandle): Imc Connection handle
        remote_host (str): IP or Hostname for the remote host.
        remote_file (str): Absolute path and name for the backup file
        protocol (str) : "ftp", "http", "scp", "sftp", "tftp"
        username (str) : Remote Host user name
        password (str) : Remote Host user credentials/password
        passphrase (str) : Password for the backup file.
        timeout_in_sec (number) : time in seconds for which method waits
            for the backup file to generate before it exits.
        entity (str): For C3260 platforms:
            "CMC" for backup of chassis related configuration and state
            "CIMC1" for backup of server-1 related configuration and state
            "CIMC2" for backup of server-2 related configuration and state
        kwargs : key=value paired arguments forwarded to the MgmtBackup mo

    Returns:
        MgmtBackup managed object on success.

    Raises:
        ImcValidationException: on empty/missing credentials, on an export
            error reported by the endpoint, or on timeout.

    Example:
        remote_file = "/root/config_backup.xml"
        backup_create(h, remote_file=remote_file,
                      protocol="ftp", username="user", password="pass",
                      remote_host="10.10.10.10", passphrase="xxxxxx")
        backup_create(handle, remote_file="/users/xyz/backup",
                      remote_host="1.1.1.1", protocol="scp",
                      username="admin", password="password",
                      passphrase="passphrase", timeout_in_sec=600, entity="CMC")
    """

    from ..mometa.mgmt.MgmtBackup import MgmtBackup, MgmtBackupConsts
    from ..mometa.top.TopSystem import TopSystem
    from ..mometa.equipment.EquipmentChassis import EquipmentChassis
    from ..imccoreutils import IMC_PLATFORM

    # `not x` also rejects None, which the old `x == ""` check let through.
    if not password or not passphrase:
        raise ImcValidationException("Invalid password or passphrase")

    top_system = TopSystem()
    parent_mo = None
    if handle.platform == IMC_PLATFORM.TYPE_CLASSIC:
        parent_mo = top_system
    elif handle.platform == IMC_PLATFORM.TYPE_MODULAR:
        # On modular (C3260) platforms the backup mo hangs off the chassis.
        parent_mo = EquipmentChassis(parent_mo_or_dn=top_system)

    mgmt_backup = MgmtBackup(parent_mo_or_dn=parent_mo)
    mgmt_backup.hostname = remote_host
    mgmt_backup.remote_file = remote_file
    mgmt_backup.user = username
    mgmt_backup.pwd = password
    mgmt_backup.passphrase = passphrase
    mgmt_backup.proto = protocol
    mgmt_backup.admin_state = MgmtBackupConsts.ADMIN_STATE_ENABLED
    mgmt_backup.set_prop_multiple(**kwargs)
    if handle.platform == IMC_PLATFORM.TYPE_MODULAR:
        mgmt_backup.entity = entity

    handle.add_mo(mgmt_backup, modify_present=True)

    # Give the endpoint a head start before polling for completion.
    time.sleep(10)
    duration = timeout_in_sec
    poll_interval = 2
    while True:
        mgmt_backup = handle.query_dn(dn=mgmt_backup.dn)
        # The export is finished once the admin state flips back to disabled.
        if mgmt_backup.admin_state == MgmtBackupConsts.ADMIN_STATE_DISABLED:
            if mgmt_backup.fsm_stage_descr == "Completed successfully":
                # Return the mo for parity with backup_import (was None).
                return mgmt_backup
            if mgmt_backup.fsm_stage_descr == "Error":
                raise ImcValidationException("Failed to export the CIMC "
                                             "configuration file." +
                                             "Error Code: " +
                                             mgmt_backup.fsm_rmt_inv_err_code +
                                             " Error Description: " +
                                             mgmt_backup.fsm_rmt_inv_err_descr)
        time.sleep(min(duration, poll_interval))
        duration = max(0, duration - poll_interval)
        if duration == 0:
            # Clean up the stuck backup mo before giving up.
            handle.remove_mo(mgmt_backup)
            raise ImcValidationException('backup_create timed out')
def backup_import(handle, remote_host, remote_file, protocol, username,
                  password, passphrase, entity="CMC", timeout_in_sec=600,
                  **kwargs):
    """
    This operation uploads an Imc backup taken earlier via GUI
    or backup_create operation for all configuration, system configuration,
    and logical configuration files. User can perform an import while the
    system is up and running.

    Args:
        handle (ImcHandle): connection handle
        remote_host (str): IP or Hostname for the remote host.
        remote_file (str): Absolute path and name for the backup file
        protocol (str) : "ftp", "http", "scp", "sftp", "tftp"
        username (str) : Remote Host user name
        password (str) : Remote Host user credentials/password
        passphrase (str) : Password for the backup file.
        entity (str): For C3260 platforms:
            "CMC" for importing chassis related configuration and state
            "CIMC1" for importing server-1 related configuration and state
            "CIMC2" for importing server-2 related configuration and state
        timeout_in_sec (number) : time in seconds for which method polls
            for the import to finish before raising. (The docstring example
            always advertised this parameter; it is now honoured.)
        kwargs : key=value paired arguments forwarded to the MgmtImporter mo

    Returns:
        MgmtImporter managed object on success.

    Raises:
        ImcValidationException: on empty/missing credentials, on an import
            error reported by the endpoint, or on timeout.

    Example:
        remote_file = "/root/config_backup.xml"
        backup_import(h, remote_file=remote_file,
                      protocol="ftp", username="user", password="pass",
                      remote_host="10.10.10.10", passphrase="xxxxxx")
        backup_import(handle, remote_file="/users/xyz/backup",
                      remote_host="1.1.1.1", protocol="scp",
                      username="admin", password="password",
                      passphrase="passphrase", timeout_in_sec=600, entity="CMC")
    """

    from ..mometa.top.TopSystem import TopSystem
    from ..mometa.mgmt.MgmtImporter import MgmtImporter, MgmtImporterConsts
    from ..mometa.equipment.EquipmentChassis import EquipmentChassis
    from ..imccoreutils import IMC_PLATFORM

    # `not x` also rejects None, which the old `x == ""` check let through.
    if not password or not passphrase:
        raise ImcValidationException("Invalid password or passphrase")

    # create MgmtImporter
    top_system = TopSystem()
    parent_mo = None
    if handle.platform == IMC_PLATFORM.TYPE_CLASSIC:
        parent_mo = top_system
    elif handle.platform == IMC_PLATFORM.TYPE_MODULAR:
        parent_mo = EquipmentChassis(parent_mo_or_dn=top_system)

    mgmt_importer = MgmtImporter(parent_mo_or_dn=parent_mo)
    mgmt_importer.hostname = remote_host
    mgmt_importer.remote_file = remote_file
    mgmt_importer.proto = protocol
    mgmt_importer.user = username
    mgmt_importer.pwd = password
    mgmt_importer.passphrase = passphrase
    mgmt_importer.admin_state = MgmtImporterConsts.ADMIN_STATE_ENABLED
    mgmt_importer.set_prop_multiple(**kwargs)
    if handle.platform == IMC_PLATFORM.TYPE_MODULAR:
        mgmt_importer.entity = entity

    handle.add_mo(mgmt_importer, modify_present=True)

    # Give the endpoint a head start before polling for completion.
    time.sleep(10)
    duration = timeout_in_sec
    poll_interval = 2
    while True:
        mgmt_importer = handle.query_dn(dn=mgmt_importer.dn)
        # The import is finished once the admin state flips back to disabled.
        if mgmt_importer.admin_state == MgmtImporterConsts.ADMIN_STATE_DISABLED:
            if mgmt_importer.fsm_stage_descr == "Completed successfully":
                return mgmt_importer
            if mgmt_importer.fsm_stage_descr == "Error":
                raise ImcValidationException(
                    "Failed to import the CIMC "
                    "configuration file." +
                    "Error Code: " +
                    mgmt_importer.fsm_rmt_inv_err_code +
                    " Error Description: " +
                    mgmt_importer.fsm_rmt_inv_err_descr)
        # Previously this loop busy-polled with no sleep and no timeout,
        # hammering the endpoint and hanging forever on a stuck import.
        time.sleep(min(duration, poll_interval))
        duration = max(0, duration - poll_interval)
        if duration == 0:
            raise ImcValidationException('backup_import timed out')
| {
"content_hash": "9403279887b1344a65f8526524de0271",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 84,
"avg_line_length": 41.968586387434556,
"alnum_prop": 0.6212574850299402,
"repo_name": "ragupta-git/ImcSdk",
"id": "8877eefa2755763c33acd3ac18ecec3f841801ca",
"size": "8595",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "imcsdk/utils/imcbackup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1042023"
}
],
"symlink_target": ""
} |
import functools
import getpass
import hashlib
import inspect
import logging
import sys
import prettytable
import six
from keystoneclient import exceptions
from keystoneclient.openstack.common import strutils
logger = logging.getLogger(__name__)
# Decorator for cli-args
def arg(*args, **kwargs):
    """Decorator that records an argparse argument spec on the function."""
    def _decorator(func):
        # Because of the semantics of decorator composition if we just append
        # to the options list positional options will appear to be backwards.
        arguments = func.__dict__.setdefault('arguments', [])
        arguments.insert(0, (args, kwargs))
        return func
    return _decorator
def pretty_choice_list(l):
    """Return the items of *l* single-quoted and comma-separated."""
    quoted = ["'%s'" % item for item in l]
    return ', '.join(quoted)
def print_list(objs, fields, formatters={}, order_by=None):
    """Print a table of *objs*, one row per object, one column per field.

    Column values come from ``formatters[field](obj)`` when a formatter is
    given, otherwise from the attribute named after the lower-cased,
    underscored field name. Rows are sorted by *order_by* (defaults to the
    first field).
    """
    # NOTE(review): mutable default `formatters={}` is never mutated here,
    # but a None default would be safer — confirm before changing.
    pt = prettytable.PrettyTable([f for f in fields],
                                 caching=False, print_empty=False)
    pt.aligns = ['l' for f in fields]
    for o in objs:
        row = []
        for field in fields:
            if field in formatters:
                # Custom formatter takes precedence over attribute lookup.
                row.append(formatters[field](o))
            else:
                # Map e.g. "Display Name" -> attribute `display_name`.
                field_name = field.lower().replace(' ', '_')
                data = getattr(o, field_name, '')
                if data is None:
                    data = ''
                row.append(data)
        pt.add_row(row)
    if order_by is None:
        order_by = fields[0]
    print(strutils.safe_encode(pt.get_string(sortby=order_by)))
def _word_wrap(string, max_length=0):
"""wrap long strings to be no longer than max_length."""
if max_length <= 0:
return string
return '\n'.join([string[i:i + max_length] for i in
range(0, len(string), max_length)])
def print_dict(d, wrap=0):
    """pretty table prints dictionaries.

    Wrap values to max_length wrap if wrap>0
    """
    pt = prettytable.PrettyTable(['Property', 'Value'],
                                 caching=False, print_empty=False)
    pt.aligns = ['l', 'l']
    for (prop, value) in six.iteritems(d):
        if value is None:
            value = ''
        # _word_wrap is a no-op when wrap <= 0.
        value = _word_wrap(value, max_length=wrap)
        pt.add_row([prop, value])
    # Rows sorted alphabetically by property name.
    print(strutils.safe_encode(pt.get_string(sortby='Property')))
def find_resource(manager, name_or_id):
    """Helper for the _find_* methods.

    Resolves *name_or_id* against *manager* by trying, in order: integer id,
    raw string id, then name lookup. Raises CommandError when nothing (or
    more than one thing) matches.
    """
    # first try to get entity as integer id
    try:
        if isinstance(name_or_id, int) or name_or_id.isdigit():
            return manager.get(int(name_or_id))
    except exceptions.NotFound:
        pass

    # now try the entity as a string
    try:
        return manager.get(name_or_id)
    except (exceptions.NotFound):
        pass

    # finally try to find entity by name
    try:
        if isinstance(name_or_id, six.binary_type):
            # Name lookups expect text, not bytes.
            name_or_id = name_or_id.decode('utf-8', 'strict')
        return manager.find(name=name_or_id)
    except exceptions.NotFound:
        msg = ("No %s with a name or ID of '%s' exists." %
               (manager.resource_class.__name__.lower(), name_or_id))
        raise exceptions.CommandError(msg)
    except exceptions.NoUniqueMatch:
        msg = ("Multiple %s matches found for '%s', use an ID to be more"
               " specific." % (manager.resource_class.__name__.lower(),
                               name_or_id))
        raise exceptions.CommandError(msg)
def unauthenticated(f):
    """Mark the decorated function as requiring no authentication.

    Usage::

        @unauthenticated
        def mymethod(f):
            ...
    """
    setattr(f, 'unauthenticated', True)
    return f
def isunauthenticated(f):
    """Checks to see if the function is marked as not requiring authentication
    with the @unauthenticated decorator.

    Returns True if decorator is set to True, False otherwise.
    """
    try:
        return f.unauthenticated
    except AttributeError:
        # Undecorated functions require authentication.
        return False
def hash_signed_token(signed_text, mode='md5'):
    """Return the hex digest of *signed_text* under the given hash *mode*."""
    # hashlib.new accepts the initial data directly, replacing the
    # separate update() call.
    return hashlib.new(mode, signed_text).hexdigest()
def prompt_for_password():
    """Prompt user for password if not provided so the password
    doesn't show up in the bash history.

    Returns the confirmed password, or None when stdin is not a TTY or
    the user sends EOF.
    """
    if not (hasattr(sys.stdin, 'isatty') and sys.stdin.isatty()):
        # nothing to do
        return
    while True:
        try:
            new_passwd = getpass.getpass('New Password: ')
            rep_passwd = getpass.getpass('Repeat New Password: ')
            # NOTE(review): on mismatch this silently re-prompts with no
            # feedback to the user — consider printing a message.
            if new_passwd == rep_passwd:
                return new_passwd
        except EOFError:
            return
class positional(object):
    """A decorator which enforces only some args may be passed positionally.

    This idea and some of the code was taken from the oauth2 client of the
    google-api client.

    This decorator makes it easy to support Python 3 style key-word only
    parameters. For example, in Python 3 it is possible to write::

        def fn(pos1, *, kwonly1, kwonly2=None):
            ...

    All named parameters after * must be a keyword::

        fn(10, 'kw1', 'kw2')  # Raises exception.
        fn(10, kwonly1='kw1', kwonly2='kw2')  # Ok.

    To replicate this behaviour with the positional decorator you simply
    specify how many arguments may be passed positionally::

        @positional(1)
        def fn(pos1, kwonly1=None, kwonly2=None):
            ...

    If no value is provided to the decorator then every parameter without a
    default value is required to be positional-capable and everything with a
    default becomes keyword-only::

        @positional()
        def fn(pos1, kwonly1=None):
            ...

        fn(10)  # Ok.
        fn(10, 20)  # Raises exception.
        fn(10, kwonly1=20)  # Ok.

    When defining instance or class methods account for `self`/`cls`, or use
    the `positional.method` / `positional.classmethod` helpers which do it
    for you::

        class MyClass(object):
            @positional.method(1)
            def my_method(self, pos1, kwonly1=None):
                ...

            @positional.classmethod(1)
            def my_method(cls, pos1, kwonly1=None):
                ...

    For compatibility reasons you may wish to not always raise an exception
    so a WARN mode is available that logs a warning instead::

        @positional(1, enforcement=positional.WARN)
        def fn(pos1, kwonly=1):
            ...

    Available modes are:

    - positional.EXCEPT - the default, raise an exception.
    - positional.WARN - log a warning on mistake.
    """

    EXCEPT = 'except'
    WARN = 'warn'

    def __init__(self, max_positional_args=None, enforcement=EXCEPT):
        self._max_positional_args = max_positional_args
        self._enforcement = enforcement

    @classmethod
    def method(cls, max_positional_args=None, enforcement=EXCEPT):
        # Allow one extra positional for the implicit `self`.
        if max_positional_args is not None:
            max_positional_args += 1

        def f(func):
            return cls(max_positional_args, enforcement)(func)
        return f

    @classmethod
    def classmethod(cls, *args, **kwargs):
        def f(func):
            # `classmethod` here resolves to the builtin, not this method.
            return classmethod(cls.method(*args, **kwargs)(func))
        return f

    def __call__(self, func):
        if self._max_positional_args is None:
            # inspect.getargspec() was removed in Python 3.11; prefer
            # getfullargspec() when available.
            argspec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
            spec = argspec(func)
            # spec.defaults is None (not an empty tuple) when the function
            # declares no default values; len(None) would raise TypeError.
            defaults = spec.defaults or ()
            self._max_positional_args = len(spec.args) - len(defaults)

        plural = '' if self._max_positional_args == 1 else 's'

        @functools.wraps(func)
        def inner(*args, **kwargs):
            if len(args) > self._max_positional_args:
                message = ('%(name)s takes at most %(max)d positional '
                           'argument%(plural)s (%(given)d given)' %
                           {'name': func.__name__,
                            'max': self._max_positional_args,
                            'given': len(args),
                            'plural': plural})
                if self._enforcement == self.EXCEPT:
                    raise TypeError(message)
                elif self._enforcement == self.WARN:
                    # warning(): warn() is a deprecated alias.
                    logger.warning(message)
            return func(*args, **kwargs)
        return inner
| {
"content_hash": "021f2e940959c64cc1b28691f7207bf4",
"timestamp": "",
"source": "github",
"line_count": 308,
"max_line_length": 79,
"avg_line_length": 30.288961038961038,
"alnum_prop": 0.5879515489334334,
"repo_name": "UTSA-ICS/python-keystoneclient-SID",
"id": "a6c03f5e9629832de79f5bd6e821a525b7689bb5",
"size": "9902",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keystoneclient/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16002"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "1044183"
},
{
"name": "Shell",
"bytes": "12048"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from rasa_core.train import train_dialogue_model
from rasa_core.training_utils import StoryFileReader
from rasa_core.training_utils.visualization import visualize_stories
from tests.conftest import DEFAULT_DOMAIN_PATH, DEFAULT_STORIES_FILE
def test_story_visualization_script():
    # The CLI entry point must build its argument parser without errors.
    from rasa_core.visualize import create_argument_parser
    parser = create_argument_parser()
    assert parser is not None
def test_story_visualization(default_domain):
    steps = StoryFileReader.read_from_file(
        "data/dsl_stories/stories.md", default_domain)
    graph = visualize_stories(steps)
    # The sample story file renders to a graph of exactly 19 nodes.
    assert len(graph.nodes()) == 19
def test_training_script(tmpdir):
    # Smoke test: training with default data must complete without raising.
    output_path = tmpdir.strpath
    train_dialogue_model(DEFAULT_DOMAIN_PATH, DEFAULT_STORIES_FILE,
                         output_path, {})
| {
"content_hash": "0ca09a0e891cd86d6575fb646615bbbe",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 68,
"avg_line_length": 34.96296296296296,
"alnum_prop": 0.7478813559322034,
"repo_name": "deepak02/rasa_core",
"id": "e3de020baa94f3c7b7624ebf50c385156e00f577",
"size": "944",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_training.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "562"
},
{
"name": "Python",
"bytes": "273438"
}
],
"symlink_target": ""
} |
from django.db import models
from django.utils import timezone
class Post(models.Model):
    """A blog post authored by a user."""

    # on_delete is required from Django 2.0; CASCADE matches the implicit
    # default of earlier versions, so behavior is unchanged.
    author = models.ForeignKey('auth.User', on_delete=models.CASCADE)
    title = models.CharField(max_length=200)
    text = models.TextField()
    text2 = models.TextField(blank=True)
    # image = models.ImageField()
    created_date = models.DateTimeField(
        default=timezone.now)
    # Null until publish() is called.
    published_date = models.DateTimeField(
        blank=True, null=True)

    def publish(self):
        """Stamp the post as published now and persist it."""
        self.published_date = timezone.now()
        self.save()

    def __str__(self):
        return self.title
class Comment(models.Model):
    """A user comment attached to a Post."""

    # on_delete is required from Django 2.0; CASCADE matches the implicit
    # default of earlier versions, so behavior is unchanged.
    author = models.ForeignKey('auth.User', on_delete=models.CASCADE)
    text = models.TextField()
    post = models.ForeignKey('Post', blank=True, null=True,
                             related_name='comment',
                             on_delete=models.CASCADE)
    created_date = models.DateTimeField(
        default=timezone.now)
    # Null until publish() is called.
    published_date = models.DateTimeField(
        blank=True, null=True)

    def publish(self):
        """Stamp the comment as published now and persist it."""
        self.published_date = timezone.now()
        self.save()

    def __str__(self):
        return self.text
| {
"content_hash": "3f49c691ef90687b5dc001a0109f8974",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 83,
"avg_line_length": 24.697674418604652,
"alnum_prop": 0.6384180790960452,
"repo_name": "fortunto2/django_hack",
"id": "4f235d47c65635700e3bed51c174fa6a2357243d",
"size": "1062",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blog/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1295"
},
{
"name": "Python",
"bytes": "11285"
}
],
"symlink_target": ""
} |
"""Test asmap config argument for ASN-based IP bucketing.
Verify node behaviour and debug log when launching bitcoind in these cases:
1. `bitcoind` with no -asmap arg, using /16 prefix for IP bucketing
2. `bitcoind -asmap=<absolute path>`, using the unit test skeleton asmap
3. `bitcoind -asmap=<relative path>`, using the unit test skeleton asmap
4. `bitcoind -asmap/-asmap=` with no file specified, using the default asmap
5. `bitcoind -asmap` with no file specified and a missing default asmap file
6. `bitcoind -asmap` with an empty (unparsable) default asmap file
The tests are order-independent.
"""
import os
import shutil
from test_framework.test_framework import BitcoinTestFramework
DEFAULT_ASMAP_FILENAME = 'ip_asn.map' # defined in src/init.cpp
ASMAP = '../../src/test/data/asmap.raw' # path to unit test skeleton asmap
# Hash bitcoind reports for the skeleton asmap in its debug log
# ("Using asmap version ..." -- see expected_messages below).
VERSION = 'fec61fa21a9f46f3b17bdcd660d7f4cd90b966aad3aec593c99b35f0aca15853'
def expected_messages(filename):
    """Return the debug-log lines bitcoind emits when loading `filename`.

    The `filename` argument must be interpolated into the first message;
    previously a literal "(unknown)" placeholder was embedded, which left
    the parameter unused and made every assert_debug_log check vacuous.
    """
    return [f'Opened asmap file "{filename}" (59 bytes) from disk',
            f'Using asmap version {VERSION} for IP bucketing']
class AsmapTest(BitcoinTestFramework):
    """Functional tests for the -asmap configuration option."""

    def set_test_params(self):
        self.num_nodes = 1

    def test_without_asmap_arg(self):
        self.log.info('Test bitcoind with no -asmap arg passed')
        self.stop_node(0)
        with self.node.assert_debug_log(['Using /16 prefix for IP bucketing']):
            self.start_node(0)

    def test_asmap_with_absolute_path(self):
        self.log.info('Test bitcoind -asmap=<absolute path>')
        self.stop_node(0)
        filename = os.path.join(self.datadir, 'my-map-file.map')
        shutil.copyfile(self.asmap_raw, filename)
        with self.node.assert_debug_log(expected_messages(filename)):
            # Interpolate the real absolute path; a literal "(unknown)"
            # placeholder here would mean the copied map file was never
            # actually passed to bitcoind.
            self.start_node(0, [f'-asmap={filename}'])
        os.remove(filename)

    def test_asmap_with_relative_path(self):
        self.log.info('Test bitcoind -asmap=<relative path>')
        self.stop_node(0)
        name = 'ASN_map'
        filename = os.path.join(self.datadir, name)
        shutil.copyfile(self.asmap_raw, filename)
        with self.node.assert_debug_log(expected_messages(filename)):
            self.start_node(0, [f'-asmap={name}'])
        os.remove(filename)

    def test_default_asmap(self):
        shutil.copyfile(self.asmap_raw, self.default_asmap)
        for arg in ['-asmap', '-asmap=']:
            self.log.info(f'Test bitcoind {arg} (using default map file)')
            self.stop_node(0)
            with self.node.assert_debug_log(expected_messages(self.default_asmap)):
                self.start_node(0, [arg])
        os.remove(self.default_asmap)

    def test_default_asmap_with_missing_file(self):
        self.log.info('Test bitcoind -asmap with missing default map file')
        self.stop_node(0)
        msg = f"Error: Could not find asmap file \"{self.default_asmap}\""
        self.node.assert_start_raises_init_error(extra_args=['-asmap'], expected_msg=msg)

    def test_empty_asmap(self):
        self.log.info('Test bitcoind -asmap with empty map file')
        self.stop_node(0)
        with open(self.default_asmap, "w", encoding="utf-8") as f:
            f.write("")
        msg = f"Error: Could not parse asmap file \"{self.default_asmap}\""
        self.node.assert_start_raises_init_error(extra_args=['-asmap'], expected_msg=msg)
        os.remove(self.default_asmap)

    def run_test(self):
        self.node = self.nodes[0]
        self.datadir = os.path.join(self.node.datadir, self.chain)
        self.default_asmap = os.path.join(self.datadir, DEFAULT_ASMAP_FILENAME)
        self.asmap_raw = os.path.join(os.path.dirname(os.path.realpath(__file__)), ASMAP)

        self.test_without_asmap_arg()
        self.test_asmap_with_absolute_path()
        self.test_asmap_with_relative_path()
        self.test_default_asmap()
        self.test_default_asmap_with_missing_file()
        self.test_empty_asmap()
# Allow the test to be run directly as a script.
if __name__ == '__main__':
    AsmapTest().main()
| {
"content_hash": "6dd43c24b1c3b8aa8461b5eae0e73cde",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 89,
"avg_line_length": 39.37623762376238,
"alnum_prop": 0.659039476992708,
"repo_name": "yenliangl/bitcoin",
"id": "704dd6126b5f76dc754ec2abbbfe945cca4447d2",
"size": "4186",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/functional/feature_asmap.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28453"
},
{
"name": "C",
"bytes": "694312"
},
{
"name": "C++",
"bytes": "6161382"
},
{
"name": "HTML",
"bytes": "21860"
},
{
"name": "Java",
"bytes": "30290"
},
{
"name": "M4",
"bytes": "198099"
},
{
"name": "Makefile",
"bytes": "118152"
},
{
"name": "Objective-C",
"bytes": "123749"
},
{
"name": "Objective-C++",
"bytes": "5382"
},
{
"name": "Python",
"bytes": "1537476"
},
{
"name": "QMake",
"bytes": "756"
},
{
"name": "Shell",
"bytes": "90713"
}
],
"symlink_target": ""
} |
from __future__ import with_statement
# Portions Copyright Canonical Ltd. 2009
"""A LatentSlave that uses EC2 to instantiate the slaves on demand.
Tested with Python boto 1.5c
"""
import os
import re
import time
import boto
import boto.ec2
import boto.exception
from twisted.internet import defer, threads
from twisted.python import log
from buildbot.buildslave.base import AbstractLatentBuildSlave
from buildbot import interfaces
# EC2 instance lifecycle states as reported by the boto API; used below
# when polling instance.state during start/stop.
PENDING = 'pending'
RUNNING = 'running'
SHUTTINGDOWN = 'shutting-down'
TERMINATED = 'terminated'
class EC2LatentBuildSlave(AbstractLatentBuildSlave):
    """A latent build slave that starts and stops an EC2 instance on demand.

    Either a specific ``ami`` or AMI-selection constraints
    (``valid_ami_owners`` / ``valid_ami_location_regex``) must be given,
    but not both. Python 2 only (uses ``long``, ``basestring`` and the
    ``except X, e`` syntax).
    """

    # Class-level defaults; set per-instance in __init__/start/stop.
    instance = image = None
    _poll_resolution = 5  # hook point for tests

    def __init__(self, name, password, instance_type, ami=None,
                 valid_ami_owners=None, valid_ami_location_regex=None,
                 elastic_ip=None, identifier=None, secret_identifier=None,
                 aws_id_file_path=None, user_data=None, region=None,
                 keypair_name='latent_buildbot_slave',
                 security_name='latent_buildbot_slave',
                 max_builds=None, notify_on_missing=[], missing_timeout=60*20,
                 build_wait_timeout=60*10, properties={}, locks=None):
        AbstractLatentBuildSlave.__init__(
            self, name, password, max_builds, notify_on_missing,
            missing_timeout, build_wait_timeout, properties, locks)
        # Exactly one of (ami) or (valid_ami_owners/valid_ami_location_regex)
        # must be supplied -- hence the XOR.
        if not ((ami is not None) ^
                (valid_ami_owners is not None or
                 valid_ami_location_regex is not None)):
            raise ValueError(
                'You must provide either a specific ami, or one or both of '
                'valid_ami_location_regex and valid_ami_owners')
        self.ami = ami
        if valid_ami_owners is not None:
            # Normalize a single owner id into a one-element tuple.
            if isinstance(valid_ami_owners, (int, long)):
                valid_ami_owners = (valid_ami_owners,)
            else:
                for element in valid_ami_owners:
                    if not isinstance(element, (int, long)):
                        raise ValueError(
                            'valid_ami_owners should be int or iterable '
                            'of ints', element)
        if valid_ami_location_regex is not None:
            if not isinstance(valid_ami_location_regex, basestring):
                raise ValueError(
                    'valid_ami_location_regex should be a string')
            else:
                # verify that regex will compile
                re.compile(valid_ami_location_regex)
        self.valid_ami_owners = valid_ami_owners
        self.valid_ami_location_regex = valid_ami_location_regex
        self.instance_type = instance_type
        self.keypair_name = keypair_name
        self.security_name = security_name
        self.user_data = user_data
        # AWS credentials: either both identifier and secret_identifier are
        # passed in, or both are read from a two-line aws_id file
        # (default ~/.ec2/aws_id).
        if identifier is None:
            assert secret_identifier is None, (
                'supply both or neither of identifier, secret_identifier')
            if aws_id_file_path is None:
                home = os.environ['HOME']
                aws_id_file_path = os.path.join(home, '.ec2', 'aws_id')
            if not os.path.exists(aws_id_file_path):
                raise ValueError(
                    "Please supply your AWS access key identifier and secret "
                    "access key identifier either when instantiating this %s "
                    "or in the %s file (on two lines).\n" %
                    (self.__class__.__name__, aws_id_file_path))
            with open(aws_id_file_path, 'r') as aws_file:
                identifier = aws_file.readline().strip()
                secret_identifier = aws_file.readline().strip()
        else:
            assert aws_id_file_path is None, \
                'if you supply the identifier and secret_identifier, ' \
                'do not specify the aws_id_file_path'
            assert secret_identifier is not None, \
                'supply both or neither of identifier, secret_identifier'
        region_found = None
        # Make the EC2 connection.
        if region is not None:
            for r in boto.ec2.regions(aws_access_key_id=identifier,
                                      aws_secret_access_key=secret_identifier):
                if r.name == region:
                    region_found = r
            if region_found is not None:
                self.conn = boto.ec2.connect_to_region(
                    region,
                    aws_access_key_id=identifier,
                    aws_secret_access_key=secret_identifier)
            else:
                raise ValueError('The specified region does not exist: {0}'.format(region))
        else:
            self.conn = boto.connect_ec2(identifier, secret_identifier)
        # Make a keypair
        #
        # We currently discard the keypair data because we don't need it.
        # If we do need it in the future, we will always recreate the keypairs
        # because there is no way to
        # programmatically retrieve the private key component, unless we
        # generate it and store it on the filesystem, which is an unnecessary
        # usage requirement.
        try:
            key_pair = self.conn.get_all_key_pairs(keypair_name)[0]
            assert key_pair
            # key_pair.delete() # would be used to recreate
        except boto.exception.EC2ResponseError, e:
            if 'InvalidKeyPair.NotFound' not in e.body:
                if 'AuthFailure' in e.body:
                    print ('POSSIBLE CAUSES OF ERROR:\n'
                           '  Did you sign up for EC2?\n'
                           '  Did you put a credit card number in your AWS '
                           'account?\n'
                           'Please doublecheck before reporting a problem.\n')
                raise
            # make one; we would always do this, and stash the result, if we
            # needed the key (for instance, to SSH to the box). We'd then
            # use paramiko to use the key to connect.
            self.conn.create_key_pair(keypair_name)
        # create security group
        try:
            group = self.conn.get_all_security_groups(security_name)[0]
            assert group
        except boto.exception.EC2ResponseError, e:
            if 'InvalidGroup.NotFound' in e.body:
                self.security_group = self.conn.create_security_group(
                    security_name,
                    'Authorization to access the buildbot instance.')
                # Authorize the master as necessary
                # TODO this is where we'd open the hole to do the reverse pb
                # connect to the buildbot
                # ip = urllib.urlopen(
                #     'http://checkip.amazonaws.com').read().strip()
                # self.security_group.authorize('tcp', 22, 22, '%s/32' % ip)
                # self.security_group.authorize('tcp', 80, 80, '%s/32' % ip)
            else:
                raise
        # get the image
        if self.ami is not None:
            self.image = self.conn.get_image(self.ami)
        else:
            # verify we have access to at least one acceptable image
            discard = self.get_image()
            assert discard
        # get the specified elastic IP, if any
        if elastic_ip is not None:
            elastic_ip = self.conn.get_all_addresses([elastic_ip])[0]
        self.elastic_ip = elastic_ip

    def get_image(self):
        """Return the AMI to launch.

        With a regex constraint, candidates are ranked by the regex's first
        capture group -- numerically when every value parses as an int
        (level 0), lexically otherwise (level 1), or by raw location when
        the group is absent (level 2). The last (highest-sorting) image wins.
        """
        if self.image is not None:
            return self.image
        if self.valid_ami_location_regex:
            level = 0
            options = []
            get_match = re.compile(self.valid_ami_location_regex).match
            for image in self.conn.get_all_images(
                    owners=self.valid_ami_owners):
                # gather sorting data
                match = get_match(image.location)
                if match:
                    alpha_sort = int_sort = None
                    if level < 2:
                        try:
                            alpha_sort = match.group(1)
                        except IndexError:
                            level = 2
                        else:
                            if level == 0:
                                try:
                                    int_sort = int(alpha_sort)
                                except ValueError:
                                    level = 1
                    options.append([int_sort, alpha_sort,
                                    image.location, image.id, image])
            if level:
                log.msg('sorting images at level %d' % level)
                # Drop the sort keys that turned out to be unusable.
                options = [candidate[level:] for candidate in options]
        else:
            options = [(image.location, image.id, image) for image
                       in self.conn.get_all_images(
                           owners=self.valid_ami_owners)]
        options.sort()
        log.msg('sorted images (last is chosen): %s' %
                (', '.join(
                    ['%s (%s)' % (candidate[-1].id, candidate[-1].location)
                     for candidate in options])))
        if not options:
            raise ValueError('no available images match constraints')
        return options[-1][-1]

    def dns(self):
        # Public DNS name of the running instance, or None if not started.
        if self.instance is None:
            return None
        return self.instance.public_dns_name
    dns = property(dns)

    def start_instance(self, build):
        """Start the EC2 instance in a worker thread; returns a Deferred."""
        if self.instance is not None:
            raise ValueError('instance active')
        return threads.deferToThread(self._start_instance)

    def _start_instance(self):
        # Blocking worker: launch the instance and poll until it leaves
        # PENDING, logging progress once a minute.
        image = self.get_image()
        reservation = image.run(
            key_name=self.keypair_name, security_groups=[self.security_name],
            instance_type=self.instance_type, user_data=self.user_data)
        self.instance = reservation.instances[0]
        log.msg('%s %s starting instance %s' %
                (self.__class__.__name__, self.slavename, self.instance.id))
        duration = 0
        interval = self._poll_resolution
        while self.instance.state == PENDING:
            time.sleep(interval)
            duration += interval
            if duration % 60 == 0:
                log.msg('%s %s has waited %d minutes for instance %s' %
                        (self.__class__.__name__, self.slavename, duration//60,
                         self.instance.id))
            self.instance.update()
        if self.instance.state == RUNNING:
            self.output = self.instance.get_console_output()
            minutes = duration//60
            seconds = duration%60
            log.msg('%s %s instance %s started on %s '
                    'in about %d minutes %d seconds (%s)' %
                    (self.__class__.__name__, self.slavename,
                     self.instance.id, self.dns, minutes, seconds,
                     self.output.output))
            if self.elastic_ip is not None:
                self.instance.use_ip(self.elastic_ip)
            return [self.instance.id,
                    image.id,
                    '%02d:%02d:%02d' % (minutes//60, minutes%60, seconds)]
        else:
            log.msg('%s %s failed to start instance %s (%s)' %
                    (self.__class__.__name__, self.slavename,
                     self.instance.id, self.instance.state))
            raise interfaces.LatentBuildSlaveFailedToSubstantiate(
                self.instance.id, self.instance.state)

    def stop_instance(self, fast=False):
        """Terminate the instance (if any) in a worker thread."""
        if self.instance is None:
            # be gentle. Something may just be trying to alert us that an
            # instance never attached, and it's because, somehow, we never
            # started.
            return defer.succeed(None)
        instance = self.instance
        self.output = self.instance = None
        return threads.deferToThread(
            self._stop_instance, instance, fast)

    def _stop_instance(self, instance, fast):
        # Blocking worker: release the elastic IP, terminate, and poll until
        # the instance reaches the goal state (just SHUTTINGDOWN when `fast`).
        if self.elastic_ip is not None:
            self.conn.disassociate_address(self.elastic_ip.public_ip)
        instance.update()
        if instance.state not in (SHUTTINGDOWN, TERMINATED):
            instance.terminate()
            log.msg('%s %s terminating instance %s' %
                    (self.__class__.__name__, self.slavename, instance.id))
        duration = 0
        interval = self._poll_resolution
        if fast:
            goal = (SHUTTINGDOWN, TERMINATED)
            instance.update()
        else:
            goal = (TERMINATED,)
        while instance.state not in goal:
            time.sleep(interval)
            duration += interval
            if duration % 60 == 0:
                log.msg(
                    '%s %s has waited %d minutes for instance %s to end' %
                    (self.__class__.__name__, self.slavename, duration//60,
                     instance.id))
            instance.update()
        log.msg('%s %s instance %s %s '
                'after about %d minutes %d seconds' %
                (self.__class__.__name__, self.slavename,
                 instance.id, goal, duration//60, duration%60))
| {
"content_hash": "5f8aee540f017726af7426a3c8021aec",
"timestamp": "",
"source": "github",
"line_count": 304,
"max_line_length": 91,
"avg_line_length": 43.05592105263158,
"alnum_prop": 0.5366338146535259,
"repo_name": "denny820909/builder",
"id": "f144321b190133bbfc15a9189250b7085a331ee0",
"size": "13804",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/buildbot-0.8.8-py2.7.egg/buildbot/buildslave/ec2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "106"
},
{
"name": "C",
"bytes": "68706"
},
{
"name": "CSS",
"bytes": "18630"
},
{
"name": "D",
"bytes": "532"
},
{
"name": "GAP",
"bytes": "14120"
},
{
"name": "HTML",
"bytes": "69377"
},
{
"name": "Makefile",
"bytes": "1220"
},
{
"name": "Objective-C",
"bytes": "1291"
},
{
"name": "Python",
"bytes": "21088388"
},
{
"name": "Shell",
"bytes": "2766"
},
{
"name": "Smarty",
"bytes": "393"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from contextlib import contextmanager
from pants.backend.jvm.targets.jvm_binary import JvmBinary
from pants.backend.jvm.tasks.jvm_run import JvmRun
from pants.util.contextutil import pushd, temporary_dir
from pants_test.tasks.task_test_base import TaskTestBase
class JvmRunTest(TaskTestBase):
    """Tests for the JvmRun task's command-line-only mode.

    Changes from the original: the redundant ``setUp`` override (which only
    called ``super``) is removed, and the deprecated ``assertEquals`` alias
    is replaced by ``assertEqual`` (removed in Python 3.12).
    """

    @classmethod
    def task_type(cls):
        return JvmRun

    @contextmanager
    def setup_cmdline_run(self, **options):
        """Run the JvmRun task in command line only mode with the specified extra options.

        :returns: the command line string
        """
        self.set_options(only_write_cmd_line='a', **options)
        jvm_binary = self.make_target('src/java/org/pantsbuild:binary', JvmBinary,
                                      main='org.pantsbuild.Binary')
        context = self.context(target_roots=[jvm_binary])
        jvm_run = self.create_task(context, 'unused')
        self._cmdline_classpath = [os.path.join(self.build_root, c) for c in ['bob', 'fred']]
        self.populate_compile_classpath(context=jvm_run.context, classpath=self._cmdline_classpath)
        with temporary_dir() as pwd:
            with pushd(pwd):
                # 'a' matches the only_write_cmd_line option set above.
                cmdline_file = os.path.join(pwd, 'a')
                self.assertFalse(os.path.exists(cmdline_file))
                jvm_run.execute()
                self.assertTrue(os.path.exists(cmdline_file))
                with open(cmdline_file) as fp:
                    contents = fp.read()
                yield contents

    def test_cmdline_only(self):
        """The default main class from the target ends up on the command line."""
        with self.setup_cmdline_run() as cmdline:
            expected_suffix = 'java -cp {} org.pantsbuild.Binary'.format(
                os.path.pathsep.join(self._cmdline_classpath))
            self.assertEqual(expected_suffix, cmdline[-len(expected_suffix):])

    def test_opt_main(self):
        """A main= option overrides the target's main class."""
        with self.setup_cmdline_run(main='org.pantsbuild.OptMain') as cmdline:
            expected_suffix = 'java -cp {} org.pantsbuild.OptMain'.format(
                os.path.pathsep.join(self._cmdline_classpath))
            self.assertEqual(expected_suffix, cmdline[-len(expected_suffix):])
| {
"content_hash": "6a0ac08077cb1deed67bf376d3c887bf",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 95,
"avg_line_length": 40.83018867924528,
"alnum_prop": 0.6866913123844732,
"repo_name": "digwanderlust/pants",
"id": "c9e6e6a20eea8980e2a21450bfe36aecd3750544",
"size": "2311",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/python/pants_test/backend/jvm/tasks/test_jvm_run.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "767"
},
{
"name": "CSS",
"bytes": "11139"
},
{
"name": "GAP",
"bytes": "4818"
},
{
"name": "HTML",
"bytes": "68162"
},
{
"name": "Java",
"bytes": "310901"
},
{
"name": "JavaScript",
"bytes": "10157"
},
{
"name": "Protocol Buffer",
"bytes": "7038"
},
{
"name": "Python",
"bytes": "3049918"
},
{
"name": "Scala",
"bytes": "77693"
},
{
"name": "Shell",
"bytes": "47201"
},
{
"name": "Thrift",
"bytes": "2824"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import mock
import django_dynamic_fixture as fixture
from django.contrib.auth.models import User
from django.test import TestCase
from django.test.utils import override_settings
from readthedocs.projects.forms import ProjectRelationshipForm
from readthedocs.projects.models import Project, ProjectRelationship
from readthedocs.rtd_tests.utils import create_user
class SubprojectFormTests(TestCase):
    """Validation tests for ``ProjectRelationshipForm``."""

    def test_empty_child(self):
        """Submitting no child project fails field validation."""
        user = fixture.get(User)
        project = fixture.get(Project, slug='mainproject')
        form = ProjectRelationshipForm(
            {},
            project=project,
            user=user
        )
        form.full_clean()
        self.assertEqual(len(form.errors['child']), 1)
        self.assertRegexpMatches(
            form.errors['child'][0],
            r'This field is required.'
        )

    def test_nonexistent_child(self):
        """A child pk that does not exist is rejected as an invalid choice."""
        user = fixture.get(User)
        project = fixture.get(Project, slug='mainproject')
        self.assertFalse(Project.objects.filter(pk=9999).exists())
        form = ProjectRelationshipForm(
            {'child': 9999},
            project=project,
            user=user
        )
        form.full_clean()
        self.assertEqual(len(form.errors['child']), 1)
        self.assertRegexpMatches(
            form.errors['child'][0],
            r'Select a valid choice.'
        )

    def test_adding_subproject_fails_when_user_is_not_admin(self):
        """A project the user does not administer is not a valid child choice."""
        user = fixture.get(User)
        project = fixture.get(Project, slug='mainproject')
        project.users.add(user)
        subproject = fixture.get(Project, slug='subproject')
        # Sanity check: the user only administers the main project.
        self.assertQuerysetEqual(
            Project.objects.for_admin_user(user),
            [project],
            transform=lambda n: n,
        )
        form = ProjectRelationshipForm(
            {'child': subproject.pk},
            project=project,
            user=user
        )
        form.full_clean()
        self.assertEqual(len(form.errors['child']), 1)
        self.assertRegexpMatches(
            form.errors['child'][0],
            r'Select a valid choice.'
        )

    def test_adding_subproject_passes_when_user_is_admin(self):
        """Saving the form creates the parent -> child relationship."""
        user = fixture.get(User)
        project = fixture.get(Project, slug='mainproject')
        project.users.add(user)
        subproject = fixture.get(Project, slug='subproject')
        subproject.users.add(user)
        self.assertQuerysetEqual(
            Project.objects.for_admin_user(user),
            [project, subproject],
            transform=lambda n: n,
        )
        form = ProjectRelationshipForm(
            {'child': subproject.pk},
            project=project,
            user=user
        )
        form.full_clean()
        self.assertTrue(form.is_valid())
        form.save()
        self.assertEqual(
            [r.child for r in project.subprojects.all()],
            [subproject]
        )

    def test_subproject_form_cant_create_sub_sub_project(self):
        """Nesting a subproject under another subproject is rejected."""
        user = fixture.get(User)
        project = fixture.get(Project, users=[user])
        subproject = fixture.get(Project, users=[user])
        subsubproject = fixture.get(Project, users=[user])
        # The fixture call creates the relationship as a side effect; the
        # ``relation`` name itself is not used afterwards.
        relation = fixture.get(
            ProjectRelationship, parent=project, child=subproject
        )
        self.assertQuerysetEqual(
            Project.objects.for_admin_user(user),
            [project, subproject, subsubproject],
            transform=lambda n: n,
        )
        form = ProjectRelationshipForm(
            {'child': subsubproject.pk},
            project=subproject,
            user=user
        )
        # The subsubproject is valid here, as far as the child check is
        # concerned, but the parent check should fail.
        self.assertEqual(
            [proj_id for (proj_id, __) in form.fields['child'].choices],
            ['', subsubproject.pk],
        )
        form.full_clean()
        self.assertEqual(len(form.errors['parent']), 1)
        self.assertRegexpMatches(
            form.errors['parent'][0],
            r'Subproject nesting is not supported'
        )

    def test_excludes_existing_subprojects(self):
        """Projects that are already subprojects are excluded from choices."""
        user = fixture.get(User)
        project = fixture.get(Project, users=[user])
        subproject = fixture.get(Project, users=[user])
        # Created for its side effect: registering the existing relationship.
        relation = fixture.get(
            ProjectRelationship, parent=project, child=subproject
        )
        self.assertQuerysetEqual(
            Project.objects.for_admin_user(user),
            [project, subproject],
            transform=lambda n: n,
        )
        form = ProjectRelationshipForm(
            {'child': subproject.pk},
            project=project,
            user=user
        )
        # Only the empty choice remains.
        self.assertEqual(
            [proj_id for (proj_id, __) in form.fields['child'].choices],
            [''],
        )
@override_settings(PUBLIC_DOMAIN='readthedocs.org')
class ResolverBase(TestCase):
    """URL-resolution tests for a project with one subproject and one
    translation."""

    def setUp(self):
        # Patch out broadcast so project creation does not trigger tasks.
        with mock.patch('readthedocs.projects.models.broadcast'):
            self.owner = create_user(username='owner', password='test')
            self.tester = create_user(username='tester', password='test')
            self.pip = fixture.get(Project, slug='pip', users=[self.owner], main_language_project=None)
            self.subproject = fixture.get(Project, slug='sub', language='ja',
                                          users=[ self.owner],
                                          main_language_project=None)
            self.translation = fixture.get(Project, slug='trans', language='ja',
                                           users=[ self.owner],
                                           main_language_project=None)
            self.pip.add_subproject(self.subproject)
            self.pip.translations.add(self.translation)

    @override_settings(PRODUCTION_DOMAIN='readthedocs.org')
    def test_resolver_subproject_alias(self):
        """A subproject alias URL redirects to the aliased docs URL."""
        relation = self.pip.subprojects.first()
        relation.alias = 'sub_alias'
        relation.save()
        with override_settings(USE_SUBDOMAIN=False):
            resp = self.client.get('/docs/pip/projects/sub_alias/')
            self.assertEqual(resp.status_code, 302)
            # NOTE(review): reaches into the private ``_headers`` attribute
            # of the response; ``resp['Location']`` would be the public API.
            self.assertEqual(
                resp._headers['location'][1],
                'http://readthedocs.org/docs/pip/projects/sub_alias/ja/latest/'
            )
| {
"content_hash": "16a9c9403a43c6105eb72e4d7d9312c4",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 103,
"avg_line_length": 36.48,
"alnum_prop": 0.5808270676691729,
"repo_name": "pombredanne/readthedocs.org",
"id": "d64d3ee3d6ba7598226703e7b1c74aa80e41974f",
"size": "6384",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "readthedocs/rtd_tests/tests/test_subprojects.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "4515"
},
{
"name": "CSS",
"bytes": "66514"
},
{
"name": "HTML",
"bytes": "205587"
},
{
"name": "JavaScript",
"bytes": "444672"
},
{
"name": "Makefile",
"bytes": "4594"
},
{
"name": "Python",
"bytes": "1175310"
}
],
"symlink_target": ""
} |
import tensorflow as tf

# NOTE(review): the variable is created with name='v3', so the names printed
# below are actually "v3:0" / "v3/ExponentialMovingAverage:0"; the original
# comments (translated from Chinese) said "v:0" -- confirm against the output.
v = tf.Variable(0, dtype=tf.float32, name='v3')
# Before the moving-average model is declared there is only the single
# variable v, so the loop below prints only v:0.
for variables in tf.global_variables():
    print(variables.name)

ema = tf.train.ExponentialMovingAverage(0.99)
# Register every global variable with the moving-average model.
maintain_averages_op = ema.apply(tf.global_variables())
# After the moving-average model is declared, TensorFlow automatically
# creates a shadow variable v/ExponentialMovingAverage, so the loop below
# prints both v:0 and v/ExponentialMovingAverage:0.
for variables in tf.global_variables():
    print(variables.name)

saver = tf.train.Saver()
with tf.Session() as sess:
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    sess.run(tf.assign(v, 10))
    sess.run(maintain_averages_op)
    # Saving persists both variables: v:0 and v/ExponentialMovingAverage:0.
    saver.save(sess, "Saved_model/model2.ckpt")
    print(sess.run([v, ema.average(v)]))
| {
"content_hash": "d0abcbe5d086992660f77a345ede95f7",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 55,
"avg_line_length": 30.807692307692307,
"alnum_prop": 0.7365792759051186,
"repo_name": "pearpai/TensorFlow-action",
"id": "cbe6b0eea93a98eae975c5a09c446c6929e03ea6",
"size": "990",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deep_learning_with_tensorFlow/Chapter05/p11302.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "193301"
}
],
"symlink_target": ""
} |
import datetime
import os.path
import unittest
from adjunct import opml
HERE = os.path.dirname(__file__)
class OPMLTest(unittest.TestCase):
    """Exercise OPML parsing, error handling and timestamp parsing."""

    def test_parse_file(self):
        sample_path = os.path.join(HERE, "sample.opml")
        with open(sample_path) as fp:
            document = opml.parse(fp)

        # Document-level attributes.
        self.assertTrue(document.root)
        self.assertEqual(len(document.attrs), 1)
        self.assertEqual(document.attrs["title"], "Sample OPML file")
        self.assertEqual(len(document), 3)

        # Each top-level category holds the expected number of children.
        self.assertEqual(len(document[0]), 2)
        self.assertEqual(len(document[1]), 1)
        self.assertEqual(len(document[2]), 1)

        # Drill into the last category and its single feed.
        self.assertEqual(document[2].attrs["text"], "Personal")
        feed = document[2][0]
        # 5, because it includes isComment and isBreakpoint implicitly
        self.assertEqual(len(feed.attrs), 5)
        self.assertEqual(len(feed), 0)
        self.assertEqual(feed.attrs["text"], "Can't Hack")
        self.assertEqual(feed.attrs["type"], "rss")
        self.assertEqual(
            feed.attrs["xmlUrl"], "https://i.canthack.it/feeds/all.xml"
        )
        self.assertEqual(feed.attrs["isComment"], "false")
        self.assertEqual(feed.attrs["isBreakpoint"], "false")

    def test_exception(self):
        malformed = (
            '<?xml version="1.0" encoding="UTF-8"?>'
            '<opml version="1.0"><outline/></opml>'
        )
        with self.assertRaises(opml.OpmlError):
            opml.parse_string(malformed)

    def test_date_parse(self):
        # Note: the resulting date is in UTC.
        parsed = opml.parse_timestamp("Fri, 21 Nov 1997 09:55:06 -0600")
        self.assertEqual(parsed, datetime.datetime(1997, 11, 21, 15, 55, 6))
| {
"content_hash": "f40dfbb6d936bbb068a834fc3da5c0a9",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 76,
"avg_line_length": 34.78431372549019,
"alnum_prop": 0.59695603156708,
"repo_name": "kgaughan/adjunct",
"id": "5f079642aabf920bb33d040444ee2353b9e2c531",
"size": "1774",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_opml.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2329"
},
{
"name": "Makefile",
"bytes": "571"
},
{
"name": "Python",
"bytes": "54724"
}
],
"symlink_target": ""
} |
from nose.tools import raises, eq_, ok_
from mock import patch, Mock
from django.contrib.auth.models import User
from django.contrib.admin.sites import AdminSite
from django.http import Http404
from django.test import client, TestCase
from us_ignite.common.tests import utils
from us_ignite.hubs.admin import HubRequestAdmin, get_hub_from_request
from us_ignite.hubs.models import HubRequest, Hub
from us_ignite.hubs.tests import fixtures
# Shared patcher for HubRequest.objects.get, applied as a decorator by
# several tests in HubRequestAdminTest below.
patch_request_get = patch('us_ignite.hubs.models.HubRequest.objects.get')
class HubRequestAdminTest(TestCase):
    """Tests for the custom ``approve_request`` admin view."""

    def setUp(self):
        self.factory = client.RequestFactory()
        self.site = AdminSite()

    def _tear_down(self):
        # Not run automatically by unittest (the name is not ``tearDown``);
        # tests that create DB rows call it explicitly.
        for model in [HubRequest, Hub]:
            model.objects.all().delete()

    @raises(Http404)
    def test_approve_request_does_not_exists_fails(self):
        """A missing HubRequest id raises Http404."""
        admin = HubRequestAdmin(HubRequest, self.site)
        request = self.factory.get('/')
        admin.approve_request(request, 1)

    @raises(Http404)
    @patch_request_get
    def test_request_has_been_approved_fails(self, mock_get):
        """A non-pending (already processed) request raises Http404."""
        # Calling the specced Mock yields its return_value, an instance-like
        # mock constrained to the HubRequest API.
        mock_instance = Mock(spec=HubRequest)()
        mock_instance.is_pending.return_value = False
        mock_get.return_value = mock_instance
        admin = HubRequestAdmin(HubRequest, self.site)
        request = self.factory.get('/')
        admin.approve_request(request, 1)

    @patch_request_get
    @patch('us_ignite.hubs.admin.HubApprovalRequestForm')
    def test_request_admin_is_render_successfully(self, mock_form, mock_get):
        """A pending request renders the approval template."""
        mock_instance = Mock(spec=HubRequest)()
        mock_instance.is_pending.return_value = True
        mock_get.return_value = mock_instance
        admin = HubRequestAdmin(HubRequest, self.site)
        request = self.factory.get('/')
        response = admin.approve_request(request, 1)
        mock_get.assert_called_once_with(id=1)
        # NOTE(review): ``assert_called_once()`` only exists on newer mock
        # releases; on older versions this is a no-op attribute access rather
        # than a real assertion -- confirm the installed mock version.
        mock_form.assert_called_once()
        eq_(response.status_code, 200)
        eq_(response.template_name, 'admin/hubs/request_approval.html')
        eq_(sorted(response.context_data.keys()),
            sorted(['object', 'form', 'title']))

    def test_request_admin_is_approved(self):
        """POSTing an APPROVED status redirects back and creates the hub."""
        hub_request = fixtures.get_hub_request()
        admin = HubRequestAdmin(HubRequest, self.site)
        request = self.factory.post('/', {'status': HubRequest.APPROVED})
        request._messages = utils.TestMessagesBackend(request)
        response = admin.approve_request(request, hub_request.id)
        eq_(response.status_code, 302)
        eq_(response['Location'],
            '/admin/hubs/hubrequest/%s/' % hub_request.id)
        instance = HubRequest.objects.get(id=hub_request.id)
        ok_(instance.hub)
        self._tear_down()
class TestGetHubFromRequestAdmin(TestCase):
    """Tests for the ``get_hub_from_request`` admin helper."""

    @patch('us_ignite.hubs.models.Hub.objects.create')
    def test_creation_is_successful(self, mock_create):
        # Build an unsaved HubRequest and verify the helper forwards its
        # fields to Hub.objects.create, mapping ``user`` -> ``contact``.
        requester = User(id=1)
        payload = {
            'name': 'Hello',
            'user': requester,
            'summary': 'Summary',
            'description': 'Description',
            'website': 'http://us-ignite.org',
        }
        hub_request = HubRequest(**payload)
        get_hub_from_request(hub_request)
        mock_create.assert_called_once_with(
            name='Hello',
            contact=requester,
            summary='Summary',
            description='Description',
            website='http://us-ignite.org'
        )
| {
"content_hash": "2bf3464ce173b55880ebdfe383129de2",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 77,
"avg_line_length": 36.276595744680854,
"alnum_prop": 0.644574780058651,
"repo_name": "us-ignite/us_ignite",
"id": "29591ce312e64c545497b4cf125bca0a7e75bdc7",
"size": "3410",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "us_ignite/hubs/tests/admin_tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "590320"
},
{
"name": "HTML",
"bytes": "920235"
},
{
"name": "JavaScript",
"bytes": "109759"
},
{
"name": "Nginx",
"bytes": "3047"
},
{
"name": "Pascal",
"bytes": "48"
},
{
"name": "Puppet",
"bytes": "53455"
},
{
"name": "Python",
"bytes": "1321882"
},
{
"name": "Ruby",
"bytes": "370509"
},
{
"name": "Shell",
"bytes": "63"
}
],
"symlink_target": ""
} |
"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import copy
import inspect
import itertools
import new
import os
import re
import sys
import unittest2 as unittest
from datetime import datetime
from collections import defaultdict
import tinctest
from gppylib import gpversion
from tinctest.runner import TINCTextTestResult
def dataProvider(name):
    """Decorator factory marking a function as a test data provider.

    The decorated function is returned unchanged apart from two attributes:
    ``__is_data_provider__`` (flag) and ``__data_provider_name__`` (the
    registered provider name).
    """
    def mark(func):
        # Tag in place; the callable itself is not wrapped.
        func.__is_data_provider__ = True
        func.__data_provider_name__ = name
        return func
    return mark
def skipLoading(msg):
    """Class decorator that marks a class as a test model.

    Marked classes are skipped during test loading; ``msg`` records the
    reason shown for the skip.
    """
    def mark(cls):
        cls.__tinc_skip_loading__ = True
        cls.__tinc_skip_loading_msg__ = msg
        # Because these flags are plain class attributes they are inherited
        # by subclasses (Python has no class-local variables for this).
        # Recording the declaring class name and defining file lets the
        # loader distinguish "decorator applied here" from "flag inherited
        # from a base class" and only skip the former.
        cls.__tinc_skip_loading_declared_class__ = cls.__name__
        cls.__tinc_skip_loading_declared_file__ = sys.modules[cls.__module__].__file__
        return cls
    return mark
@skipLoading("Test model. No tests loaded.")
class TINCTestCase(unittest.TestCase):
    """
    This is an abstract class and cannot be instantiated directly. Proper implementations
    of TINCTestCase require at least one test implementation function *and* proper instantatiations
    of such a test case must pass in the name of said function.
    Bear in mind, it's uncommon to define new implementations of TINCTestCase, as these would
    correspond to new types/models of testing. It's even more uncommon to directy instantiate such
    test cases, as test discovery mechanisms should provide that functionality with more robustness.
    @metadata: author: Author of the test case
    @metadata: maintainer: Current maintainer of the test case
    @metadata: description: Testcase description
    @metadata: skip: if set to True, skip running the test case
    @metadata: created: date/time when the test case was created
    @metadata: modified: date/time when the test case was last modified
    @metadata: data_provider: tdb
    @metadata: tags: tags desc (tbd)
    @metadata: product_version: product version desc (tbd)
    """
    # NOTE: class and test-method docstrings are parsed at runtime by
    # _parse_metadata_from_doc (lines beginning with '@'), so docstring text
    # is part of the behavior of this class — do not edit it casually.

    #: Deprecated: Additional test configuration. For eg, global GUCs
    test_config = []
    # Private dict of package names for a module. Memoizes the package lookup
    # performed in _get_full_name, keyed by the module's file name.
    _module_package_dict = defaultdict(str)
    # Flag that determines if all tests from this test class are included in the test suite by
    # the loader
    _all_tests_loaded = True

    def __init__(self, methodName, baseline_result = None):
        # methodName: name of the test method this instance will run
        # (standard unittest.TestCase protocol).
        super(TINCTestCase, self).__init__(methodName)
        #: Name of the test case. Usually <classname>.<testmethodname>
        self.name = None
        #: Completely qualified name of the test case. <package>.<module>.<classname>.<methodname>
        self.full_name = None
        #: Class name to which this test belongs
        self.class_name = None
        #: Name of the package to which this test belongs
        self.package_name = None
        #: Name of the module to which this test belongs
        self.module_name = None
        #: Name of the test method that will be run with this instance
        self.method_name = None
        #: Author of the test case
        self.author = None
        #: Maintainer of the test case
        self.maintainer = None
        #: Description string of the test case
        self.description = None
        #: Date on which this test case was created
        self.created_datetime = None
        #: Last modified date of this test case
        self.modified_datetime = None
        #: All the tags specified for thsi test case
        self.tags = None
        #: Skip message set through metadata for test cases that should be skipped
        self.skip = None
        #: Instance of TINCProductVersionMetadata encapsulating the product version metadata specified for this test case
        self.product_version = None
        #: Name of the data provider to be used for this test case
        self.data_provider = None
        # Populate all of the above from the class/method docstrings.
        self._infer_metadata()
        #: Start time of the test case. Set when the test case is started
        self.start_time = None
        #: End time of the test case. Set when the test case finishes execution irrespective of the result
        self.end_time = None
        #: Duration in ms of the test case execution. Set when the test case finishes execution irrespective of the result
        self.duration = None
        self.baseline_result = baseline_result
        """
        A baseline result object that can be passed on to trending tests. For eg: performance tests
        would require a baseline result object from which it can infer the previous runtime and assert
        pass or fail for the current run. Currently set only for tests run through deprecated tincdb,
        so this may not work anymore and may need to be removed on closer inspection.
        """
        #: list of file names that define the artifacts for the test case
        self.test_artifacts = []
        #: The dictionary that will be set for tests with data providers
        self.test_data_dict = {}
        #: Data for the current data-provider-generated test (set by _add_new_method)
        self.test_data = None

    def _infer_metadata(self):
        """
        This function sets all the first-class metadata that belongs to TINCTestCase. Every
        subclass is responsible for inferring additional first-class metadata that is required
        by its implementation.
        """
        self._metadata = {}
        # Method-level docstring metadata is parsed after (and therefore
        # overrides or extends) class-level metadata.
        for docstring in [self.__doc__, getattr(self, self._testMethodName).__doc__]:
            self._parse_metadata_from_doc(docstring)
        self.name = "%s.%s" % (self.__class__.__name__, self._testMethodName)
        self.full_name = self._get_full_name()
        self.author = self._metadata.get('author', None)
        self.maintainer = self._metadata.get('maintainer', None)
        self.description = self._metadata.get('description', '')
        self.skip = self._metadata.get('skip', None)
        # Missing created/modified metadata defaults to the epoch-ish sentinel below.
        self.created_datetime = datetime.strptime(self._metadata.get('created', '2000-01-01 00:00:00'), '%Y-%m-%d %H:%M:%S')
        self.modified_datetime = datetime.strptime(self._metadata.get('modified', '2000-01-01 00:00:00'), '%Y-%m-%d %H:%M:%S')
        self.data_provider = self._metadata.get('data_provider', None)
        if self._metadata.get('tags', None) == None:
            self.tags = set()
        else:
            # Tags are whitespace-separated in the docstring.
            self.tags = set(self._metadata['tags'].split())
        if 'product_version' in self._metadata:
            try:
                self.product_version = _TINCProductVersionMetadata(self._metadata.get('product_version').strip())
            # Python 2 exception syntax; 'e' is bound but unused.
            except TINCInvalidProductVersionException, e:
                self.load_fail(tinctest.TINCException, "Invalid product version specified - %s" %self._metadata.get('product_version').strip())

    def _append_docstrings(self, doc):
        # Prepend the given doc to this instance's docstring (despite the
        # method name saying "append").
        self.__doc__ = doc + self.__doc__

    def _get_full_name(self):
        """
        Find the full name of the test. <package>.<module>.<class>.<method>
        """
        # TODO: May be we have to do this only once for each test class.
        method = self.method_name = self._testMethodName
        clazz = self.class_name = self.__class__.__name__
        # To find the package keep going up from the module path
        # until we don't find an __init__.py
        package = ''
        file_name = sys.modules[self.__class__.__module__].__file__
        module = self.module_name = self.__class__.__module__.split('.')[-1]
        if self._module_package_dict[file_name]:
            # Cache hit: package for this module file was computed earlier.
            package = self._module_package_dict[file_name]
        else:
            current_directory = os.path.dirname(file_name)
            # NOTE(review): 'found' is assigned but never read — dead code.
            found = True
            while True:
                if os.path.exists(os.path.join(current_directory, '__init__.py')):
                    # Still inside a package: prepend this directory's name.
                    package = "%s.%s" %(os.path.split(current_directory)[1], package)
                else:
                    break
                current_directory = os.path.abspath(os.path.join(current_directory, os.path.pardir))
            # Strip the trailing '.' left by the loop and memoize the result.
            self._module_package_dict[file_name] = package = package.strip('.')
        self.package_name = package
        full_name = "%s.%s.%s.%s" %(package,
                                    module, clazz, method)
        return full_name

    def _parse_metadata_from_doc(self, docstring):
        """
        parse the meta from the docsting
        """
        if not docstring:
            return
        lines = docstring.splitlines()
        for line in lines:
            line = line.strip()
            # Only lines that begin with '@' carry metadata.
            if line.find('@') != 0:
                continue
            line = line[1:]
            # Require at least "@key value".
            if len(line.split()) <= 1:
                continue
            (key, value) = line.split(' ', 1)
            # 'tags' accumulate across class and method docstrings.
            if self._metadata.has_key(key) and key == 'tags':
                self._metadata[key] += ' ' + value
            # 'gucs' accumulate as a ';'-separated list.
            elif self._metadata.has_key(key) and key== 'gucs':
                self._metadata[key] = self._metadata[key].strip(';') + '; ' + value.strip(';')
            else:
                # Any other repeated key: later docstrings override earlier ones.
                self._metadata[key] = value

    def load_fail(self, exception_class = None, exception_message = ""):
        # Log and raise a loading failure for this test, wrapping the given
        # message with the test's full name. Defaults to tinctest.TINCException.
        if exception_class is None:
            exception_class = tinctest.TINCException
        generic_message = "Loading of test %s failed. " % self.full_name
        exception_message = generic_message + exception_message
        tinctest.logger.error(exception_message)
        raise exception_class(exception_message)

    def setUp(self):
        """
        TINCTestCase's setUp method is responsible for the following:
        -> Skip the test if skip metadata is specified.
        """
        if self.skip:
            self.skipTest(self.skip)
        super(TINCTestCase, self).setUp()

    def cleanup(self):
        '''
        implement this function if there is any special generic cleanup that
        needs to be done by the subclass... if we had a good exception handling
        model we could use this function as a cleanup for each test by adding
        addCleanup
        '''
        pass

    def defaultTestResult(self, stream=None, descriptions=None, verbosity=None):
        # Return a TINC-aware result only when all stream parameters are
        # provided; otherwise fall back to the plain unittest result.
        if stream and descriptions and verbosity:
            return TINCTextTestResult(stream, descriptions, verbosity)
        else:
            return unittest.TestResult()

    def get_product_version(self):
        """
        This function should be implemented by the subclasses to return the currently
        deployed version of the product as a (<product_name>, <version>) tuple.
        For eg: ('gpdb' , '4.2.0.0').
        This will be used to verify the test case compatiblity against the currently deployed
        version of the product. Test case version will be provided as a part of the test case
        metadata 'product_version'
        @rtype: tuple
        @return: A tuple containing the name of the product and the version of the product.
        """
        return None

    def match_metadata(self, key, value):
        """
        This function checks if the value of metadata 'key' matches
        'value'. Default will be a straight case-sensitive string compare.
        Sub-classes should override this method to implement 'metadata' specific match logic.
        For eg: this method takes care of 'tags' by checking if the given value is in the
        instance variable self.tags which is a set instead of doing a string compare.
        @rtype: boolean
        @return: True if the 'value' matches metadata 'key', False otherwise. Note that if the
                 test case instance does not have the metadata 'key', this will return False
        """
        is_match = False
        try:
            meta_attr = getattr(self, key)
        # Bare except: any failure to read the attribute means "no match".
        except:
            return False
        if key == 'tags':
            # tags is a set — membership test rather than equality.
            is_match = value in meta_attr
        else:
            is_match = meta_attr == value
        return is_match

    def collect_files(self):
        """
        Collects files related to the test. This may be log files, core files, etc.,
        and is likely specific to the test (or test type) being run. Should be overriden by
        sub-classes.
        """
        pass

    def _get_absolute_filename(self, filename):
        """
        Returns filename prefixed with the path where
        the TestCase Object resides.
        e.g If the ExpansionTestCase object resides in tincrepo/eating/test_expansion.py
        and if get_absolute_filename('filename') is invoked from anywhere e.g
        tinctest/models/gpdb/expansion/__init__.py, the output will be
        tincrepo/eating/filename.
        """
        source_file = sys.modules[self.__class__.__module__].__file__
        source_dir = os.path.dirname(source_file)
        return os.path.join(source_dir, filename)

    def add_data_provider_test_methods(self):
        """
        If the test has a data provider, this will generate a list of additional tests one for each
        set of data returned by the data provider configured for this test.
        For eg: if a data provider configured for a test 'test_method'
        returns {'key1': 'data1', 'key2': 'data2'}, this method will return a list of tests
        ['test_method_key1', 'test_method_key2'] with self.test_data set to 'data1' and 'data2'
        respectively.
        @rtype: list
        @return: Returns a list of test cases that has the same logic as the given test case with
                 self.test_data set for each generated test case to an item returned by the
                 data provider configured for this test.
        """
        test_name = '%s.%s.%s' %(self.__class__.__module__, self.__class__.__name__, self._testMethodName)
        data_provider_tests = []
        dict_of_test_data_dicts = {}
        # 'data_provider' metadata may name several providers, whitespace-separated.
        for each_data_provider in self.data_provider.strip().split():
            each_data_provider_func = self._find_data_provider(each_data_provider)
            each_test_data_dict = each_data_provider_func()
            if not each_test_data_dict:
                raise tinctest.TINCException("Data provider %s for test %s should return some data" %(each_data_provider, test_name))
            if not type(each_test_data_dict) is dict:
                raise tinctest.TINCException("Data provider %s for test %s should return a dict" %(each_data_provider, test_name))
            dict_of_test_data_dicts[each_data_provider] = each_test_data_dict
        if len(dict_of_test_data_dicts) == 1:
            # Just one data provider. Handle it so that none of the existing usage breaks.
            # test_data will be a simple tuple of data key & data value
            test_data_dict = dict_of_test_data_dicts.values()[0]
            for key, value in test_data_dict.items():
                new_test_method_name = self._testMethodName + '_' + key
                test_tuple = (key, value)
                self._add_new_method(test_tuple, new_test_method_name)
                data_provider_tests.append(new_test_method_name)
        else:
            # Multiple data providers. Need to mix-n-match
            # test_data will be a list of tuples of (data_provider, data_key, data_value)
            data_providers, test_data_dicts = zip(*dict_of_test_data_dicts.items())
            # Cartesian product of all providers' data keys, one generated
            # test per combination.
            product_list = [dict(zip(data_providers, test_data_dict)) for test_data_dict in itertools.product(*test_data_dicts)]
            for each_product in sorted(product_list):
                new_test_method_name = self._testMethodName
                test_tuple = []
                # Sort so generated method names are deterministic across runs.
                for data_provider,test_data_key in sorted(each_product.items()):
                    test_data_value = dict_of_test_data_dicts[data_provider][test_data_key]
                    new_test_method_name = new_test_method_name + '_' + test_data_key
                    test_tuple.append((data_provider, test_data_key, test_data_value))
                self._add_new_method(test_tuple, new_test_method_name)
                data_provider_tests.append(new_test_method_name)
        return data_provider_tests

    def _add_new_method(self, test_tuple, new_method_name):
        # Attach a generated bound method to *this instance* that sets
        # self.test_data and then delegates to the original test method.
        self._orig_testMethodName = self._testMethodName
        def test_function(my_self):
            my_self.test_data = test_tuple
            orig_test_method = getattr(my_self,my_self._orig_testMethodName)
            orig_test_method()
        # Python 2 only: 'new.instancemethod' binds the function to this
        # instance (the 'new' module was removed in Python 3).
        new_test_method = new.instancemethod(test_function,
                                             self, self.__class__)
        self.__dict__[new_method_name] = new_test_method

    def _find_data_provider(self, each_data_provider):
        # Locate the @dataProvider-decorated function whose declared name
        # matches 'each_data_provider', searching the module of every class
        # in this class's MRO.
        data_provider_function = None
        # NOTE(review): this guard is always true at this point — vestigial.
        if not data_provider_function:
            # Check if the definition is found somewhere besides the module file...
            for each_class in inspect.getmro(self.__class__):
                if data_provider_function:
                    break
                functions = inspect.getmembers(inspect.getmodule(each_class), predicate=inspect.isfunction)
                for (name, function) in functions:
                    if hasattr(function, '__is_data_provider__'):
                        if function.__data_provider_name__ == each_data_provider:
                            data_provider_function = function
                            break
        if not data_provider_function:
            test_name = '%s.%s.%s' %(self.__class__.__module__, self.__class__.__name__, self._testMethodName)
            raise tinctest.TINCException("Invalid data provider specified for test - %s" %test_name)
        return data_provider_function
class TINCInvalidProductVersionException(Exception):
    """Raised when 'product_version' test metadata fails validation
    (malformed version strings, ranges, or fillers)."""
class _TINCProductVersionMetadata(object):
    """
    This encapsulates the information given as a part of 'product_version' metadata.

    Parses strings such as
        gpdb: 4.2.6.1, [4.3-4.4], -4.3.1.1
    into per-product lists of included and excluded versions / version ranges.
    A leading '-' marks an exclusion; '['/'(' starts a range.
    """
    def __init__(self, product_version_str=None):
        """
        Parse the given metadata information and form the corresponding product version object.
        """
        # A dictionary of product and a list of _TINCProductVersionRange for inclusive checks
        self.product_version_included = defaultdict(list)
        # A dictionary of product and a list of _TINCProductVersionRange for exclusive checks
        self.product_version_excluded = defaultdict(list)
        self.product_version_str = None
        if product_version_str:
            self.product_version_str = product_version_str.strip()
            self._parse_product_version_metadata()

    def __add__(self, other_product_version_metadata):
        # Union of two metadata objects; strings are parsed first.
        # (Python 2 'basestring' — covers str and unicode.)
        if isinstance(other_product_version_metadata, basestring):
            other = _TINCProductVersionMetadata(other_product_version_metadata)
        else:
            other = other_product_version_metadata
        result = _TINCProductVersionMetadata()
        result.product_version_included = copy.deepcopy(self.product_version_included)
        result.product_version_excluded = copy.deepcopy(self.product_version_excluded)
        for product in other.product_version_included:
            for version in other.product_version_included[product]:
                # Skip literal duplicates (>= 0 means already present).
                if result._contains_version_included(product, version) >= 0:
                    continue
                result.product_version_included[product].append(version)
        for product in other.product_version_excluded:
            for version in other.product_version_excluded[product]:
                if result._contains_version_excluded(product, version) >= 0:
                    continue
                result.product_version_excluded[product].append(version)
        return result

    def __sub__(self, other_product_version_metadata):
        # Remove from self every version/range literally present in 'other',
        # dropping a product's entry entirely once its list becomes empty.
        if isinstance(other_product_version_metadata, basestring):
            other = _TINCProductVersionMetadata(other_product_version_metadata)
        else:
            other = other_product_version_metadata
        result = _TINCProductVersionMetadata()
        result.product_version_included = copy.deepcopy(self.product_version_included)
        result.product_version_excluded = copy.deepcopy(self.product_version_excluded)
        for product in other.product_version_included:
            for version in other.product_version_included[product]:
                idx = result._contains_version_included(product, version)
                if idx >= 0:
                    del result.product_version_included[product][idx]
                    if not result.product_version_included[product]:
                        del result.product_version_included[product]
        for product in other.product_version_excluded:
            for version in other.product_version_excluded[product]:
                idx = result._contains_version_excluded(product, version)
                if idx >= 0:
                    del result.product_version_excluded[product][idx]
                    if not result.product_version_excluded[product]:
                        del result.product_version_excluded[product]
        return result

    def __eq__(self, other_product_version_metadata):
        """
        Two product version metadata objects are considered equal if all product version included
        and product version excluded objects are the same in both
        Uses literal comparisons when comparing version /version range objects
        """
        if isinstance(other_product_version_metadata, basestring):
            other = _TINCProductVersionMetadata(other_product_version_metadata)
        else:
            other = other_product_version_metadata
        # Working copies are whittled down as matches are found; anything left
        # over afterwards means the two objects differ.
        temp_self = _TINCProductVersionMetadata()
        temp_self.product_version_included = copy.deepcopy(self.product_version_included)
        temp_self.product_version_excluded = copy.deepcopy(self.product_version_excluded)
        temp_other = _TINCProductVersionMetadata()
        temp_other.product_version_included = copy.deepcopy(other.product_version_included)
        temp_other.product_version_excluded = copy.deepcopy(other.product_version_excluded)
        for product in self.product_version_included:
            for version in self.product_version_included[product]:
                if other._contains_version_included(product, version) >= 0:
                    del temp_self.product_version_included[product][0]
                    if not temp_self.product_version_included[product]:
                        del temp_self.product_version_included[product]
                    del temp_other.product_version_included[product][0]
                    if not temp_other.product_version_included[product]:
                        del temp_other.product_version_included[product]
                else:
                    return False
        if temp_self.product_version_included or temp_other.product_version_included:
            # This means there are unmatched elements in one of the lists
            return False
        for product in self.product_version_excluded:
            for version in self.product_version_excluded[product]:
                if other._contains_version_excluded(product, version) >= 0:
                    del temp_self.product_version_excluded[product][0]
                    if not temp_self.product_version_excluded[product]:
                        del temp_self.product_version_excluded[product]
                    del temp_other.product_version_excluded[product][0]
                    if not temp_other.product_version_excluded[product]:
                        del temp_other.product_version_excluded[product]
                else:
                    return False
            # NOTE(review): this checks product_version_*included* while inside
            # the *excluded* loop — looks like a copy-paste slip; verify intent.
            if temp_self.product_version_included[product] or temp_other.product_version_included[product]:
                # This means there are unmatched elements in one of the lists
                return False
        if temp_self.product_version_excluded or temp_other.product_version_excluded:
            # This means there are unmatched elements in one of the lists
            return False
        return True

    def _contains_version_included(self, product, version):
        """
        Verifies if a given product , version / version range is present in the inclusive ranges
        Uses literal comparisons.
        @return: index of the literal match in the product's list, or -1.
        """
        if not product in self.product_version_included:
            return -1
        version_included_list = self.product_version_included[product]
        for item, idx in zip(version_included_list, range(len(version_included_list))):
            # Only compare like with like (version vs version, range vs range).
            if type(version) == type(item) and version.is_literal_match(item):
                return idx
        return -1

    def _contains_version_excluded(self, product, version):
        """
        Verifies if a given product , version / version range is present in the exclusive ranges
        Uses literal comparisons
        @return: index of the literal match in the product's list, or -1.
        """
        if not product in self.product_version_excluded:
            return -1
        version_excluded_list = self.product_version_excluded[product]
        for item, idx in zip(version_excluded_list, range(len(version_excluded_list))):
            if type(version) == type(item) and version.is_literal_match(item):
                return idx
        return -1

    def __str__(self):
        # Prefer the original metadata string when one was parsed.
        if self.product_version_str:
            return self.product_version_str
        else:
            if not self.product_version_included and not self.product_version_excluded:
                return str(None)
            return self._get_product_version_metadata_string()

    def _get_product_version_metadata_string(self):
        """
        Return a string a representation of product version metadata
        """
        version_str_list = []
        for item in self.product_version_included:
            version_str_list.append("%s : %s" %(item,
                                    ", ".join(str(element) for element in self.product_version_included[item])))
        for item in self.product_version_excluded:
            # Excluded entries are rendered with a leading '-'.
            version_str_list.append("%s : %s" %(item,
                                    ", ".join('-%s' %str(element) for element in self.product_version_excluded[item])))
        return ", ".join(version_str_list)

    def _parse_product_version_metadata(self):
        # product_version_str will be in the format of:
        # gpdb: 4.2.6.1, [4.3 - 4.4], -4.3.1.1, (4.5-4.6), -(4.5.1.1-4.5.1.3), hawk: ...
        multiple_versions = []
        if ',' in self.product_version_str:
            multiple_versions = self.product_version_str.split(',')
        else:
            multiple_versions.append(self.product_version_str)
        product_attribute = None
        for product_version in multiple_versions:
            product_exists = re.search('(.*):(.*)', product_version)
            if product_exists:
                # product is defined; Store it to use it for subsequent versions
                product_attribute = product_exists.group(1)
                product_version = product_exists.group(2)
            if product_attribute:
                product_attribute = product_attribute.strip()
            else:
                # A version appeared before any "product:" prefix.
                raise TINCInvalidProductVersionException("Given product version %s is invalid. " %self.product_version_str)
            product_version = product_version.strip()
            if product_version.startswith('-'):
                # add it to exclusive
                product_version = product_version.strip('-')
                if product_version.startswith('[') or product_version.startswith('('):
                    self.product_version_excluded[product_attribute].append(_TINCProductVersionRange(product_version))
                else:
                    self.product_version_excluded[product_attribute].append(_TINCProductVersion(product_version))
            else:
                # add it to inclusive
                if product_version.startswith('[') or product_version.startswith('('):
                    self.product_version_included[product_attribute].append(_TINCProductVersionRange(product_version))
                else:
                    self.product_version_included[product_attribute].append(_TINCProductVersion(product_version))

    def match_product_version(self, product, version_str):
        """
        Given a product and a version string, verify if there is a match
        in the product version metadata
        @return: True only if the version falls in some inclusive entry and
                 in no exclusive entry for the product; False otherwise
                 (including when the product is not mentioned at all).
        """
        # Create a local version object
        product = product.strip()
        version_str = version_str.strip()
        dut_version_object = _TINCProductVersion(version_str, filler=str(_TINCProductVersion._upper_bound))
        # Exclusives always take precedence! First, check if dut version is there
        for iter_version_object in self.product_version_excluded[product]:
            if iter_version_object.match_version(dut_version_object):
                tinctest.logger.info("Product %s version %s does not fall in product_version metadata - %s" %(product, version_str, self.product_version_str))
                return False
        # Check inclusives
        for iter_version_object in self.product_version_included[product]:
            if iter_version_object.match_version(dut_version_object):
                return True
        # By default we return False, if the product is not specified in product version
        tinctest.logger.info("Product %s version %s does not fall in product_version metadata - %s" %(product, version_str, self.product_version_str))
        return False
class _TINCProductVersionRange(object):
    """
    Encapsulates a specific range of a product version.

    A range string looks like "[4.3-4.4]" or "(4.5-4.6)": square brackets are
    inclusive bounds, parentheses exclusive. Either bound may be omitted —
    a missing lower bound defaults to 0.0.0.0 and a missing upper bound to
    'main' (the highest possible version).
    """
    def __init__(self, version_range_str):
        self.version_range_str = version_range_str.strip()
        # _TINCProductVersion objects, normalized to inclusive bounds by
        # _parse_product_version_range below.
        self.upper_bound_version = None
        self.lower_bound_version = None
        self.upper_bound_inclusive = None
        self.lower_bound_inclusive = None
        self._parse_product_version_range()

    def _parse_product_version_range(self):
        """
        Ranges should be in the format [range1-range2], (range1-range2),
        (range1-), (-range2) and any combination of parenthesis
        '[]' means inclusive
        '()' means exclusive
        """
        version_range_pattern = r"""^(?P<lb_inc>\(|\[) # Matches the first bracket
                                    (?P<lb>.*?) # Matches the lower bound version string
                                    - #Matches the range separator
                                    (?P<ub>.*?) # Matches the upper bound version string
                                    (?P<ub_inc>\)|\])$ # Matches the last bracket
                                 """
        matches = re.match(version_range_pattern, self.version_range_str, flags = re.I | re.X)
        if not matches:
            raise TINCInvalidProductVersionException("Given version range string %s is invalid." %self.version_range_str)
        lb = matches.group('lb')
        lb_inc = matches.group('lb_inc')
        ub_inc = matches.group('ub_inc')
        ub = matches.group('ub')
        self.lower_bound_inclusive = True if lb_inc == '[' else False
        self.upper_bound_inclusive = True if ub_inc == ']' else False
        # Missing parts of a bound are filled towards the permissive side:
        # lower bound pads with 0, upper bound pads with 99.
        self.lower_bound_version = _TINCProductVersion(lb, filler=str(_TINCProductVersion._lower_bound)) if lb else _TINCProductVersion('0.0.0.0')
        self.upper_bound_version = _TINCProductVersion(ub, filler=str(_TINCProductVersion._upper_bound)) if ub else _TINCProductVersion('main')
        # Incr / decr upper bound and lower bound versions so that both
        # bounds can subsequently be treated as inclusive by match_version.
        if not self.lower_bound_inclusive:
            self.lower_bound_version = self.lower_bound_version.incr()
        if not self.upper_bound_inclusive:
            self.upper_bound_version = self.upper_bound_version.decr()
        # Assert that upper bound version is greater than lower bound version
        if not self.upper_bound_version > self.lower_bound_version:
            raise TINCInvalidProductVersionException("Upper bound version %s should be greater than lower bound version %s" %(self.upper_bound_version,
                                                                                                                              self.lower_bound_version))

    def is_literal_match(self, other):
        # Literal (string-level) equality of both bounds; parses 'other'
        # if it is given as a range string.
        if not isinstance(other, _TINCProductVersionRange):
            other = _TINCProductVersionRange(other)
        if not self.upper_bound_version.is_literal_match(other.upper_bound_version) or \
           not self.lower_bound_version.is_literal_match(other.lower_bound_version):
            return False
        return True

    def match_version(self, version):
        """
        Given a __TINCProductVersion, verify if it falls within
        this range.
        """
        if not isinstance(version, _TINCProductVersion):
            version = _TINCProductVersion(version)
        # Bounds were normalized to inclusive in _parse_product_version_range.
        if version >= self.lower_bound_version and version <= self.upper_bound_version:
            return True
        return False

    def __cmp__(self, other):
        # Python 2 rich-comparison fallback: order by lower bound first,
        # then by upper bound ('cmp' builtin is Python 2 only).
        if not isinstance(other, _TINCProductVersionRange):
            other = _TINCProductVersionRange(other)
        if (self.lower_bound_version == other.lower_bound_version) and \
           (self.upper_bound_version == other.upper_bound_version):
            return 0
        if self.lower_bound_version != other.lower_bound_version:
            return cmp(self.lower_bound_version, other.lower_bound_version)
        return cmp(self.upper_bound_version, other.upper_bound_version)

    def __str__(self):
        return self.version_range_str
class _TINCProductVersion(object):
"""
This can just be a wrapper around gpversion.GpVersion. However since gpversion does
not take care of hotfixes, we should include that here.
"""
_main_version = '99.99.99.99'
_lower_bound = 0
_upper_bound = 99
def __init__(self, version_str, filler='x'):
self.version_str = version_str.strip() if version_str else None
self.filler = filler.strip()
# The four part version. Use filler for empty part numbers
# Should either be a digit or 'x' which is the allowed wildcard.
self.version = []
# The hotfix string
self.hotfix = None
self._parse_version_str(filler)
def _parse_version_str(self, filler):
"""
Possible regex:
4.2.x, 4.2.x.x, 4.2, 4.x, x, 4.2.1.3, 4.2.1.3A, 4.2.1.3B, etc
It is the user's responsibility to pass a string like this to
__TINCProductVersion__.
"""
# filler should just be 'x' or 'digits'
filler_pattern = r"""^(x|\d+)$"""
if not re.match(filler_pattern, filler):
raise TINCInvalidProductVersionException("Invalid filler specified. Should be 'x' or 'digits'")
# '' or 'x' or None means a complete wild card match.
if not self.version_str or self.version_str == 'x' or self.version_str == 'X':
self.version.extend(['x'] * 4)
return
if self.version_str.lower() == 'main':
self.version.extend(self._main_version.split('.'))
return
_version_str_pattern = r"""^(?P<majors>((x|\d+)\.){1,3}) # Matches upto first three parts of a four part version x. , x.x. , x.x.x.
(?P<minor>x|\d+) # Matches the final part of a four part version
(?P<hotfix>([a-z]+\d*)*)$ # Matches the hotfix part of the version string
"""
matches = re.match(_version_str_pattern, self.version_str, flags = re.I | re.X)
if not matches:
raise TINCInvalidProductVersionException("Given version string %s is invalid." %self.version_str)
majors = matches.group('majors')
minor = matches.group('minor')
hotfix = matches.group('hotfix')
#majors now have to be of the form x. , x.x. or x.x.x.
#filter it to remove the last part which will be None
major_parts = [x.strip() for x in filter(None, majors.split('.'))]
if len(major_parts) > 3 or len(major_parts) < 1:
raise TINCInvalidProductVersionException("Given version string %s is invalid." %self.version_str)
# minor should not be none
if not minor:
raise TINCInvalidProductVersionException("Given version string %s is invalid." %self.version_str)
self.version.extend([x.lower() for x in major_parts])
self.version.append(minor.lower())
#hotfix should be given only if the whole version is given
# For eg: 4.2MS1 will be invalid, 4.2.3.5MS1 , 4.2.2.4MS1 is valid,
# 4.2.xMS1 will also be invalid, 4.2.x.xMS1 will also be invalid
if (len(self.version) < 4 and hotfix) or ('x' in self.version and hotfix):
raise TINCInvalidProductVersionException("Given version string %s is invalid." %self.version_str + \
"Hotfix can be provided only with four part versions.")
if hotfix:
self.hotfix = hotfix.strip()
# If version does not have four parts , fill the list with 'filler' to make it a four part version
if len(self.version) < 4:
self.version.extend([filler.lower()] * (4 - len(self.version)))
# Strip all version components
self.version = [x.strip() for x in self.version]
def incr(self):
"""
Utility method to increment the version. 4.2.0.0 -> 4.2.0.1
4.2.99.99 -> 4.3.0.0
Returns a new version object after the increment operation
"""
if 'x' in self.version:
return _TINCProductVersion(self.version_str)
"""
Version with hotfix increment supported.
4.3.1.0LA1 -> 4.3.1.0LA2
Assumptions: the hotfix digits will cross double digit
"""
if self.hotfix:
hotfix_filter = re.compile("([a-zA-Z]+)([0-9]*)")
hotfix_split = hotfix_filter.match(self.hotfix)
if len(hotfix_split.groups()) > 1:
if hotfix_split.group(2) == "":
raise TINCInvalidProductVersionException("Given version string %s is invalid." %self.version_str + \
"\nIncrement Operation not supported on hotfix versions without digits at the end \
Hotfix should end with a number \"LA1\", \"MS1\"")
else:
num = int(hotfix_split.group(2)) + 1
new_version = str(".".join(str(x) for x in self.version))
new_version = new_version + hotfix_split.group(1) + str(num)
return _TINCProductVersion(new_version)
else:
raise TINCInvalidProductVersionException("Given version string %s is invalid." %self.version_str + \
"\nIncrement Operation not supported on hotfix versions without digits at the end \
Hotfix should end with a number \"LA1\", \"MS1\"")
# If it is main version , return
if self == _TINCProductVersion('main'):
return _TINCProductVersion('main')
new_version = []
do_incr = True
for i, v in enumerate(reversed(self.version)):
if do_incr:
new_version.append(str((int(v) + 1) % (self._upper_bound + 1)))
else:
new_version.append(v)
# Stop incr if we do not wrap around
if not new_version[i] == '0':
do_incr = False
return _TINCProductVersion('.'.join(list(reversed(new_version))))
def decr(self):
"""
Reverse of incr
Utility method to decrement the version. 4.2.0.0 -> 4.1.99.99
4.2.99.99 -> 4.2.99.98, 4.2.99.0 -> 4.2.98.99
Returns a new version object after the decrement operation
"""
if 'x' in self.version:
return _TINCProductVersion(self.version_str)
"""
Version with hotfix decrement supported.
4.3.1.0LA5 -> 4.3.1.0LA4
4.3.1.0LA1 -> 4.3.1.0 hotfix section removed
"""
if self.hotfix:
hotfix_filter = re.compile("([a-zA-Z]+)([0-9]*)")
hotfix_split = hotfix_filter.match(self.hotfix)
if len(hotfix_split.groups()) > 1:
if hotfix_split.group(2) == "":
raise TINCInvalidProductVersionException("Given version string %s is invalid." %self.version_str + \
"\nDecrement Operation not supported on hotfix versions without digits at the end \
Hotfix should end with a number Ex.: \"LA1\", \"MS1\"")
if int(hotfix_split.group(2)) ==1:
new_version = str(".".join(str(x) for x in self.version))
return _TINCProductVersion(new_version)
else:
num = int(hotfix_split.group(2)) - 1
new_version = str(".".join(str(x) for x in self.version))
new_version = new_version + hotfix_split.group(1) + str(num)
return _TINCProductVersion(new_version)
else:
raise TINCInvalidProductVersionException("Given version string %s is invalid." %self.version_str + \
"\nDecrement Operation not supported on hotfix versions without digits at the end \
Hotfix should end with a number Ex.: \"LA1\", \"MS1\"")
# If it is main version , return
if self == _TINCProductVersion('0.0.0.0'):
return _TINCProductVersion('0.0.0.0')
new_version = []
do_decr = True
for i, v in enumerate(reversed(self.version)):
if do_decr:
new_version.append(str((int(v) - 1) % (self._upper_bound + 1)))
else:
new_version.append(v)
# Stop decr if we do not wrap around
if not new_version[i] == '99':
do_decr = False
return _TINCProductVersion('.'.join(list(reversed(new_version))))
def match_version(self, other):
"""
Find if the other version object is equal to this version object
"""
if not isinstance(other, _TINCProductVersion):
raise TINCInvalidProductVersionException("Comparison supported only between two version instances.")
if self == other:
return True
return False
def is_literal_match(self, other):
"""
Find if the other version object is literally equal to this version object.
This means an exact match for the version_str
"""
version1 = str(self)
version2 = str(other)
return version1.lower() == version2.lower()
    def __cmp__(self, other):
        """
        Implement comparison operations. Take into account hotfix versions.
        """
        # NOTE: relies on the Python 2 __cmp__ protocol and the cmp() builtin.
        if not isinstance(other, _TINCProductVersion):
            raise TINCInvalidProductVersionException("Comparison supported only between two version instances.")
        # Compare the list. Take into account the wildcard 'x'
        for x, y in zip(self.version, other.version):
            # A wildcard component matches any value on the other side.
            if x.lower() == 'x' or y.lower() == 'x':
                continue
            if int(x) < int(y):
                return -1
            if int(x) > int(y):
                return 1
        # If it gets here , versions are equal so far and we have to do hotfix match
        # Do hotfix match only if there are no wildcards in the version. We want to consider
        # 4.2.x to be equal to 4.2.1.0A 4.2.2.1B etc
        # If the given version does not have a wild card such as 4.2.1.0 then we dont consider
        # to be equal to 4.2.1.0A
        # Also if the user specifies 4.2.x.1 (which is a corner case), then we dont do hotfix match
        # For eg: 4.2.x.1 will be considered equal to 4.2.9.1A 4.2.10.1 4.2.10.1B etc
        if 'x' in self.version or 'x' in other.version:
            return 0
        # If either hotfix is missing/empty, plain cmp() decides (a version
        # without a hotfix compares unequal to one with a hotfix).
        if not self.hotfix or not other.hotfix:
            return cmp(self.hotfix, other.hotfix)
        # If both are not None, do case insensitive comparison
        return cmp(self.hotfix.lower(), other.hotfix.lower())
def __str__(self):
if not self.version_str:
return ".".join(self.version)
return self.version_str
| {
"content_hash": "070d857180fe7e36f07bafdf922426b1",
"timestamp": "",
"source": "github",
"line_count": 1012,
"max_line_length": 158,
"avg_line_length": 45.70652173913044,
"alnum_prop": 0.6021835477245703,
"repo_name": "lintzc/gpdb",
"id": "47c421e5c3f74ade438bb462747c6b5769a34e34",
"size": "46255",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/test/tinc/tinctest/case.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5665"
},
{
"name": "Batchfile",
"bytes": "11028"
},
{
"name": "C",
"bytes": "35144943"
},
{
"name": "C++",
"bytes": "3731160"
},
{
"name": "CMake",
"bytes": "17118"
},
{
"name": "CSS",
"bytes": "7068"
},
{
"name": "Csound Score",
"bytes": "179"
},
{
"name": "Cucumber",
"bytes": "829167"
},
{
"name": "DTrace",
"bytes": "1160"
},
{
"name": "Fortran",
"bytes": "14777"
},
{
"name": "GDB",
"bytes": "576"
},
{
"name": "Groff",
"bytes": "631842"
},
{
"name": "HTML",
"bytes": "169455"
},
{
"name": "Java",
"bytes": "307541"
},
{
"name": "Lex",
"bytes": "196276"
},
{
"name": "M4",
"bytes": "78510"
},
{
"name": "Makefile",
"bytes": "431523"
},
{
"name": "Objective-C",
"bytes": "22149"
},
{
"name": "PLSQL",
"bytes": "190501"
},
{
"name": "PLpgSQL",
"bytes": "8131027"
},
{
"name": "Perl",
"bytes": "3933982"
},
{
"name": "Perl6",
"bytes": "14219"
},
{
"name": "Python",
"bytes": "9229659"
},
{
"name": "Ruby",
"bytes": "21343"
},
{
"name": "SQLPL",
"bytes": "1860160"
},
{
"name": "Shell",
"bytes": "484246"
},
{
"name": "XS",
"bytes": "8405"
},
{
"name": "XSLT",
"bytes": "5779"
},
{
"name": "Yacc",
"bytes": "486035"
}
],
"symlink_target": ""
} |
'''Plist operation'''
from __future__ import unicode_literals
import re
import zipfile
import biplist
def get_plist_from_ipa(path):
    '''Get the raw Info.plist data out of an IPA archive.

    @param path: IPA file path
    @return plist bytes (still needs plist parsing)
    @raise IOError: the archive (or the plist entry) cannot be read
    @raise NameError: the archive contains no Payload/*.app/Info.plist
    '''
    plist_file_re = re.compile(r'Payload/.*\.app/Info.plist')
    try:
        ipa = zipfile.ZipFile(path)
    except (IOError, OSError, zipfile.BadZipfile):
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit pass through.
        raise IOError('Failed to read IPA file')
    try:
        # `for ... else`: the else branch runs only when no entry matched.
        for file_name in ipa.namelist():
            if plist_file_re.match(file_name):
                plist_path = file_name
                break
        else:
            raise NameError('Info.plist not found in this IPA')
        try:
            plist = ipa.read(plist_path)
        except (IOError, OSError, KeyError, zipfile.BadZipfile):
            raise IOError('Fail to read plist')
    finally:
        # Original leaked the ZipFile handle; always close it.
        ipa.close()
    return plist
def get_info(path):
    '''Extract basic app metadata from an IPA's Info.plist.

    @param path: IPA file path
    @return {'version': '', 'build': '', 'identifier': '', 'name': ''}
    @raise IOError: the IPA or its plist cannot be read/parsed
    '''
    base_attrs = {
        'version': b'CFBundleShortVersionString',
        'build': b'CFBundleVersion',
        'identifier': b'CFBundleIdentifier',
        'name': b'CFBundleDisplayName',
    }
    try:
        plist = biplist.readPlistFromString(get_plist_from_ipa(path))
    except Exception:
        # Narrowed from a bare `except:`; still converts any parse/read
        # failure into the IOError callers expect.
        raise IOError('Fail to read plist')
    # Default missing keys to b'' -> '' instead of crashing with
    # AttributeError on None.decode() (e.g. apps without CFBundleDisplayName).
    # NOTE(review): assumes biplist returns bytes values for these keys -- confirm.
    return {k: plist.get(v, b'').decode() for k, v in base_attrs.items()} #pylint: disable=E1103
| {
"content_hash": "ee655b07036f7cf536fc212b802f0882",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 91,
"avg_line_length": 22.933333333333334,
"alnum_prop": 0.592296511627907,
"repo_name": "zqqf16/wad",
"id": "69663cf625ef6e0feca0fe108328e46476a31ff9",
"size": "1399",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wad/plist.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1097"
},
{
"name": "Python",
"bytes": "10095"
}
],
"symlink_target": ""
} |
__author__ = 'IRSEN'
# -*- coding: utf-8 -*-
import mysql.connector
from model.group import Group
from model.contact import Contact
class DbFixture:
    """Read-only access to the addressbook MySQL database for test fixtures."""

    def __init__(self, host, name, user, password):
        """Open a MySQL connection.

        :param host: database server host
        :param name: database (schema) name
        :param user: database user
        :param password: database password
        """
        self.host = host
        self.name = name
        self.user = user
        self.password = password
        self.connection = mysql.connector.connect(host=host, database=name, user=user, password=password)
        self.connection.autocommit = True  # disable db-side transaction caching so reads are always fresh

    def get_group_list(self):
        """Return all groups as Group model objects (ids as strings)."""
        groups = []  # renamed from `list`/`id`: never shadow builtins
        cursor = self.connection.cursor()
        try:
            cursor.execute("select group_id, group_name, group_header, group_footer from group_list")
            for (group_id, name, header, footer) in cursor:
                groups.append(Group(id=str(group_id), name=name, header=header, footer=footer))
        finally:
            cursor.close()
        return groups

    def get_contact_list(self):
        """Return all non-deleted contacts as Contact model objects."""
        contacts = []
        cursor = self.connection.cursor()
        try:
            cursor.execute('''select id, firstname, lastname, address, home, mobile, work, phone2, email, email2, email3
                              from addressbook where deprecated="0000-00-00 00:00:00"''')
            for (contact_id, firstname, lastname, address, home, mobile, work, phone2, email, email2, email3) in cursor:
                contacts.append(Contact(id=str(contact_id), firstname=firstname, lastname=lastname, address=address,
                                        homephone=home, mobilephone=mobile, workphone=work, secondaryphone=phone2,
                                        email=email, email2=email2, email3=email3))
        finally:
            cursor.close()
        return contacts

    def destroy(self):
        """Close the database connection."""
        self.connection.close()
"content_hash": "8e2b9202872085f98be50ad546475ddd",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 120,
"avg_line_length": 38.97826086956522,
"alnum_prop": 0.5883993307306191,
"repo_name": "nyblinnn/python_training_for_testers",
"id": "62086e3fe403d847ff92d0a0602f58e760f4f79d",
"size": "1816",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fixture/db.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "35228"
}
],
"symlink_target": ""
} |
""".. Ignore pydocstyle D400.
==================
Rating API filters
==================
"""
from django_filters import rest_framework as filters
from rolca.rating.models import Rating
class RatingFilter(filters.FilterSet):
    """Filter for the Rating API endpoint."""

    # Filters ratings by the theme of the related submission.
    theme = filters.CharFilter(field_name="submission__theme")

    class Meta:
        model = Rating
        fields = {
            "submission": ["exact"],
        }
| {
"content_hash": "a66e44eee476e1cf33e6b5287f72db7b",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 62,
"avg_line_length": 20.136363636363637,
"alnum_prop": 0.5959367945823928,
"repo_name": "dblenkus/rolca",
"id": "129e5369811ef4c120e7fa30f28ec59c61916d9c",
"size": "443",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/rolca/rating/api/filters.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1186"
},
{
"name": "Python",
"bytes": "109206"
},
{
"name": "Shell",
"bytes": "345"
}
],
"symlink_target": ""
} |
from collections import Counter, defaultdict
from itertools import chain
from operator import attrgetter
from sqlalchemy import inspect
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.ext.hybrid import hybrid_property
from indico.core.db import db
from indico.core.db.sqlalchemy import PyIntEnum, UTCDateTime
from indico.core.db.sqlalchemy.descriptions import DescriptionMixin, RenderMode
from indico.core.db.sqlalchemy.util.models import auto_table_args
from indico.modules.events.abstracts.models.reviews import AbstractAction
from indico.modules.events.abstracts.settings import AllowEditingType
from indico.modules.events.contributions.models.contributions import CustomFieldsMixin, _get_next_friendly_id
from indico.modules.events.contributions.models.persons import AuthorType
from indico.modules.events.models.persons import AuthorsSpeakersMixin
from indico.modules.events.models.reviews import ProposalMixin, ProposalRevisionMixin
from indico.util.date_time import now_utc
from indico.util.enum import IndicoEnum, RichIntEnum
from indico.util.i18n import _
from indico.util.locators import locator_property
from indico.util.string import MarkdownText, format_repr, text_to_repr
class AbstractState(RichIntEnum):
    """Lifecycle state of an abstract; the int values are persisted in the DB."""
    # Index 0 is unused so titles/css classes line up with the 1-based values.
    __titles__ = [None, _('Submitted'), _('Withdrawn'), _('Accepted'), _('Rejected'), _('Merged'), _('Duplicate'),
                  _('Invited')]
    __css_classes__ = [None, '', 'outline dashed', 'success', 'error', 'visited', 'strong', 'warning']
    submitted = 1
    withdrawn = 2
    accepted = 3
    rejected = 4
    merged = 5
    duplicate = 6
    invited = 7
class AbstractPublicState(RichIntEnum):
    """User-facing state of an abstract.

    Mirrors the non-'submitted' values of :class:`AbstractState` and replaces
    'submitted' with two negative pseudo-states depending on review progress.
    """
    __titles__ = {i: title for i, title in enumerate(AbstractState.__titles__[2:], 2)}
    __titles__.update({-1: _('Awaiting Review'), -2: _('Under Review')})
    __css_classes__ = {i: css_class for i, css_class in enumerate(AbstractState.__css_classes__[2:], 2)}
    __css_classes__.update({-1: '', -2: 'highlight'})
    # regular states (must match AbstractState!)
    withdrawn = 2
    accepted = 3
    rejected = 4
    merged = 5
    duplicate = 6
    invited = 7
    # special states
    awaiting = -1
    under_review = -2
class AbstractReviewingState(RichIntEnum):
    """Aggregated outcome of the reviews across an abstract's tracks."""
    __titles__ = [_('Not Started'), _('In progress'), _('Positive'), _('Conflicting'), _('Negative'), _('Mixed')]
    __css_classes__ = ['', '', 'success', '', 'error', 'warning']
    not_started = 0
    in_progress = 1
    positive = 2
    conflicting = 3
    negative = 4
    mixed = 5
class EditTrackMode(int, IndicoEnum):
    """Which of an abstract's track lists may currently be edited."""
    none = 0
    both = 1
    reviewed_for = 2
class Abstract(ProposalMixin, ProposalRevisionMixin, DescriptionMixin, CustomFieldsMixin, AuthorsSpeakersMixin,
               db.Model):
    """An abstract that can be associated to a Contribution."""

    __tablename__ = 'abstracts'
    # DB-level invariants: judgment-related columns may only be set when the
    # abstract is in the corresponding state.
    __auto_table_args = (db.Index(None, 'friendly_id', 'event_id', unique=True,
                                  postgresql_where=db.text('NOT is_deleted')),
                         db.CheckConstraint('(state = {}) OR (accepted_track_id IS NULL)'
                                            .format(AbstractState.accepted),
                                            name='accepted_track_id_only_accepted'),
                         db.CheckConstraint('(state = {}) OR (accepted_contrib_type_id IS NULL)'
                                            .format(AbstractState.accepted),
                                            name='accepted_contrib_type_id_only_accepted'),
                         db.CheckConstraint('(state = {}) = (merged_into_id IS NOT NULL)'
                                            .format(AbstractState.merged),
                                            name='merged_into_id_only_merged'),
                         db.CheckConstraint('(state = {}) = (duplicate_of_id IS NOT NULL)'
                                            .format(AbstractState.duplicate),
                                            name='duplicate_of_id_only_duplicate'),
                         db.CheckConstraint('(state IN ({}, {}, {}, {})) = (judge_id IS NOT NULL)'
                                            .format(AbstractState.accepted, AbstractState.rejected,
                                                    AbstractState.merged, AbstractState.duplicate),
                                            name='judge_if_judged'),
                         db.CheckConstraint('(state IN ({}, {}, {}, {})) = (judgment_dt IS NOT NULL)'
                                            .format(AbstractState.accepted, AbstractState.rejected,
                                                    AbstractState.merged, AbstractState.duplicate),
                                            name='judgment_dt_if_judged'),
                         db.CheckConstraint(f'(state != {AbstractState.invited}) OR (uuid IS NOT NULL)',
                                            name='uuid_if_invited'),
                         {'schema': 'event_abstracts'})

    possible_render_modes = {RenderMode.markdown}
    default_render_mode = RenderMode.markdown
    marshmallow_aliases = {'_description': 'content'}

    # Proposal mixin properties
    proposal_type = 'abstract'
    call_for_proposals_attr = 'cfa'
    delete_comment_endpoint = 'abstracts.delete_abstract_comment'
    create_comment_endpoint = 'abstracts.comment_abstract'
    edit_comment_endpoint = 'abstracts.edit_abstract_comment'
    create_review_endpoint = 'abstracts.review_abstract'
    edit_review_endpoint = 'abstracts.edit_review'
    create_judgment_endpoint = 'abstracts.judge_abstract'
    revisions_enabled = False

    AUTHORS_SPEAKERS_DISPLAY_ORDER_ATTR = 'display_order_key_lastname'

    @declared_attr
    def __table_args__(cls):
        return auto_table_args(cls)

    id = db.Column(
        db.Integer,
        primary_key=True
    )
    # Access token; required (non-NULL) for invited abstracts (see constraint above).
    uuid = db.Column(
        UUID,
        index=True,
        unique=True,
        nullable=True
    )
    # Event-local sequential ID shown to users.
    friendly_id = db.Column(
        db.Integer,
        nullable=False,
        default=_get_next_friendly_id
    )
    event_id = db.Column(
        db.Integer,
        db.ForeignKey('events.events.id'),
        index=True,
        nullable=False
    )
    title = db.Column(
        db.String,
        nullable=False
    )
    #: ID of the user who submitted the abstract
    submitter_id = db.Column(
        db.Integer,
        db.ForeignKey('users.users.id'),
        index=True,
        nullable=False
    )
    submitted_contrib_type_id = db.Column(
        db.Integer,
        db.ForeignKey('events.contribution_types.id', ondelete='SET NULL'),
        nullable=True,
        index=True
    )
    submitted_dt = db.Column(
        UTCDateTime,
        nullable=False,
        default=now_utc
    )
    modified_by_id = db.Column(
        db.Integer,
        db.ForeignKey('users.users.id'),
        nullable=True,
        index=True
    )
    modified_dt = db.Column(
        UTCDateTime,
        nullable=True,
    )
    state = db.Column(
        PyIntEnum(AbstractState),
        nullable=False,
        default=AbstractState.submitted
    )
    submission_comment = db.Column(
        db.Text,
        nullable=False,
        default=''
    )
    #: ID of the user who judged the abstract
    judge_id = db.Column(
        db.Integer,
        db.ForeignKey('users.users.id'),
        index=True,
        nullable=True
    )
    # Raw markdown source; exposed through the `judgment_comment` hybrid property.
    _judgment_comment = db.Column(
        'judgment_comment',
        db.Text,
        nullable=False,
        default=''
    )
    judgment_dt = db.Column(
        UTCDateTime,
        nullable=True,
    )
    accepted_track_id = db.Column(
        db.Integer,
        db.ForeignKey('events.tracks.id', ondelete='SET NULL'),
        nullable=True,
        index=True
    )
    accepted_contrib_type_id = db.Column(
        db.Integer,
        db.ForeignKey('events.contribution_types.id', ondelete='SET NULL'),
        nullable=True,
        index=True
    )
    # Self-referential FKs used for the `merged` / `duplicate` states.
    merged_into_id = db.Column(
        db.Integer,
        db.ForeignKey('event_abstracts.abstracts.id'),
        index=True,
        nullable=True
    )
    duplicate_of_id = db.Column(
        db.Integer,
        db.ForeignKey('event_abstracts.abstracts.id'),
        index=True,
        nullable=True
    )
    # Soft-delete flag; most relationship primaryjoins below exclude deleted rows.
    is_deleted = db.Column(
        db.Boolean,
        nullable=False,
        default=False
    )
    event = db.relationship(
        'Event',
        lazy=True,
        backref=db.backref(
            'abstracts',
            primaryjoin='(Abstract.event_id == Event.id) & ~Abstract.is_deleted',
            cascade='all, delete-orphan',
            lazy=True
        )
    )
    #: User who submitted the abstract
    submitter = db.relationship(
        'User',
        lazy=True,
        foreign_keys=submitter_id,
        backref=db.backref(
            'abstracts',
            primaryjoin='(Abstract.submitter_id == User.id) & ~Abstract.is_deleted',
            lazy='dynamic'
        )
    )
    modified_by = db.relationship(
        'User',
        lazy=True,
        foreign_keys=modified_by_id,
        backref=db.backref(
            'modified_abstracts',
            primaryjoin='(Abstract.modified_by_id == User.id) & ~Abstract.is_deleted',
            lazy='dynamic'
        )
    )
    submitted_contrib_type = db.relationship(
        'ContributionType',
        lazy=True,
        foreign_keys=submitted_contrib_type_id,
        backref=db.backref(
            'proposed_abstracts',
            primaryjoin='(Abstract.submitted_contrib_type_id == ContributionType.id) & ~Abstract.is_deleted',
            lazy=True,
            passive_deletes=True
        )
    )
    # Tracks the submitter originally proposed the abstract for.
    submitted_for_tracks = db.relationship(
        'Track',
        secondary='event_abstracts.submitted_for_tracks',
        collection_class=set,
        backref=db.backref(
            'abstracts_submitted',
            primaryjoin='event_abstracts.submitted_for_tracks.c.track_id == Track.id',
            secondaryjoin='(event_abstracts.submitted_for_tracks.c.abstract_id == Abstract.id) & ~Abstract.is_deleted',
            collection_class=set,
            lazy=True,
            passive_deletes=True
        )
    )
    # Tracks the abstract is actually being reviewed for (may differ from submitted).
    reviewed_for_tracks = db.relationship(
        'Track',
        secondary='event_abstracts.reviewed_for_tracks',
        collection_class=set,
        backref=db.backref(
            'abstracts_reviewed',
            primaryjoin='event_abstracts.reviewed_for_tracks.c.track_id == Track.id',
            secondaryjoin='(event_abstracts.reviewed_for_tracks.c.abstract_id == Abstract.id) & ~Abstract.is_deleted',
            collection_class=set,
            lazy=True,
            passive_deletes=True
        )
    )
    #: User who judged the abstract
    judge = db.relationship(
        'User',
        lazy=True,
        foreign_keys=judge_id,
        backref=db.backref(
            'judged_abstracts',
            primaryjoin='(Abstract.judge_id == User.id) & ~Abstract.is_deleted',
            lazy='dynamic'
        )
    )
    accepted_track = db.relationship(
        'Track',
        lazy=True,
        backref=db.backref(
            'abstracts_accepted',
            primaryjoin='(Abstract.accepted_track_id == Track.id) & ~Abstract.is_deleted',
            lazy=True,
            passive_deletes=True
        )
    )
    accepted_contrib_type = db.relationship(
        'ContributionType',
        lazy=True,
        foreign_keys=accepted_contrib_type_id,
        backref=db.backref(
            'abstracts_accepted',
            primaryjoin='(Abstract.accepted_contrib_type_id == ContributionType.id) & ~Abstract.is_deleted',
            lazy=True,
            passive_deletes=True
        )
    )
    merged_into = db.relationship(
        'Abstract',
        lazy=True,
        remote_side=id,
        foreign_keys=merged_into_id,
        backref=db.backref(
            'merged_abstracts',
            primaryjoin=(db.remote(merged_into_id) == id) & ~db.remote(is_deleted),
            lazy=True
        )
    )
    duplicate_of = db.relationship(
        'Abstract',
        lazy=True,
        remote_side=id,
        foreign_keys=duplicate_of_id,
        backref=db.backref(
            'duplicate_abstracts',
            primaryjoin=(db.remote(duplicate_of_id) == id) & ~db.remote(is_deleted),
            lazy=True
        )
    )
    #: Data stored in abstract/contribution fields
    field_values = db.relationship(
        'AbstractFieldValue',
        lazy=True,
        cascade='all, delete-orphan',
        backref=db.backref(
            'abstract',
            lazy=True
        )
    )
    #: Persons associated with this abstract
    person_links = db.relationship(
        'AbstractPersonLink',
        lazy=True,
        cascade='all, delete-orphan',
        order_by='AbstractPersonLink.display_order',
        backref=db.backref(
            'abstract',
            lazy=True
        )
    )

    # relationship backrefs:
    # - comments (AbstractComment.abstract)
    # - contribution (Contribution.abstract)
    # - duplicate_abstracts (Abstract.duplicate_of)
    # - email_logs (AbstractEmailLogEntry.abstract)
    # - files (AbstractFile.abstract)
    # - merged_abstracts (Abstract.merged_into)
    # - proposed_related_abstract_reviews (AbstractReview.proposed_related_abstract)
    # - reviews (AbstractReview.abstract)
    @property
    def candidate_contrib_types(self):
        """Contribution types proposed by reviews of positively-reviewed tracks."""
        contrib_types = set()
        for track in self.reviewed_for_tracks:
            if self.get_track_reviewing_state(track) == AbstractReviewingState.positive:
                # A positive track state implies at least one accepting review exists.
                review = next((x for x in self.reviews if x.track == track), None)
                contrib_types.add(review.proposed_contribution_type)
        return contrib_types

    @property
    def candidate_tracks(self):
        """Tracks whose reviews lean towards acceptance (positive or conflicting)."""
        states = {AbstractReviewingState.positive, AbstractReviewingState.conflicting}
        return {t for t in self.reviewed_for_tracks if self.get_track_reviewing_state(t) in states}

    @property
    def edit_track_mode(self):
        """Which track lists may currently be edited (an ``EditTrackMode``)."""
        # A brand-new (not yet persisted) abstract is fully editable.
        if not inspect(self).persistent:
            return EditTrackMode.both
        elif self.state not in {AbstractState.submitted, AbstractState.withdrawn}:
            return EditTrackMode.none
        elif (self.public_state in (AbstractPublicState.awaiting, AbstractPublicState.withdrawn) and
                self.reviewed_for_tracks == self.submitted_for_tracks):
            return EditTrackMode.both
        else:
            # Reviewing already started or track lists diverged: only the
            # reviewed-for list may still be changed.
            return EditTrackMode.reviewed_for

    @property
    def public_state(self):
        """The user-facing state (an ``AbstractPublicState``)."""
        if self.state != AbstractState.submitted:
            # Non-submitted states map 1:1 by name.
            return getattr(AbstractPublicState, self.state.name)
        elif self.reviews:
            return AbstractPublicState.under_review
        else:
            return AbstractPublicState.awaiting

    @property
    def reviewing_state(self):
        """Aggregate of the per-track reviewing states (an ``AbstractReviewingState``)."""
        if not self.reviews:
            return AbstractReviewingState.not_started
        track_states = {x: self.get_track_reviewing_state(x) for x in self.reviewed_for_tracks}
        positiveish_states = {AbstractReviewingState.positive, AbstractReviewingState.conflicting}
        if any(x == AbstractReviewingState.not_started for x in track_states.values()):
            return AbstractReviewingState.in_progress
        elif all(x == AbstractReviewingState.negative for x in track_states.values()):
            return AbstractReviewingState.negative
        elif all(x in positiveish_states for x in track_states.values()):
            if len(self.reviewed_for_tracks) > 1:
                # Accepted for more than one track
                return AbstractReviewingState.conflicting
            elif any(x == AbstractReviewingState.conflicting for x in track_states.values()):
                # The only accepted track is in conflicting state
                return AbstractReviewingState.conflicting
            else:
                return AbstractReviewingState.positive
        else:
            return AbstractReviewingState.mixed
@property
def score(self):
scores = [x.score for x in self.reviews if x.score is not None]
if not scores:
return None
return sum(scores) / len(scores)
    @property
    def track_question_scores(self):
        """Per-track, per-question average review scores.

        Returns ``{track_id: {question: average_score}}``.
        """
        sums = defaultdict(Counter)  # track id -> question -> sum of scores
        lens = defaultdict(Counter)  # track id -> question -> number of scores
        for r in self.reviews:
            sums[r.track.id] += Counter(r.scores)
            lens[r.track.id] += Counter(r.scores.keys())
        return {track: {question: score / lens[track][question]
                        for question, score in scores.items()}
                for track, scores in sums.items()}

    @property
    def data_by_field(self):
        """Mapping of contribution field id -> ``AbstractFieldValue``."""
        return {value.contribution_field_id: value for value in self.field_values}

    @locator_property
    def locator(self):
        return dict(self.event.locator, abstract_id=self.id)

    @locator.token
    def locator(self):
        # Token-based variant (uses the access uuid instead of the id).
        return dict(self.event.locator, uuid=self.uuid)

    # Hybrid property over the raw `_judgment_comment` column: instance access
    # wraps the text as Markdown, class-level access yields the plain column.
    @hybrid_property
    def judgment_comment(self):
        return MarkdownText(self._judgment_comment)

    @judgment_comment.setter
    def judgment_comment(self, value):
        self._judgment_comment = value

    @judgment_comment.expression
    def judgment_comment(cls):
        return cls._judgment_comment

    @property
    def verbose_title(self):
        """Friendly id plus title, e.g. ``#12 (Some title)``."""
        return f'#{self.friendly_id} ({self.title})'

    @property
    def is_in_final_state(self):
        """True once the abstract left the 'submitted' state."""
        return self.state != AbstractState.submitted

    @property
    def modification_ended(self):
        """Whether the CFA modification period of the event is over."""
        return self.event.cfa.modification_ended

    def __repr__(self):
        return format_repr(self, 'id', 'event_id', is_deleted=False, _text=text_to_repr(self.title))
def can_access(self, user):
if not user:
return False
if self.submitter == user:
return True
if self.event.can_manage(user, permission='abstracts'):
return True
if any(x.person.user == user for x in self.person_links):
return True
return self.can_judge(user) or self.can_convene(user) or self.can_review(user)
    def can_comment(self, user, check_state=False):
        """Whether *user* may comment on the abstract.

        :param check_state: also deny commenting once the abstract is judged
        """
        if not user:
            return False
        if check_state and self.is_in_final_state:
            return False
        if not self.event.cfa.allow_comments:
            return False
        # Submitter/authors may comment only if the CFA explicitly allows it.
        if self.user_owns(user) and self.event.cfa.allow_contributors_in_comments:
            return True
        return self.can_judge(user) or self.can_convene(user) or self.can_review(user)

    def can_convene(self, user):
        """Whether *user* is a convener for (any of) the abstract's tracks."""
        if not user:
            return False
        if not self.event.can_manage(user, permission='track_convener', explicit_permission=True):
            return False
        # Global conveners may convene everything; otherwise require the
        # permission on one of the tracks the abstract is reviewed for.
        if self.event.can_manage(user, permission='convene_all_abstracts', explicit_permission=True):
            return True
        return any(track.can_manage(user, permission='convene', explicit_permission=True)
                   for track in self.reviewed_for_tracks)

    def can_review(self, user, check_state=False):
        """Whether *user* may review the abstract.

        :param check_state: also deny reviewing once the abstract is judged
        """
        # The total number of tracks/events a user is a reviewer for (indico-wide)
        # is usually reasonably low so we just access the relationships instead of
        # sending a more specific query which would need to be cached to avoid
        # repeating it when performing this check on many abstracts.
        if not user:
            return False
        if check_state and self.public_state not in (AbstractPublicState.under_review, AbstractPublicState.awaiting):
            return False
        if not self.event.can_manage(user, permission='abstract_reviewer', explicit_permission=True):
            return False
        if self.event.can_manage(user, permission='review_all_abstracts', explicit_permission=True):
            return True
        return any(track.can_manage(user, permission='review', explicit_permission=True)
                   for track in self.reviewed_for_tracks)
def can_judge(self, user, check_state=False):
if not user:
return False
elif check_state and self.state != AbstractState.submitted:
return False
elif self.event.can_manage(user, permission='abstracts'):
return True
elif self.event.cfa.allow_convener_judgment and self.can_convene(user):
return True
else:
return False
    def can_change_tracks(self, user, check_state=False):
        """Whether *user* may change the abstract's track lists."""
        if check_state and self.is_in_final_state:
            return False
        # Conveners may change tracks only when the CFA allows it.
        if self.event.cfa.allow_convener_track_change and self.can_convene(user):
            return True
        return self.can_judge(user)

    def can_edit(self, user):
        """Whether *user* may edit the abstract's content."""
        if not user:
            return False
        manager_edit_states = (
            AbstractPublicState.under_review,
            AbstractPublicState.withdrawn,
            AbstractPublicState.awaiting,
            AbstractPublicState.invited,
        )
        if self.public_state in manager_edit_states and self.event.can_manage(user, permission='abstracts'):
            return True
        elif self.public_state not in (AbstractPublicState.awaiting, AbstractPublicState.invited):
            # Non-managers may not edit once reviewing/judging has started.
            return False
        elif not self.user_owns(user) or not self.event.cfa.can_edit_abstracts(user):
            return False
        # From here on the user owns the abstract and editing is open in the
        # CFA; narrow down by the configured author-role policy.
        editing_allowed = self.event.cfa.allow_editing
        author_type = next((x.author_type for x in self.person_links if x.person.user == user), None)
        is_primary = author_type == AuthorType.primary
        is_secondary = author_type == AuthorType.secondary
        if user == self.submitter:
            return True
        elif editing_allowed == AllowEditingType.submitter_all:
            return True
        elif editing_allowed == AllowEditingType.submitter_primary and is_primary:
            return True
        elif editing_allowed == AllowEditingType.submitter_authors and (is_primary or is_secondary):
            return True
        return False

    def can_withdraw(self, user, check_state=False):
        """Whether *user* may withdraw the abstract.

        :param check_state: also deny withdrawing when the current state
                            forbids it (already withdrawn / already judged)
        """
        if not user:
            return False
        elif (
            self.event.can_manage(user, permission='abstracts') and
            (not check_state or self.state != AbstractState.withdrawn)
        ):
            return True
        elif user == self.submitter and (not check_state or self.state == AbstractState.submitted):
            return True
        else:
            return False
    def can_see_reviews(self, user):
        """Whether *user* may see the abstract's reviews."""
        return self.can_judge(user) or self.can_convene(user)

    def get_timeline(self, user=None):
        """Comments and reviews visible to *user*, sorted chronologically.

        Without a user, everything is included (no visibility filtering).
        """
        comments = [x for x in self.comments if x.can_view(user)] if user else self.comments
        reviews = [x for x in self.reviews if x.can_view(user)] if user else self.reviews
        return sorted(chain(comments, reviews), key=attrgetter('created_dt'))
    def get_track_reviewing_state(self, track):
        """Aggregate reviewing state of the abstract for a single *track*.

        :raise ValueError: if the abstract is not under review for *track*
        """
        if track not in self.reviewed_for_tracks:
            raise ValueError('Abstract not in review for given track')
        reviews = self.get_reviews(group=track)
        if not reviews:
            return AbstractReviewingState.not_started
        rejections = any(x.proposed_action == AbstractAction.reject for x in reviews)
        acceptances = {x for x in reviews if x.proposed_action == AbstractAction.accept}
        if rejections and not acceptances:
            return AbstractReviewingState.negative
        elif acceptances and not rejections:
            # All-accept: positive only if the reviewers agree on (at most)
            # one proposed contribution type, otherwise conflicting.
            proposed_contrib_types = {x.proposed_contribution_type for x in acceptances
                                      if x.proposed_contribution_type is not None}
            if len(proposed_contrib_types) <= 1:
                return AbstractReviewingState.positive
            else:
                return AbstractReviewingState.conflicting
        else:
            return AbstractReviewingState.mixed

    def get_reviewed_for_groups(self, user, include_reviewed=False):
        """Tracks of this abstract that *user* may review.

        :param include_reviewed: also include tracks the user already reviewed
        """
        already_reviewed = {each.track for each in self.get_reviews(user=user)} if include_reviewed else set()
        if self.event.can_manage(user, permission='review_all_abstracts', explicit_permission=True):
            return self.reviewed_for_tracks | already_reviewed
        reviewer_tracks = {track for track in self.reviewed_for_tracks
                           if track.can_manage(user, permission='review', explicit_permission=True)}
        return reviewer_tracks | already_reviewed
def get_track_score(self, track):
if track not in self.reviewed_for_tracks:
raise ValueError('Abstract not in review for given track')
reviews = [x for x in self.reviews if x.track == track]
scores = [x.score for x in reviews if x.score is not None]
if not scores:
return None
return sum(scores) / len(scores)
    def reset_state(self):
        """Return the abstract to 'submitted', clearing all judgment data.

        Clearing every judgment-related field keeps the row consistent with
        the table's check constraints (judge/judgment_dt/etc. only when judged).
        """
        self.state = AbstractState.submitted
        self.judgment_comment = ''
        self.judge = None
        self.judgment_dt = None
        self.accepted_track = None
        self.accepted_contrib_type = None
        self.merged_into = None
        self.duplicate_of = None
def user_owns(self, user):
if not user:
return None
return user == self.submitter or any(x.person.user == user for x in self.person_links)
    def log(self, *args, **kwargs):
        """Log with prefilled metadata for the abstract."""
        # Delegates to the event log, tagging every entry with this abstract's id.
        self.event.log(*args, meta={'abstract_id': self.id}, **kwargs)
| {
"content_hash": "7e40ed23c082cd68f809cace3abc0abf",
"timestamp": "",
"source": "github",
"line_count": 676,
"max_line_length": 119,
"avg_line_length": 37.75443786982248,
"alnum_prop": 0.6055559909098033,
"repo_name": "DirkHoffmann/indico",
"id": "08acc7d3719375d97ea1f7cc226fd6cf1b1b98f9",
"size": "25736",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "indico/modules/events/abstracts/models/abstracts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "33249"
},
{
"name": "HTML",
"bytes": "1398354"
},
{
"name": "JavaScript",
"bytes": "2295843"
},
{
"name": "Mako",
"bytes": "1527"
},
{
"name": "Python",
"bytes": "5426206"
},
{
"name": "SCSS",
"bytes": "496904"
},
{
"name": "Shell",
"bytes": "3877"
},
{
"name": "TeX",
"bytes": "23435"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
} |
import os
import unittest
import IECore
import Gaffer
import GafferImage
import sys
class ConstantTest( unittest.TestCase ) :

	def testDefaultFormatHash( self ) :

		# NOTE(review): this body is identical to testColourHash below — it
		# varies the colour, not the format, so it presumably does not exercise
		# the default-format behaviour its name suggests. Confirm intent.
		s = Gaffer.ScriptNode()
		n = GafferImage.Constant()
		s.addChild( n )
		with s.context():
			h = n["out"].image().hash()
			n["color"][0].setValue( .5 )
			n["color"][1].setValue( .1 )
			n["color"][2].setValue( .8 )
			h2 = n["out"].image().hash()
			self.assertNotEqual( h, h2 )

	def testColourHash( self ) :

		# Check that the hash changes when the colour does.
		s = Gaffer.ScriptNode()
		n = GafferImage.Constant()
		s.addChild( n )
		with s.context():
			h = n["out"].image().hash()
			n["color"][0].setValue( .5 )
			n["color"][1].setValue( .1 )
			n["color"][2].setValue( .8 )
			h2 = n["out"].image().hash()
			self.assertNotEqual( h, h2 )

	def testFormatHash( self ) :

		# Check that the data hash doesn't change when the format does.
		c = GafferImage.Constant()
		c["format"].setValue( GafferImage.Format( 2048, 1156, 1. ) )
		h1 = c["out"].channelData( "R", IECore.V2i( 0 ) ).hash()
		c["format"].setValue( GafferImage.Format( 1920, 1080, 1. ) )
		h2 = c["out"].channelData( "R", IECore.V2i( 0 ) ).hash()
		self.assertEqual( h1, h2 )

	def testTileHashes( self ) :

		# Test that two tiles within the image have the same hash.
		c = GafferImage.Constant()
		c["format"].setValue( GafferImage.Format( 2048, 1156, 1. ) )
		c["color"][0].setValue( .5 )
		self.assertEqual(
			c["out"].channelDataHash( "R", IECore.V2i( 0 ) ),
			c["out"].channelDataHash( "R", IECore.V2i( GafferImage.ImagePlug().tileSize() ) ),
		)

	def testEnableBehaviour( self ) :

		# The constant has no inputs, so disabling it has no pass-through.
		c = GafferImage.Constant()
		self.assertTrue( c.enabledPlug().isSame( c["enabled"] ) )
		self.assertEqual( c.correspondingInput( c["out"] ), None )
		self.assertEqual( c.correspondingInput( c["color"] ), None )
		self.assertEqual( c.correspondingInput( c["format"] ), None )

	def testChannelNamesHash( self ) :

		# Channel names are fixed, so changing the colour must not affect them.
		c = GafferImage.Constant()
		h1 = c["out"]["channelNames"].hash()
		c["color"].setValue( IECore.Color4f( 1, 0.5, 0.25, 1 ) )
		h2 = c["out"]["channelNames"].hash()
		self.assertEqual( h1, h2 )

	def testSerialisationWithZeroAlpha( self ) :

		# Round-trip a colour with alpha == 0 through script serialisation.
		s = Gaffer.ScriptNode()
		s["c"] = GafferImage.Constant()
		s["c"]["color"].setValue( IECore.Color4f( 0, 1, 0, 0 ) )
		s2 = Gaffer.ScriptNode()
		s2.execute( s.serialise() )
		self.assertEqual( s2["c"]["color"].getValue(), IECore.Color4f( 0, 1, 0, 0 ) )
# Allow running this test module directly, outside the full test suite.
if __name__ == "__main__":
	unittest.main()
| {
"content_hash": "6252f26b4ef5b967f71e555c345b3451",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 85,
"avg_line_length": 28.71590909090909,
"alnum_prop": 0.6264345073209339,
"repo_name": "davidsminor/gaffer",
"id": "cf3e2d1db11db57a4b8a668af5e36cbba630d341",
"size": "4401",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/GafferImageTest/ConstantTest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "9286"
},
{
"name": "C++",
"bytes": "3358250"
},
{
"name": "COBOL",
"bytes": "64449"
},
{
"name": "CSS",
"bytes": "28027"
},
{
"name": "Python",
"bytes": "3267354"
},
{
"name": "Shell",
"bytes": "7055"
},
{
"name": "Slash",
"bytes": "35200"
}
],
"symlink_target": ""
} |
""" SMA Indicator
"""
import math
import pandas
from talib import abstract
from analyzers.utils import IndicatorUtils
class SMA(IndicatorUtils):
    def analyze(self, historical_data, period_count=15):
        """Performs a SMA analysis on the historical data

        Args:
            historical_data (list): A matrix of historical OHCLV data.
            period_count (int, optional): Defaults to 15. The number of data
                points to consider for our simple moving average.

        Returns:
            pandas.DataFrame: A dataframe containing the indicators and hot/cold values.
        """
        frame = self.convert_to_dataframe(historical_data)
        # talib returns a Series; widen it to a single-column frame.
        sma = abstract.SMA(frame, period_count).to_frame()
        sma = sma.dropna(how='all')
        return sma.rename(columns={0: 'sma'})
| {
"content_hash": "6bd220a4319cb470f880b55add921cde",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 99,
"avg_line_length": 29.5,
"alnum_prop": 0.6598870056497175,
"repo_name": "AbenezerMamo/crypto-signal",
"id": "daffd2440350b91ff68f9650e8e347e5fb4a70d4",
"size": "885",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/analyzers/informants/sma.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "236"
},
{
"name": "Python",
"bytes": "49171"
}
],
"symlink_target": ""
} |
r"""Model definition."""
import tensorflow as tf
def build_model(image_size, bias_last=True, num_classes=1, squeeze=True):
  """Builds a small convolutional classifier.

  The network is five Conv2D -> BatchNorm -> ReLU stages followed by a
  flatten and a final dense layer.

  Args:
    image_size: Height/width of the square RGB input image.
    bias_last: Whether the final dense layer uses a bias term.
    num_classes: Number of output units of the final dense layer.
    squeeze: If True, singleton dimensions are squeezed from the output.

  Returns:
    A `tf.keras.Model` taking `[image, training]` inputs.
  """
  input_shape = (image_size, image_size, 3)
  image = tf.keras.Input(shape=input_shape, name='input_image')
  # The training flag is a model input so the caller controls batch-norm
  # behaviour explicitly at call time.
  training = tf.keras.Input(shape=[], name='training')

  def conv_bn_relu(x, filters, kernel_size, strides):
    """One Conv2D (valid padding, no activation) -> BatchNorm -> ReLU stage."""
    x = tf.keras.layers.Conv2D(
        filters, kernel_size, strides=strides, padding='valid',
        activation=None)(x)
    x = tf.keras.layers.BatchNormalization()(x, training)
    return tf.keras.layers.ReLU()(x)

  x = conv_bn_relu(image, 128, (3, 3), (1, 1))
  x = conv_bn_relu(x, 128, (3, 3), (2, 2))
  x = conv_bn_relu(x, 256, (3, 3), (2, 2))
  x = conv_bn_relu(x, 256, (3, 3), (2, 2))
  x = conv_bn_relu(x, 512, (1, 1), (1, 1))
  x = tf.keras.layers.Flatten()(x)
  last_layer_fc = tf.keras.layers.Dense(num_classes, use_bias=bias_last)
  if squeeze:
    x = tf.squeeze(last_layer_fc(x))
  else:
    x = last_layer_fc(x)
  model = tf.keras.models.Model(
      inputs=[image, training], outputs=x, name='model')
  model.summary()
  return model
| {
"content_hash": "b9932811ba3836f84be5af2f2d65aa42",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 73,
"avg_line_length": 33.3921568627451,
"alnum_prop": 0.6265413975337639,
"repo_name": "google-research/google-research",
"id": "f626ba54ad484c85c10ca5c09fa09391d676478c",
"size": "2311",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "implicit_constrained_optimization/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
} |
"""
Rockuefort reads playlists written in simple plaintext, searches your
music library, and outputs the songs matched by the playlist in a few
different ways.
Usage: rockuefort index [--add DIR|--remove DIR]
rockuefort scan
rockuefort list [--strip PREFIX] [--prepend PREFIX] [--null]
[--shuffle] <playlist>
rockuefort (copy|link) [--no-number] [--shuffle]
<playlist> <destination>
rockuefort render [--shuffle] <playlist> <outfile>
rockuefort check <playlist>
rockuefort --help
rockuefort --version
Options:
--add DIR Add DIR to the list of directories to scan
--null Terminate printed filenames with null characters
--prepend PREFIX Prepend PREFIX to each printed filename
--remove DIR Remove DIR from the list of directories to scan
--reset Forget previously indexed files
--shuffle Randomize the order of the output
--strip PREFIX Strip PREFIX from each printed filename
"""
from collections import namedtuple
import itertools
import logging
import math
import multiprocessing
import os
import pickle
import random
import re
import subprocess
import sys
import tempfile
from docopt import docopt
import mutagen
# Module-level logger shared by all actions.
logger = logging.getLogger(__name__)

# Registry of command-line actions, populated by the @action decorator.
ACTIONS = {}

# Where the pickled tag cache lives (written by scan(), read by load_cache()).
CACHE_PATH = os.path.expanduser("~/.cache/rockuefort/index")
# Where the list of directories to scan lives (managed by index()).
DIRS_CONFIG_PATH = os.path.expanduser("~/.config/rockuefort/dirs")
# Option characters recognized in playlist queries (see PlaylistEntry).
KNOWN_OPTIONS = "@|+-"
# load_playlist() keyword arguments that may be filled from CLI flags.
PLAYLIST_LOAD_ARGS = "shuffle".split()
# When duplicate tracks differ only by extension, prefer these, in order.
PREFERRED_EXTENSIONS = ".oga .ogg .mp3 .flac".split()
# Tags that playlist queries may match against.
TAGS = "title artist album genre composer".split()
def main():
    """Entry point: configure logging, parse arguments, run the action."""
    logging.basicConfig(
        format="%(name)s:%(levelname)s: %(message)s",
        level=logging.DEBUG,
    )
    cli_args = docopt(__doc__, version="Rockuefort 1.1")
    # Exactly one action flag is set by docopt; dispatch to it.
    chosen = next(fn for name, fn in ACTIONS.items() if cli_args[name])
    return chosen(cli_args)
def action(func):
    """Register *func* as a command-line action.

    Trailing underscores are removed from the function's name, so an
    action can be named e.g. 'list_' without shadowing the built-in.
    """
    name = func.__name__.rstrip('_')
    ACTIONS[name] = func
    return func
@action
def check(args):
    # Loading the playlist already logs a warning for every query that does
    # not match its expected file count; nothing further is needed.
    load_playlist(args["<playlist>"], **playlist_load_args(args))
@action
def copy(args):
    # Copy the playlist's files to <destination> with rsync: build numbered
    # symlinks in a temp dir so rsync sees the final layout, show a dry run,
    # then ask for confirmation before doing the real transfer.
    files = load_playlist(args["<playlist>"], **playlist_load_args(args))
    with tempfile.TemporaryDirectory() as temp_dir:
        make_links(files, temp_dir, args["--no-number"])
        logger.info("Performing a dry run of rsync...")
        # --copy-links dereferences our symlinks; --delete prunes stale files.
        rsync_args = ["rsync", "--recursive", "--itemize-changes",
                      "--copy-links", "--times", "--delete", "--dry-run",
                      temp_dir + "/", args["<destination>"]]
        call(rsync_args, ignore_return_code=True)
        if confirm("Proceed with the rsync?"):
            rsync_args.remove("--dry-run")
            call(rsync_args, ignore_return_code=True)
@action
def index(args):
    # Manage the directories-to-scan config: --add/--remove rewrite the
    # config file; with neither flag, the current directories are printed.
    dirs, _ = load_dirs_config(DIRS_CONFIG_PATH)
    if args["--add"]:
        dirs.add(args["--add"])
    elif args["--remove"]:
        dirs.remove(args["--remove"])
    else:
        for dir in dirs:
            print(dir)
        return
    # NOTE(review): exclude lines ('-dir') from the existing config are
    # discarded here because only `dirs` is written back -- confirm whether
    # excludes should be preserved on --add/--remove.
    os.makedirs(os.path.dirname(DIRS_CONFIG_PATH), exist_ok=True)
    with open(DIRS_CONFIG_PATH, "w") as f:
        f.write("\n".join(dirs) + "\n")
@action
def link(args):
    """Symlink the playlist's files into <destination>, creating it if needed."""
    destination = args["<destination>"]
    matched = load_playlist(args["<playlist>"], **playlist_load_args(args))
    try:
        os.mkdir(destination)
    except FileExistsError:
        pass
    make_links(matched, destination, args["--no-number"])
@action
def list_(args):
    """Print each matched file, optionally rewriting its prefix."""
    strip = args["--strip"]
    prepend = args["--prepend"]
    terminator = "\0" if args["--null"] else "\n"
    for path in load_playlist(args["<playlist>"], **playlist_load_args(args)):
        if strip and path.startswith(strip):
            path = path[len(strip):]
        if prepend:
            path = prepend + path
        print(path, end=terminator)
@action
def render(args):
    # Render the whole playlist into a single audio file: pre-process each
    # track with sox (optional gain/crop, silence trimming, normalize to
    # stereo 44.1kHz), then concatenate everything into <outfile>.
    files = load_playlist(args["<playlist>"], **playlist_load_args(args))
    with tempfile.TemporaryDirectory() as temp_dir:
        commands = []
        processed_files = []
        # Pre-process each file to remove silences
        # NOTE(review): math.log10(0) raises ValueError for an empty
        # playlist -- confirm whether that case should be handled earlier.
        max_digits = math.ceil(math.log10(len(files)))
        for n, file in enumerate(files, start=1):
            base, _ = os.path.splitext(os.path.basename(file))
            # Zero-padded index keeps the intermediate files in playlist order.
            out = os.path.join(temp_dir, ("{:0%sd}-{}.flac" % max_digits)
                               .format(n, base))
            if file.gain:
                volume_options = [
                    "--norm=%s" % file.gain,
                ]
            else:
                volume_options = []
            if file.trim_positions:
                trim = ["trim"] + ["=%s" % pos for pos in file.trim_positions]
            else:
                trim = []
            sox_args = [
                "sox",
                "--no-clobber",
                *volume_options,
                file,
                out,
                *trim,
                "silence", "1", "0.05", "0.1%",  # remove silence at the beginning
                "reverse",
                "silence", "1", "0.05", "0.2%",  # remove silence at the end
                "reverse",
                "channels", "2",
                "rate", "44100",
            ]
            commands.append(sox_args)
            processed_files.append(out)
        # Run the per-track sox jobs in parallel.
        with multiprocessing.Pool() as pool:
            pool.map(call, commands)
        # Write out an empty file that we'll use first so that Sox won't copy
        # any metadata into the final output.
        empty_file = os.path.join(temp_dir, "empty.flac")
        empty_args = [
            "sox",
            "-n",
            "-r", "44100",
            "-c", "2",
            empty_file,
            "trim", "0", "0",
        ]
        call(empty_args)
        # Concatenate the files
        sox_args = [
            "sox",
            "--no-clobber",
            empty_file,
        ]
        for file in processed_files:
            sox_args.append(file)
        sox_args.append(args["<outfile>"])
        call(sox_args)
@action
def scan(args):
    # Walk every configured directory and rebuild the pickled tag cache.
    mutable_cache = {}
    # Open the cache file *before* scanning so that we haven't wasted time
    # scanning if we find out the cache file can't be opened.
    os.makedirs(os.path.dirname(CACHE_PATH), exist_ok=True)
    dirs, excludes = load_dirs_config(DIRS_CONFIG_PATH)
    with open(CACHE_PATH, "wb") as out:
        for dir in dirs:
            for base, dirnames, files in os.walk(dir):
                if base in excludes:
                    logger.debug("excluding: %s", base)
                    # Clearing dirnames in place stops os.walk descending.
                    dirnames[:] = []
                    continue
                paths = (os.path.join(base, f) for f in files)
                for path in paths:
                    logger.info(path)
                    try:
                        entry = CacheEntry.from_path(path)
                    except UnknownFileFormatError:
                        # Not an audio file mutagen understands; skip it.
                        logger.debug("skipping: %s", path)
                    else:
                        mutable_cache[path] = entry
        # The cache is persisted as a tuple of CacheEntry objects.
        cache = tuple(mutable_cache.values())
        pickle.dump(cache, out)
class CacheEntry(namedtuple("CacheEntry", ["path"] + TAGS)):
    """An indexed file: its absolute path plus the values of each tag in TAGS."""

    @classmethod
    def from_path(cls, path):
        """Build a CacheEntry by reading tags from the file at *path*.

        Raises UnknownFileFormatError when mutagen can't identify the file.
        """
        audio = mutagen.File(path, easy=True)
        if not audio:
            raise UnknownFileFormatError(path)
        fields = {tag: audio.get(tag, []) for tag in TAGS}
        fields["path"] = os.path.abspath(path)
        return cls(**fields)
class FileWrapper(str):
    """A str subclass carrying per-file playback options.

    Attributes (both default to None when no option applies):
        gain: level passed to sox's --norm option.
        trim_positions: positions passed to sox's trim effect.
    """

    def __new__(cls, *args, gain=None, trim_positions=None, **kwargs):
        self = super().__new__(cls, *args, **kwargs)
        self.trim_positions = trim_positions
        self.gain = gain
        return self
class MatchResult(list):
    """A list of matched files for one playlist entry.

    The fixed_position flag marks a result that must keep its playlist
    position when the output is shuffled.
    """

    def __init__(self, file_or_list):
        items = file_or_list if isinstance(file_or_list, list) else [file_or_list]
        super().__init__(items)
        self.fixed_position = False
class GroupedMatchResult(MatchResult):
    # Marker subclass: files from consecutive '|'-option entries are merged
    # into one result so they stay together when the playlist is shuffled.
    pass
class PlaylistEntry(namedtuple("PlaylistEntry", "query count options crop")):
    """One parsed playlist line.

    Fields:
        query: list of (tag, value) pairs that must all match a file.
        count: expected number of matched files (defaults to 1).
        options: option characters that preceded the query (KNOWN_OPTIONS).
        crop: comma-separated trim positions from a "crop=" pseudo-tag, or None.
    """

    # Matches "[options][count:]tag=value[|tag=value...]".
    _matcher = re.compile(
        r"""(?P<options>[^\w]+)?
        (?:(?P<count>[\d]+):)?
        (?P<query>
        [\w]+=[^|]+
        (?:\|[\w]+=[^|]+)*
        )""",
        re.VERBOSE).fullmatch

    @classmethod
    def from_string(cls, string):
        """Parse *string* into a PlaylistEntry.

        Raises QueryParseError for unparseable lines and
        QueryInvalidTagError when a query uses a tag not in TAGS.
        """
        match = cls._matcher(string)
        if match:
            query_str, count, options = match.group("query", "count", "options")
            count = int(count) if count is not None else 1
            query_parts = [part.split("=", maxsplit=1) for part in query_str.split("|")]
            # "crop" is a pseudo-tag handled separately, not a real tag query.
            query = [(tag, value) for tag, value in query_parts if tag != "crop"]
            if not all(tag in TAGS for tag, _ in query):
                raise QueryInvalidTagError
            crop = next((value for tag, value in query_parts if tag == "crop"), None)
            options = options or ''
            unknown_options = set(options) - set(KNOWN_OPTIONS)
            if unknown_options:
                # logger.warn is a deprecated alias; use warning().
                logger.warning("Ignoring unknown query options %r",
                               "".join(unknown_options))
            return cls(query, count, options, crop)
        else:
            raise QueryParseError
class QueryInvalidTagError(Exception):
    """Raised when a playlist query uses a tag not listed in TAGS."""
    pass

class QueryParseError(Exception):
    """Raised when a playlist line cannot be parsed as a query."""
    pass

class UnknownFileFormatError(Exception):
    """Raised when mutagen cannot identify a file's format."""
    pass
def call(args, ignore_return_code=False):
    """Log and run a command, exiting the program on failure.

    When ignore_return_code is true, a non-zero exit status is tolerated.
    """
    logger.info(" ".join(args))
    try:
        subprocess.check_call(args)
    except subprocess.CalledProcessError as error:
        if ignore_return_code:
            return
        logger.error(error)
        sys.exit(2)
def confirm(question):
    """Prompt until the user answers yes (True) or no (False).

    An empty answer counts as yes (the capital-Y default).
    """
    prompt = "{} (Y/n): ".format(question)
    while True:
        reply = input(prompt)
        if reply in "Yy":
            # The empty string is "in" any string, so this wins for it too.
            return True
        if reply in "Nn":
            return False
def filter_extensions(files):
    """Collapse duplicates that differ only by extension.

    For each distinct base name, keep the first extension found in
    PREFERRED_EXTENSIONS, falling back to whichever appeared first.
    """
    exts_by_base = {}
    for path in files:
        base, ext = os.path.splitext(path)
        exts_by_base.setdefault(base, []).append(ext)
    return [
        base + next((e for e in PREFERRED_EXTENSIONS if e in exts), exts[0])
        for base, exts in exts_by_base.items()
    ]
def playlist_load_args(args):
    """Extract load_playlist() keyword arguments from docopt *args*.

    CLI flag names like "--shuffle" are normalized to identifier form
    and kept only when listed in PLAYLIST_LOAD_ARGS.
    """
    load_args = {}
    for raw_name, value in args.items():
        name = raw_name.lstrip('-').replace('-', '_')
        if name in PLAYLIST_LOAD_ARGS:
            load_args[name] = value
    return load_args
def get_results(entries, cache=None):
    """Resolve playlist *entries* against *cache* into MatchResult objects.

    Each entry's query is matched against the cache, duplicate extensions
    are collapsed, and the entry's options are applied: '+'/'-' adjust
    gain, 'crop' sets trim positions, '|' groups consecutive entries into
    one shuffle unit, and '@' pins the result's playlist position.
    """
    results = []
    for entry in entries:
        matched_files = filter_extensions(match_files(entry.query, cache))
        n = len(matched_files)
        if n != entry.count:
            file_info = "".join("\n match: %s" % f for f in matched_files)
            # logger.warn is a deprecated alias; use warning().
            logger.warning("Matched %s files (expected %s): %r%s",
                           n, entry.count, entry.query, file_info)
        volume_adjustment = entry.options.count('+') - entry.options.count('-')
        options = {}
        if volume_adjustment:
            try:
                # Map -7..+n plus/minus counts onto a dB-ish gain curve.
                options["gain"] = 10 * math.log2(volume_adjustment + 8) - 30
            except ValueError:
                logger.warning("Ignoring out-of-bounds volume adjustment %r",
                               volume_adjustment)
        if entry.crop:
            options["trim_positions"] = entry.crop.split(',')
        matched_files = [FileWrapper(file, **options) for file in matched_files]
        if '|' in entry.options:
            # Extend the previous group if there is one; otherwise start one.
            if results and isinstance(results[-1], GroupedMatchResult):
                results[-1].extend(matched_files)
            else:
                results.append(GroupedMatchResult(matched_files))
        else:
            results.extend(MatchResult(file) for file in matched_files)
        if '@' in entry.options:
            # NOTE(review): this assumes at least one result exists -- an
            # '@' entry that matched nothing would raise IndexError here.
            results[-1].fixed_position = True
    return results
def load_cache(path):
    """Load the pickled tag cache from *path*.

    Returns an empty cache when no cache file exists yet.
    """
    try:
        with open(path, "rb") as f:
            return pickle.load(f)
    except FileNotFoundError:
        # logger.warn is a deprecated alias; use warning().
        logger.warning("No cache file found. You should run `rockuefort scan`.")
        # scan() persists the cache as a tuple of CacheEntry objects, so the
        # correct "no entries" value is an empty tuple.  The previous code
        # returned `Cache()`, a name defined nowhere, which raised NameError.
        return ()
def load_dirs_config(path):
    """Read the dirs config at *path* into (dirs, excludes) sets.

    Each line names a directory to scan; a leading '-' marks a directory
    to exclude.  Paths are normalized.  Both sets are empty when the file
    does not exist yet.
    """
    try:
        with open(path) as f:
            lines = f.read().splitlines()
    except FileNotFoundError:
        # logger.warn is a deprecated alias; use warning().
        logger.warning("No config found at {} (will be created if needed)".format(path))
        return set(), set()
    dirs = set()
    excludes = set()
    for line in lines:
        if line.startswith('-'):
            excludes.add(os.path.normpath(line[1:]))
        else:
            dirs.add(os.path.normpath(line))
    return dirs, excludes
def load_playlist(path, *, shuffle=False):
    """Load the playlist at *path* and return the matched files as a flat list."""
    cache = load_cache(CACHE_PATH)
    with open(path) as playlist_file:
        playlist_entries = parse_entries(playlist_file)
    match_results = get_results(playlist_entries, cache)
    if shuffle:
        match_results = shuffled(match_results)
    # Flatten the per-entry result lists into one sequence of files.
    return [file for result in match_results for file in result]
def make_links(targets, dest_dir, no_number=False):
    """Create one symlink in *dest_dir* for each path in *targets*.

    Unless *no_number* is set, each link name is prefixed with a
    zero-padded 1-based index so the playlist order survives sorting.
    Existing destination files are left in place with a warning.
    """
    digits = len(str(len(targets)))
    for i, target in enumerate(targets, 1):
        basename = os.path.basename(target)
        if not no_number:
            basename = ("{:0%d}-{}" % digits).format(i, basename)
        dest = os.path.join(dest_dir, basename)
        try:
            os.symlink(target, dest)
        except FileExistsError:
            # logger.warn is a deprecated alias; use warning().
            logger.warning("File exists: %s", dest)
def match_files(query, cache):
    """Return the paths of cache entries satisfying every (tag, value) in *query*."""
    candidates = cache
    for tag, value in query:
        # Each query term narrows the surviving candidates.
        candidates = [entry for entry in candidates
                      if matches(value, getattr(entry, tag))]
    return [entry.path for entry in candidates]
def matches(value, attr_list):
    """Return whether value matches the attribute described by attr_list

    Attributes come from mutagen as lists of strings (except for the "path"
    attribute). If the value is surrounded by double quotes, we look for exact
    matches; otherwise, we join together the attribute list with Snowman
    characters and then do a substring match against the joined string.
    """
    is_quoted = value.startswith('"') and value.endswith('"')
    if is_quoted:
        return value[1:-1] in attr_list
    haystack = "\N{SNOWMAN}".join(attr_list).lower()
    return value.lower() in haystack
def parse_entries(lines):
    """Parse playlist *lines* into a list of PlaylistEntry objects.

    Blank lines and '#' comments are skipped.  Invalid lines are logged
    and ignored rather than aborting the whole playlist.
    """
    entries = []
    for line_number, line in enumerate(lines, start=1):
        line = line.strip()
        if not line or line.startswith("#"):
            continue
        try:
            entry = PlaylistEntry.from_string(line)
        except QueryInvalidTagError:
            # logger.warn is a deprecated alias; use warning().
            logger.warning("Ignoring query using invalid tag [line %s]: %r",
                           line_number, line)
        except QueryParseError:
            logger.warning("Ignoring invalid query [line %s]: %r",
                           line_number, line)
        else:
            entries.append(entry)
    return entries
def shuffled(results):
    """Return *results* in random order, keeping fixed-position entries in place."""
    pinned = [(index, result) for index, result in enumerate(results)
              if result.fixed_position]
    movable = [result for result in results if not result.fixed_position]
    random.shuffle(movable)
    # Re-insert pinned results at their original playlist positions.
    for index, result in pinned:
        movable.insert(index, result)
    return movable
| {
"content_hash": "1a8796b3937af587bad9473d32ce12fb",
"timestamp": "",
"source": "github",
"line_count": 479,
"max_line_length": 89,
"avg_line_length": 32.2901878914405,
"alnum_prop": 0.5671429495053986,
"repo_name": "kalgynirae/rockuefort",
"id": "57ef3bca22f26e81a05dc29a2e94dd5091809df1",
"size": "15490",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rockuefort/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15966"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
import os

# Distribution version.
version = '1.0'

here = os.path.abspath(os.path.dirname(__file__))
# Use the README as the long description; a with-statement closes the file
# promptly instead of leaking the handle.
with open(os.path.join(here, 'README.txt')) as readme_file:
    README = readme_file.read()

# NOTE(review): 'developement' looks like a typo for 'development', but this
# is the published package identifier -- confirm before renaming.
setup(name='lfc_developement_tools',
      version=version,
      description='Development tools for LFC',
      long_description=README,
      classifiers=[
          'Development Status :: 4 - Beta',
          'Environment :: Web Environment',
          'Framework :: Django',
          'License :: OSI Approved :: BSD License',
          'Operating System :: OS Independent',
          'Programming Language :: Python',
      ],
      keywords='django cms',
      author='Kai Diefenbach',
      author_email='kai.diefenbach@iqpp.de',
      url='http://www.iqpp.de',
      license='BSD',
      packages=find_packages(exclude=['ez_setup']),
      include_package_data=True,
      zip_safe=False,
      install_requires=[
          'setuptools',
      ],
      )
| {
"content_hash": "692b3c3807cb33493cf6f7fdd262a8ea",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 54,
"avg_line_length": 29.34375,
"alnum_prop": 0.597444089456869,
"repo_name": "diefenbach/lfc-development-tools",
"id": "3516386cd4a875eb6f7585b4935baa0888036a91",
"size": "939",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "2891"
}
],
"symlink_target": ""
} |
""" A stylish alternative for caching your map tiles.
TileStache is a Python-based server application that can serve up map tiles
based on rendered geographic data. You might be familiar with TileCache
(http://tilecache.org), the venerable open source WMS server from MetaCarta.
TileStache is similar, but we hope simpler and better-suited to the needs of
designers and cartographers.
Documentation available at http://tilestache.org/doc/
"""
__version__ = 'N.N.N'
import re
from sys import stdout
try:
from urlparse import parse_qs
except ImportError:
from cgi import parse_qs
from StringIO import StringIO
from os.path import dirname, join as pathjoin, realpath
from datetime import datetime, timedelta
from urlparse import urljoin, urlparse
from urllib import urlopen
from os import getcwd
from time import time
import logging
try:
from json import load as json_load
except ImportError:
from simplejson import load as json_load
from ModestMaps.Core import Coordinate
# dictionary of configuration objects for requestLayer().
_previous_configs = {}

import Core
import Config

# regular expression for PATH_INFO
# Tile requests: /<layer>/<zoom>/<column>/<row>.<extension>
_pathinfo_pat = re.compile(r'^/?(?P<l>\w.+)/(?P<z>\d+)/(?P<x>-?\d+)/(?P<y>-?\d+)\.(?P<e>\w+)$')
# Preview requests: /<layer>/ or /<layer>/preview.html
_preview_pat = re.compile(r'^/?(?P<l>\w.+)/(preview\.html)?$')
def getTile(layer, coord, extension, ignore_cached=False):
    """ Get a type string and tile binary for a given request layer tile.
    
        Arguments:
        - layer: instance of Core.Layer to render.
        - coord: one ModestMaps.Core.Coordinate corresponding to a single tile.
        - extension: filename extension to choose response type, e.g. "png" or "jpg".
        - ignore_cached: always re-render the tile, whether it's in the cache or not.
        
        This is the main entry point, after site configuration has been loaded
        and individual tiles need to be rendered.

        Returns (mimetype, body); tile_from records the source for logging.
    """
    start_time = time()
    mimetype, format = layer.getTypeByExtension(extension)
    cache = layer.config.cache
    
    if not ignore_cached:
        # Start by checking for a tile in the cache.
        body = cache.read(layer, coord, format)
        tile_from = 'cache'
    else:
        # Then look in the bag of recent tiles.
        body = Core._getRecentTile(layer, coord, format)
        tile_from = 'recent tiles'
    
    # If no tile was found, dig deeper
    if body is None:
        try:
            lockCoord = None

            if layer.write_cache:
                # this is the coordinate that actually gets locked.
                lockCoord = layer.metatile.firstCoord(coord)
                
                # We may need to write a new tile, so acquire a lock.
                cache.lock(layer, lockCoord, format)
            
            if not ignore_cached:
                # There's a chance that some other process has
                # written the tile while the lock was being acquired.
                body = cache.read(layer, coord, format)
                tile_from = 'cache after all'
    
            if body is None:
                # No one else wrote the tile, do it here.
                buff = StringIO()

                try:
                    tile = layer.render(coord, format)
                    save = True
                except Core.NoTileLeftBehind, e:
                    # The provider deliberately produced no cacheable tile.
                    tile = e.tile
                    save = False

                if not layer.write_cache:
                    save = False
                
                # Pick per-format save options configured on the layer.
                if format.lower() == 'jpeg':
                    save_kwargs = layer.jpeg_options
                elif format.lower() == 'png':
                    save_kwargs = layer.png_options
                else:
                    save_kwargs = {}
                
                tile.save(buff, format, **save_kwargs)
                body = buff.getvalue()
                
                if save:
                    cache.save(body, layer, coord, format)

                tile_from = 'layer.render()'

        finally:
            if lockCoord:
                # Always clean up a lock when it's no longer being used.
                cache.unlock(layer, lockCoord, format)
        
        # Remember the tile for subsequent ignore_cached requests.
        Core._addRecentTile(layer, coord, format, body)
    
    logging.info('TileStache.getTile() %s/%d/%d/%d.%s via %s in %.3f', layer.name(), coord.zoom, coord.column, coord.row, extension, tile_from, time() - start_time)
    
    return mimetype, body
def getPreview(layer):
    """ Get a type string and dynamic map viewer HTML for a given layer.
    """
    # Core._preview renders the slippy-map preview page for this layer.
    return 'text/html', Core._preview(layer)
def parseConfigfile(configpath):
    """ Parse a configuration file and return a Configuration object.
    
        Configuration file is formatted as JSON with two sections, "cache" and "layers":
        
          {
            "cache": { ... },
            "layers": {
              "layer-1": { ... },
              "layer-2": { ... },
              ...
            }
          }
        
        The full path to the file is significant, used to
        resolve any relative paths found in the configuration.
        
        See the Caches module for more information on the "caches" section,
        and the Core and Providers modules for more information on the
        "layers" section.
    """
    # urlopen() handles local paths and URLs alike.
    config_dict = json_load(urlopen(configpath))
    
    scheme, host, path, p, q, f = urlparse(configpath)
    
    if scheme == '':
        # A bare filesystem path; treat it as a file:// URL.
        scheme = 'file'
        path = realpath(path)
    
    # dirpath is used to resolve relative paths inside the configuration.
    dirpath = '%s://%s%s' % (scheme, host, dirname(path).rstrip('/') + '/')

    return Config.buildConfiguration(config_dict, dirpath)
def splitPathInfo(pathinfo):
    """ Converts a PATH_INFO string to layer name, coordinate, and extension parts.
    
        Example: "/layer/0/0/0.png", leading "/" optional.
        Preview paths ("/layer/" or "/layer/preview.html") yield a None
        coordinate and an "html" extension.
    """
    if pathinfo == '/':
        return None, None, None

    tile_match = _pathinfo_pat.match(pathinfo or '')
    if tile_match:
        layer, row, column, zoom, extension = [tile_match.group(p) for p in 'lyxze']
        return layer, Coordinate(int(row), int(column), int(zoom)), extension

    preview_match = _preview_pat.match(pathinfo or '')
    if preview_match:
        return preview_match.group('l'), None, 'html'

    raise Core.KnownUnknown('Bad path: "%s". I was expecting something more like "/example/0/0/0.png"' % pathinfo)
def mergePathInfo(layer, coord, extension):
    """ Converts layer name, coordinate and extension back to a PATH_INFO string.
    
        See also splitPathInfo().
    """
    # Format with explicit arguments instead of the fragile "% locals()"
    # idiom, which silently depends on local variable names.
    return '/%s/%d/%d/%d.%s' % (layer, coord.zoom, coord.column, coord.row, extension)
def requestLayer(config, path_info):
    """ Return a Layer.
    
        Requires a configuration and PATH_INFO (e.g. "/example/0/0/0.png").
        
        Config parameter can be a file path string for a JSON configuration file
        or a configuration object with 'cache', 'layers', and 'dirpath' properties.
    """
    # Python 2: accept both byte and unicode strings as config paths.
    if type(config) in (str, unicode):
        #
        # Should be a path to a configuration file we can load;
        # build a tuple key into previously-seen config objects.
        #
        key = hasattr(config, '__hash__') and (config, getcwd())
        
        if key in _previous_configs:
            # Reuse the cached configuration for this (path, cwd) pair.
            config = _previous_configs[key]
        else:
            config = parseConfigfile(config)
            if key:
                _previous_configs[key] = config
    else:
        assert hasattr(config, 'cache'), 'Configuration object must have a cache.'
        assert hasattr(config, 'layers'), 'Configuration object must have layers.'
        assert hasattr(config, 'dirpath'), 'Configuration object must have a dirpath.'
    
    # ensure that path_info is at least a single "/"
    path_info = '/' + (path_info or '').lstrip('/')
    
    if path_info == '/':
        # The root path gets a placeholder layer tied to the configuration.
        return Core.Layer(config, None, None)

    layername = splitPathInfo(path_info)[0]
    
    if layername not in config.layers:
        raise Core.KnownUnknown('"%s" is not a layer I know about. Here are some that I do know about: %s.' % (layername, ', '.join(sorted(config.layers.keys()))))
    
    return config.layers[layername]
def requestHandler(config_hint, path_info, query_string):
    """ Generate a mime-type and response body for a given request.
    
        Requires a configuration and PATH_INFO (e.g. "/example/0/0/0.png").
        
        Config_hint parameter can be a path string for a JSON configuration file
        or a configuration object with 'cache', 'layers', and 'dirpath' properties.
        
        Query string is optional, currently used for JSON callbacks.
        
        Calls getTile() to render actual tiles, and getPreview() to render preview.html.
    """
    try:
        # ensure that path_info is at least a single "/"
        path_info = '/' + (path_info or '').lstrip('/')
        
        layer = requestLayer(config_hint, path_info)
        query = parse_qs(query_string or '')
        try:
            callback = query['callback'][0]
        except KeyError:
            callback = None
        
        #
        # Special case for index page.
        #
        if path_info == '/':
            return getattr(layer.config, 'index', ('text/plain', 'TileStache says hello.'))
    
        coord, extension = splitPathInfo(path_info)[1:]
        
        if path_info == '/':
            # NOTE(review): unreachable -- the '/' case already returned above.
            raise Exception(path_info)
        
        elif extension == 'html' and coord is None:
            mimetype, content = getPreview(layer)
    
        elif extension.lower() in layer.redirects:
            # This extension is configured to redirect to another one.
            other_extension = layer.redirects[extension.lower()]
            other_path_info = mergePathInfo(layer.name(), coord, other_extension)
            raise Core.TheTileIsInAnotherCastle(other_path_info)
        
        else:
            mimetype, content = getTile(layer, coord, extension)
    
        # Wrap JSON responses in the requested JSONP callback.
        if callback and 'json' in mimetype:
            mimetype, content = 'application/javascript; charset=utf-8', '%s(%s)' % (callback, content)

    except Core.KnownUnknown, e:
        # Render a friendly plain-text error page.
        out = StringIO()
        
        print >> out, 'Known unknown!'
        print >> out, e
        print >> out, ''
        print >> out, '\n'.join(Core._rummy())
        
        mimetype, content = 'text/plain', out.getvalue()

    return mimetype, content
def cgiHandler(environ, config='./tilestache.cfg', debug=False):
    """ Read environment PATH_INFO, load up configuration, talk to stdout by CGI.
    
        Calls requestHandler().
        
        Config parameter can be a file path string for a JSON configuration file
        or a configuration object with 'cache', 'layers', and 'dirpath' properties.
    """
    if debug:
        # Show tracebacks in the browser instead of a bare 500.
        import cgitb
        cgitb.enable()
    
    path_info = environ.get('PATH_INFO', None)
    query_string = environ.get('QUERY_STRING', None)
    
    try:
        mimetype, content = requestHandler(config, path_info, query_string)
    
    except Core.TheTileIsInAnotherCastle, e:
        # Emit a CGI redirect to the configured alternate extension.
        other_uri = environ['SCRIPT_NAME'] + e.path_info
        
        if query_string:
            other_uri += '?' + query_string
    
        print >> stdout, 'Status: 302 Found'
        print >> stdout, 'Location:', other_uri
        print >> stdout, 'Content-Type: text/plain\n'
        print >> stdout, 'You are being redirected to', other_uri
        return
    
    layer = requestLayer(config, path_info)
    
    if layer.allowed_origin:
        print >> stdout, 'Access-Control-Allow-Origin:', layer.allowed_origin
    
    if layer.max_cache_age is not None:
        # Emit both Expires and Cache-Control for broad client coverage.
        expires = datetime.utcnow() + timedelta(seconds=layer.max_cache_age)
        print >> stdout, 'Expires:', expires.strftime('%a %d %b %Y %H:%M:%S GMT')
        print >> stdout, 'Cache-Control: public, max-age=%d' % layer.max_cache_age
    
    print >> stdout, 'Content-Length: %d' % len(content)
    print >> stdout, 'Content-Type: %s\n' % mimetype
    print >> stdout, content
class WSGITileServer:
    """ Create a WSGI application that can handle requests from any server that talks WSGI.
    
        The WSGI application is an instance of this class. Example:
        
          app = WSGITileServer('/path/to/tilestache.cfg')
          werkzeug.serving.run_simple('localhost', 8080, app)
    """

    def __init__(self, config, autoreload=False):
        """ Initialize a callable WSGI instance.
        
            Config parameter can be a file path string for a JSON configuration
            file or a configuration object with 'cache', 'layers', and
            'dirpath' properties.
            
            Optional autoreload boolean parameter causes config to be re-read
            on each request, applicable only when config is a JSON file.
        """
        # Python 2: accept both byte and unicode strings as config paths.
        if type(config) in (str, unicode):
            self.autoreload = autoreload
            self.config_path = config
    
            try:
                self.config = parseConfigfile(config)
            except:
                print "Error loading Tilestache config:"
                raise

        else:
            assert hasattr(config, 'cache'), 'Configuration object must have a cache.'
            assert hasattr(config, 'layers'), 'Configuration object must have layers.'
            assert hasattr(config, 'dirpath'), 'Configuration object must have a dirpath.'
            
            # Autoreload only makes sense when we own a file path to re-read.
            self.autoreload = False
            self.config_path = None
            self.config = config

    def __call__(self, environ, start_response):
        """ WSGI entry point: route PATH_INFO to a tile, preview, or error response.
        """
        if self.autoreload: # re-parse the config file on every request
            try:
                self.config = parseConfigfile(self.config_path)
            except Exception, e:
                raise Core.KnownUnknown("Error loading Tilestache config file:\n%s" % str(e))

        try:
            layer, coord, ext = splitPathInfo(environ['PATH_INFO'])
        except Core.KnownUnknown, e:
            return self._response(start_response, '400 Bad Request', str(e))

        # Check the layer up front so unknown layers 404 before rendering.
        if layer and layer not in self.config.layers:
            return self._response(start_response, '404 Not Found')

        try:
            mimetype, content = requestHandler(self.config, environ['PATH_INFO'], environ['QUERY_STRING'])
        
        except Core.TheTileIsInAnotherCastle, e:
            # Redirect to the configured alternate extension.
            other_uri = environ['SCRIPT_NAME'] + e.path_info
            
            if environ['QUERY_STRING']:
                other_uri += '?' + environ['QUERY_STRING']
    
            start_response('302 Found', [('Location', other_uri), ('Content-Type', 'text/plain')])
            return ['You are being redirected to %s\n' % other_uri]
        
        request_layer = requestLayer(self.config, environ['PATH_INFO'])
        allowed_origin = request_layer.allowed_origin
        max_cache_age = request_layer.max_cache_age
        return self._response(start_response, '200 OK', str(content), mimetype, allowed_origin, max_cache_age)

    def _response(self, start_response, code, content='', mimetype='text/plain', allowed_origin='', max_cache_age=None):
        """ Build headers (CORS and cache-control as configured) and emit a response.
        """
        headers = [('Content-Type', mimetype), ('Content-Length', str(len(content)))]
        
        if allowed_origin:
            headers.append(('Access-Control-Allow-Origin', allowed_origin))
        
        if max_cache_age is not None:
            # Emit both Expires and Cache-Control for broad client coverage.
            expires = datetime.utcnow() + timedelta(seconds=max_cache_age)
            headers.append(('Expires', expires.strftime('%a %d %b %Y %H:%M:%S GMT')))
            headers.append(('Cache-Control', 'public, max-age=%d' % max_cache_age))
            
        start_response(code, headers)
        return [content]
def modpythonHandler(request):
    """ Handle a mod_python request.
    
        Calls requestHandler().
    
        Example Apache configuration for TileStache:
    
        <Directory /home/migurski/public_html/TileStache>
            AddHandler mod_python .py
            PythonHandler TileStache::modpythonHandler
            PythonOption config /etc/tilestache.cfg
        </Directory>
    
        Configuration options, using PythonOption directive:
        - config: path to configuration file, defaults to "tilestache.cfg",
            using request.filename as the current working directory.
    """
    from mod_python import apache
    
    # Resolve the config path relative to the handler script's directory.
    config_path = request.get_options().get('config', 'tilestache.cfg')
    config_path = realpath(pathjoin(dirname(request.filename), config_path))
    
    path_info = request.path_info
    query_string = request.args
    
    mimetype, content = requestHandler(config_path, path_info, query_string)

    request.status = apache.HTTP_OK
    request.content_type = mimetype
    request.set_content_length(len(content))
    request.send_http_header()

    request.write(content)

    return apache.OK
| {
"content_hash": "931edbc1d014dcaa75f09eeeb720b2d1",
"timestamp": "",
"source": "github",
"line_count": 471,
"max_line_length": 164,
"avg_line_length": 35.78768577494692,
"alnum_prop": 0.5932012339819649,
"repo_name": "mojodna/TileStache",
"id": "038445f5a327c4a1b22ebf2e356af32543e99f9f",
"size": "16856",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TileStache/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "422506"
},
{
"name": "Shell",
"bytes": "130"
}
],
"symlink_target": ""
} |
import functools
import six
import imath
from collections import namedtuple, OrderedDict
import IECore
import Gaffer
import GafferUI
import GafferImage
import GafferImageUI
## \todo
## Ideally Catalogue reordering wouldn't just be managed by the UI layer, but
## the scope of changing this is somewhat large. When we need to do folders,
## then this will probably force us to sort this out. For now, it is at least
## contained within just this file...
##########################################################################
# Column Configuration
#
# All of the Catalogue's columns are configurable. Gaffer provides a default
# set of columns that can be extended or replaced via the registration of new
# names or re-registration of existing ones.
#
# The columns displayed by a Catalogue are controlled by the
# "catalogue:columns" [StringVectorData] metadata on its `imageIndex` plug.
# Default columns are stored via a class registration, ie:
#
# Gaffer.Metadata.registerValue(
# GafferImage.Catalogue, "imageIndex",
# "catalogue:columns", [ ... ]
# )
#
# This should consist of an ordered list of the `names` of columns registered
# via the `CatalogueUI.registerColumns` method.
##########################################################################
# Ordered registry of Column instances, keyed by the registration name used in
# header context menus and in the "catalogue:columns" metadata. Insertion
# order defines menu order.
__registeredColumns = OrderedDict()
# Metadata key on the `imageIndex` plug that stores the visible column names.
_columnsMetadataKey = "catalogue:columns"
# The Column class defines a single column in the Catalogue UI.
# The column class is responsible for providing its header title, and the
# displayed value for each image in the Catalogue.
class Column :

	"""Defines a single column in the Catalogue UI.

	A column is responsible for providing its header title, and the
	displayed value for each image in the Catalogue.
	"""

	def __init__( self, title ) :

		self.__heading = title

	def title( self ) :

		return self.__heading

	# Must be implemented by all column classes. It should return basic-typed
	# IECore.Data (incl. DateTimeData) which will be presented as a string in
	# the Catalogue UI. The method is called with:
	#
	# - image : A GafferImage.Catalogue.Image plug (not to be confused with
	#       a standard ImagePlug).
	#
	# - catalogue : The GafferImage.Catalogue instance attached to the UI.
	#
	# A suitable context is scoped around the call such that catalogue["out"]
	# will provide the image for the row the value is being generated for,
	# rather than the user's current selection.
	def value( self, image, catalogue ) :

		raise NotImplementedError
# An abstract base column type for Columns that wish to present an image rather
# than a text value
class IconColumn( Column ) :

	"""Abstract base for columns presented as an icon rather than text.

	Derived classes should return, from value(), the name of an image on
	Gaffer's image path. See Column.value for details on the arguments
	passed to this method, and the calling context.
	"""

	def value( self, image, catalogue ) :

		raise NotImplementedError
# An abstract base column type for Columns that can derive their value with
# simple callables or lambdas, eg:
#
# column = SimpleColumn( "Name", lambda image, _ : image.getName() ) )
#
class SimpleColumn( Column ) :

	"""A Column whose value is computed by a supplied callable, eg :

	column = SimpleColumn( "Name", lambda image, _ : image.getName() )
	"""

	def __init__( self, title, valueProvider ) :

		Column.__init__( self, title )

		self.__provider = valueProvider

	def value( self, image, catalogue ) :

		# Delegate straight to the callable supplied at construction.
		return self.__provider( image, catalogue )
# Register a new column or overwrite an existing column. Registered columns
# appear in the Catalogue header context menu, and can be set as default
# columns in the "catalogue:columns" metadata on Catalogue's `imageIndex` plug.
# The registered name is used for the menu path when presenting available columns
# to the user. As such, it can contain `/` for sub-menus and should be formatted
# with appropriate case/spaces.
def registerColumn( name, column ) :

	# Plain assignment: re-registering an existing name replaces the column
	# while keeping its original position in the OrderedDict's ordering.
	__registeredColumns[ name ] = column
# Removes a column. It will no longer show up in the Catalogue UI and can't be
# set as a default column.
def deregisterColumn( name ) :

	"""Removes a column. It will no longer show up in the Catalogue UI and
	can't be set as a default column. Unknown names are silently ignored."""

	__registeredColumns.pop( name, None )
# Returns the instance of a Column class registered for `name` or
# None if no column has been registered.
def column( name ) :

	# Returns the Column instance registered for `name`, or None if nothing
	# has been registered under that name.
	return __registeredColumns.get( name, None )
# Returns all registered column names
def registeredColumns() :

	"""Returns the names of all registered columns, in registration order."""

	# Return a concrete list rather than the dict view : callers concatenate
	# the result with other lists (e.g. `_ImagesPath.propertyNames()` does
	# `Gaffer.Path.propertyNames( self ) + registeredColumns()`), which raises
	# TypeError for a `dict_keys` view under Python 3. Python 2's `keys()`
	# returned a list, so this simply restores that behaviour portably.
	return list( __registeredColumns.keys() )
#
# Convenience Column classes
#
# A Column class that retrieves its value from the catalogue item's image
# metadata. If multiple names are provided, the first one present will be used,
# allowing a single column to support several source names depending on the
# image's origin.
class ImageMetadataColumn( Column ) :

	"""A Column whose value comes from the catalogue item's image metadata.

	If multiple names are provided, the first one present is used, allowing
	a single column to support several source names depending on the
	image's origin.
	"""

	def __init__( self, title, nameOrNames, defaultValue = None ) :

		Column.__init__( self, title )

		# Normalise a single name to a list so value() has one code path.
		names = [ nameOrNames, ] if isinstance( nameOrNames, six.string_types ) else nameOrNames
		self.__names = names
		self.__defaultValue = defaultValue

	def value( self, image, catalogue ) :

		metadata = catalogue["out"].metadata()

		# First present name wins; otherwise fall back to the default.
		candidates = ( metadata.get( name, None ) for name in self.__names )
		return next( ( v for v in candidates if v is not None ), self.__defaultValue )
# A Column class that retrieves its value from render-time context variable
# values passed through the catalogue item's image metadata. If multiple names
# are provided, the first present context entry will be used. Note: Not all
# context variables are available via image metadata, the exact list is renderer
# dependent, but it is generally limited to basic value types
class ContextVariableColumn( ImageMetadataColumn ) :

	"""A Column reading render-time context variable values passed through
	the catalogue item's image metadata, under the "gaffer:context:" prefix.

	If multiple names are provided, the first present entry is used. Note :
	not all context variables are available via image metadata - the exact
	list is renderer dependent, but it is generally limited to basic value
	types.
	"""

	def __init__( self, title, nameOrNames, defaultValue = None ) :

		if isinstance( nameOrNames, six.string_types ) :
			nameOrNames = [ nameOrNames, ]

		ImageMetadataColumn.__init__(
			self, title,
			[ "gaffer:context:%s" % name for name in nameOrNames ],
			defaultValue
		)
#
# Standard Columns
#
class __StatusIconColumn( IconColumn ) :

	"""Leading status column : indicates by icon whether each image is backed
	by a file on disk, is an in-progress display render, or failed to load."""

	def __init__( self ) :

		# Deliberately untitled - state is conveyed purely by the icon.
		IconColumn.__init__( self, "" )

	def value( self, image, catalogue ) :

		if not image["fileName"].getValue() :
			# No backing file, so the image comes from a display driver.
			return "catalogueStatusDisplay"

		# Attempt to read the metadata to check the image is loadable. Given
		# other columns are going to do this anyway, we're not adding too much
		# overhead here.
		try :
			catalogue["out"].metadata()
		except Gaffer.ProcessException :
			return "errorNotificationSmall"

		return "catalogueStatusDisk"
# Register the standard columns. "Status" and "Name" also form the default
# visible set (registered as metadata further below).
registerColumn( "Status", __StatusIconColumn() )
registerColumn( "Name", SimpleColumn( "Name", lambda image, _ : image.getName() ) )
registerColumn( "Frame", ContextVariableColumn( "Frame", "frame" ) )
registerColumn( "Description", ImageMetadataColumn( "Description", "ImageDescription" ) )
# Image properties
def __resolutionColumnValueProvider( image, catalogue ) :

	# Report the format of the evaluated catalogue output for this row,
	# displayed as "<width> x <height>".
	outputFormat = catalogue["out"].format()
	return "{0} x {1}".format( outputFormat.width(), outputFormat.height() )
def __formatBox( box ) :

	"""Formats a 2d box (assumes imath-style `min()`/`max()` accessors with
	`.x`/`.y` components - used here for data/display windows) as
	"(minX, minY) - (maxX, maxY)".

	The previous format string punctuated the two corners inconsistently
	("(%d %d)" for the minimum but "(%d, %d)" for the maximum); both corners
	now use the same comma-separated form.
	"""

	return "(%d, %d) - (%d, %d)" % ( box.min().x, box.min().y, box.max().x, box.max().y )
# Image-property columns. The "Image/" prefix places them in a sub-menu of
# the header context menu. The lambdas ignore the image plug and read from
# the catalogue's evaluated output instead (see Column.value's contract).
registerColumn(
	"Image/Resolution",
	SimpleColumn( "Resolution", __resolutionColumnValueProvider )
)
registerColumn(
	"Image/Channels",
	SimpleColumn( "Channels", lambda _, c : ", ".join( c["out"].channelNames() ) )
)
registerColumn(
	"Image/Type",
	SimpleColumn( "Image Type", lambda _, c : "Deep" if c["out"].deep() else "Flat" )
)
registerColumn(
	"Image/Pixel Aspect Ratio",
	SimpleColumn( "P. Aspect", lambda _, c : c["out"].format().getPixelAspect() )
)
registerColumn(
	"Image/Data Window",
	SimpleColumn( "Data Window", lambda _, c : __formatBox( c["out"].dataWindow() ) )
)
registerColumn(
	"Image/Display Window",
	SimpleColumn( "Display Window", lambda _, c : __formatBox( c["out"].format().getDisplayWindow() ) )
)

# Default visible column set, stored as class metadata on `imageIndex` so
# per-plug metadata can override it (see _ImageListing.__getColumns).
Gaffer.Metadata.registerValue(
	GafferImage.Catalogue, "imageIndex", _columnsMetadataKey,
	IECore.StringVectorData( [ "Status", "Name" ] )
)
##########################################################################
# Node registration
##########################################################################
Gaffer.Metadata.registerNode(
GafferImage.Catalogue,
"description",
"""
Stores a catalogue of images to be browsed. Images can either be loaded
from files or rendered directly into the catalogue.
To send a live render to a Catalogue, an "ieDisplay" output definition
should be used with the following parameters :
- driverType : "ClientDisplayDriver"
- displayHost : host name ("localhost" is sufficient for local renders)
- displayPort : `GafferImage.Catalogue.displayDriverServer().portNumber()`
- remoteDisplayType : "GafferImage::GafferDisplayDriver"
- catalogue:name : The name of the catalogue to render to (optional)
""",
plugs = {
"images" : [
"description",
"""
Specifies the list of images currently
stored in the catalogue.
Either add images interactively
using the UI, or use the API to construct
Catalogue.Image plugs and parent them
here.
""",
"plugValueWidget:type", "",
],
"imageIndex" : [
"description",
"""
Specifies the index of the currently
selected image. This forms the output
from the catalogue node.
""",
"plugValueWidget:type", "GafferImageUI.CatalogueUI._ImageListing",
"label", "",
"layout:section", "Images",
],
"name" : [
"description",
"""
Used to distinguish between catalogues, so that when
multiple catalogues exist, it is possible to send a
render to just one of them. Renders are matched
to catalogues by comparing the "catalogue:name" parameter
from the renderer output with the value of this plug.
""",
],
"directory" : [
"description",
"""
The directory where completed renders
are saved. This allows them to remain
in the catalogue for the next session.
""",
"plugValueWidget:type", "GafferUI.FileSystemPathPlugValueWidget",
"path:leaf", False,
],
},
)
Gaffer.Metadata.registerValue( GafferImage.Catalogue.Image, "renameable", True )
##########################################################################
# Viewer hot-keys
##########################################################################
def addCatalogueHotkeys( editor ) :

	"""Installs the Catalogue Up/Down hotkey handling on `editor`.
	Editors other than Viewers are ignored."""

	if isinstance( editor, GafferUI.Viewer ) :
		editor.keyPressSignal().connect( __viewerKeyPress, scoped = False )
def __viewerKeyPress( viewer, event ) :

	# Key-press handler installed by addCatalogueHotkeys(). Up/Down arrows
	# walk upstream of the viewer input looking for a Catalogue node, and
	# increment/decrement its active index. Returns True when the event was
	# handled (so it isn't propagated further), False otherwise.
	if event.key not in ( "Down", "Up" ) :
		return False

	# Only applicable when the Viewer is showing an image.
	if not isinstance( viewer.view(), GafferImageUI.ImageView ) :
		return False

	catalogue = Gaffer.NodeAlgo.findUpstream(
		viewer.view(),
		lambda node : isinstance( node, GafferImage.Catalogue ),
		order = Gaffer.NodeAlgo.VisitOrder.DepthFirst
	)

	if catalogue is None :
		return False

	__incrementImageIndex( catalogue, event.key )

	return True
def __incrementImageIndex( catalogue, direction ) :

	# Moves the catalogue's selection one step in the order the UI displays
	# (which may differ from plug order). `direction` is "Up" or "Down".
	indexPlug = catalogue["imageIndex"].source()

	# Respect read-only/driven plugs - silently do nothing.
	if Gaffer.MetadataAlgo.readOnly( indexPlug ) or not indexPlug.settable() :
		return

	# Match the UI's top-to-bottom order instead of 'up is a larger number'
	increment = -1 if direction == "Up" else 1

	# The Catalogue UI re-orders images internally using metadata, rather than
	# by shuffling plugs. As such, we can't just set imageIndex. We don't want
	# to be poking into the specifics of how this works, so for now we re-use
	# _ImagesPath as it knows all that logic.
	images = catalogue["images"].source().children()
	if len( images ) == 0 :
		return

	maxIndex = len( images ) - 1
	orderedImages = _ImagesPath( catalogue["images"].source(), [] )._orderedImages()

	# There are times when this can be out of sync with the number of images.
	# Generally when the UI hasn't been opened.
	currentPlugIndex = min( indexPlug.getValue(), maxIndex )

	# Translate plug index -> UI index, step and clamp there, then translate
	# back to the plug index the `imageIndex` plug actually stores.
	catalogueIndex = orderedImages.index( images[currentPlugIndex] )
	nextIndex = max( min( catalogueIndex + increment, maxIndex ), 0 )
	nextPlugIndex = images.index( orderedImages[nextIndex] )

	if nextPlugIndex != currentPlugIndex :
		indexPlug.setValue( nextPlugIndex )
##########################################################################
# _CataloguePath
##########################################################################
def _findSourceCatalogue( imagesPlug ) :

	"""Follows plug outputs from `imagesPlug` (depth-first) until a plug
	parented to a GafferImage.Catalogue is found, returning that Catalogue.
	Returns None when no Catalogue is reachable. This resolves the real
	source node when the `images` plug has been promoted."""

	def search( plug ) :

		parent = plug.parent()
		if isinstance( parent, GafferImage.Catalogue ) :
			return parent

		for output in plug.outputs() :
			found = search( output )
			if found is not None :
				return found

		return None

	return search( imagesPlug )
class _ImagesPath( Gaffer.Path ) :

	"""A Gaffer.Path presenting a Catalogue's images as a flat listing.

	Each child path is one Catalogue.Image plug, ordered by the
	'image:index' metadata rather than plug order, and every registered
	column is exposed as a path property so PathListingWidget can display
	it.
	"""

	# Metadata key storing each image's position in the UI ordering.
	indexMetadataName = 'image:index'

	def __init__( self, images, path, root = "/", filter = None ) :

		Gaffer.Path.__init__( self, path, root, filter )

		self.__images = images
		self.__catalogue = _findSourceCatalogue( images )

	def copy( self ) :

		return self.__class__( self.__images, self[:], self.root(), self.getFilter() )

	def isLeaf( self ) :

		# Only the root (length 0) has children; image entries are leaves.
		return len( self ) > 0

	def propertyNames( self ) :

		# NOTE(review): relies on registeredColumns() returning a list - a
		# Python 3 dict view here would fail to concatenate. Confirm.
		return Gaffer.Path.propertyNames( self ) + registeredColumns()

	def property( self, name ) :

		# Non-column properties are handled by the base class.
		if name not in registeredColumns() :
			return Gaffer.Path.property( self, name )

		definition = column( name )
		imageName = self[ -1 ]
		image = self.__images[ imageName ]

		# The Catalogue API supports overriding the active image via a
		# context variable; this allows the value provider to just use
		# catalogue["out"] to get to the correct image without needing to
		# understand the internal workings.
		with Gaffer.Context( self.__catalogue.scriptNode().context() ) as context :
			context[ "catalogue:imageName" ] = imageName
			try :
				return definition.value( image, self.__catalogue )
			except Gaffer.ProcessException :
				# Suppress error. The GraphEditor will be displaying the
				# error anyway, as will the standard type column, and we're
				# not in a position to do anything more helpful.
				return None

	def _orderedImages( self ) :

		# Returns the image plugs sorted into UI order. Avoid repeat lookups
		# for plugs with no ui index by first getting all images with their
		# plug indices, then updating those with any metadata.
		imageAndIndices = [ [ image, plugIndex ] for plugIndex, image in enumerate( self.__images.children() ) ]
		for imageAndIndex in imageAndIndices :
			uiIndex = Gaffer.Metadata.value( imageAndIndex[0], _ImagesPath.indexMetadataName )
			if uiIndex is not None :
				imageAndIndex[1] = uiIndex

		return [ i[0] for i in sorted( imageAndIndices, key = lambda i : i[1] ) ]

	def _children( self ) :

		if len( self ) != 0 :
			return []

		return [
			self.__class__( self.__images, [ image.getName() ], self.root(), self.getFilter() )
			for image in self._orderedImages()
		]

	def _pathChangedSignalCreated( self ) :

		Gaffer.Path._pathChangedSignalCreated( self )

		# Connect to all the signals we need to in order
		# to emit pathChangedSignal at the appropriate times.
		self.__childAddedConnection = self.__images.childAddedSignal().connect( Gaffer.WeakMethod( self.__childAdded ) )
		self.__childRemovedConnection = self.__images.childRemovedSignal().connect( Gaffer.WeakMethod( self.__childRemoved ) )
		self.__cataloguePlugDirtiedConnection = self.__catalogue.plugDirtiedSignal().connect( Gaffer.WeakMethod( self.__cataloguePlugDirtied ) )
		self.__nameChangedConnections = {
			image : image.nameChangedSignal().connect( Gaffer.WeakMethod( self.__nameChanged ) )
			for image in self.__images
		}

	@staticmethod
	def _updateUIIndices( orderedImages ) :

		# Rewrites the 'image:index' metadata so it matches the given order.
		for i, image in enumerate( orderedImages ) :
			Gaffer.Metadata.registerValue( image, _ImagesPath.indexMetadataName, i )

	def __childAdded( self, parent, child ) :

		assert( parent.isSame( self.__images ) )
		# Track renames of the new image so the listing stays in sync.
		self.__nameChangedConnections[child] = child.nameChangedSignal().connect( Gaffer.WeakMethod( self.__nameChanged ) )
		self._emitPathChanged()

	def __childRemoved( self, parent, child ) :

		assert( parent.isSame( self.__images ) )
		del self.__nameChangedConnections[child]
		self._emitPathChanged()

	def __nameChanged( self, child ) :

		self._emitPathChanged()

	def __cataloguePlugDirtied( self, plug ):

		# Only dirtying of image plugs can change displayed values.
		if plug.ancestor( GafferImage.Catalogue.Image ) :
			self._emitPathChanged()
##########################################################################
# _ImageListing
##########################################################################
class _ImageListing( GafferUI.PlugValueWidget ) :
def __init__( self, plug, **kw ) :
self.__column = GafferUI.ListContainer( spacing = 4 )
GafferUI.PlugValueWidget.__init__( self, self.__column, plug, **kw )
with self.__column :
columns = self.__listingColumns()
self.__pathListing = GafferUI.PathListingWidget(
_ImagesPath( self.__images(), [] ),
columns = columns,
allowMultipleSelection = True,
sortable = False,
horizontalScrollMode = GafferUI.ScrollMode.Automatic
)
self.__pathListing.setDragPointer( "" )
self.__pathListing.setHeaderVisible( True )
self.__pathListing.selectionChangedSignal().connect(
Gaffer.WeakMethod( self.__pathListingSelectionChanged ), scoped = False
)
self.__pathListing.dragEnterSignal().connect(
Gaffer.WeakMethod( self.__pathListingDragEnter ), scoped = False
)
self.__pathListing.dragLeaveSignal().connect(
Gaffer.WeakMethod( self.__pathListingDragLeave ), scoped = False
)
self.__pathListing.dragMoveSignal().connect(
Gaffer.WeakMethod( self.__pathListingDragMove ), scoped = False
)
self.__pathListing.dropSignal().connect(
Gaffer.WeakMethod( self.__pathListingDrop ), scoped = False
)
self.keyPressSignal().connect( Gaffer.WeakMethod( self.__keyPress ), scoped = False )
with GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, spacing = 4 ) :
addButton = GafferUI.Button( image = "pathChooser.png", hasFrame = False, toolTip = "Load image" )
addButton.clickedSignal().connect( Gaffer.WeakMethod( self.__addClicked ), scoped = False )
self.__duplicateButton = GafferUI.Button( image = "duplicate.png", hasFrame = False, toolTip = "Duplicate selected image, hold <kbd>alt</kbd> to view copy." )
self.__duplicateButton.setEnabled( False )
self.__duplicateButton.clickedSignal().connect( Gaffer.WeakMethod( self.__duplicateClicked ), scoped = False )
self.__exportButton = GafferUI.Button( image = "export.png", hasFrame = False, toolTip = "Export selected image" )
self.__exportButton.setEnabled( False )
self.__exportButton.clickedSignal().connect( Gaffer.WeakMethod( self.__exportClicked ), scoped = False )
self.__extractButton = GafferUI.Button( image = "extract.png", hasFrame = False, toolTip = "Create CatalogueSelect node for selected image" )
self.__extractButton.setEnabled( False )
self.__extractButton.clickedSignal().connect( Gaffer.WeakMethod( self.__extractClicked ), scoped = False )
GafferUI.Spacer( imath.V2i( 0 ), parenting = { "expand" : True } )
self.__removeButton = GafferUI.Button( image = "delete.png", hasFrame = False, toolTip = "Remove selected image" )
self.__removeButton.setEnabled( False )
self.__removeButton.clickedSignal().connect( Gaffer.WeakMethod( self.__removeClicked ), scoped = False )
GafferUI.Divider()
with GafferUI.Collapsible( label = "Image Properties", collapsed = False ) :
with GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical, spacing = 4 ) :
with GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, spacing = 4 ) :
GafferUI.Label( "Name" )
self.__nameWidget = GafferUI.NameWidget( graphComponent = None )
GafferUI.Label( "Description" )
self.__descriptionWidget = GafferUI.MultiLineStringPlugValueWidget( plug = None )
Gaffer.Metadata.plugValueChangedSignal().connect( Gaffer.WeakMethod( self.__plugMetadataValueChanged ), scoped = False )
self.contextMenuSignal().connect( Gaffer.WeakMethod( self.__contextMenu ), scoped = False )
self._updateFromPlug()
def getToolTip( self ) :
# Suppress the default imageIndex tool-tip until we can do something
# more intelligent. We can't use setToolTip as PlugValueWidget defaults
# to the plug description for 'falsy' values.
return None
def _updateFromPlug( self ) :
with self.getContext() :
index = self.getPlug().getValue()
images = self.__images()
if len( images ) :
image = images[index % len( images )]
indices = self.__indicesFromSelection()
if index not in indices :
self.__pathListing.setSelection( IECore.PathMatcher( [ "/" + image.getName() ] ) )
self.__descriptionWidget.setPlug( image["description"] )
self.__nameWidget.setGraphComponent( image )
else :
self.__descriptionWidget.setPlug( None )
self.__nameWidget.setGraphComponent( None )
self.__column.setEnabled( self._editable() )
def __plugMetadataValueChanged( self, typeId, plugPath, key, plug ) :
if key != _columnsMetadataKey :
return
if plug and not plug.isSame( self.getPlug() ) :
return
self.__pathListing.setColumns( self.__listingColumns() )
def __getColumns( self ) :
# Support for promoted plugs.
# The plug data may have been reset, or it may have been promoted in a
# previous version of gaffer. As such, we can't assume there is a
# registered class value for our plug. Fall back on the Catalogue nodes
# plug's value as this will consider the class default columns value.
## \todo Refactor when we get metadata delegation for promoted plugs
return Gaffer.Metadata.value( self.getPlug(), _columnsMetadataKey ) \
or Gaffer.Metadata.value( self.__catalogue()["imageIndex"], _columnsMetadataKey )
def __setColumns( self, columns ) :
with Gaffer.UndoScope( self.getPlug().ancestor( Gaffer.ScriptNode ) ) :
Gaffer.Metadata.registerValue( self.getPlug(), _columnsMetadataKey, IECore.StringVectorData( columns ) )
def __resetColumns( self, *unused ) :
with Gaffer.UndoScope( self.getPlug().ancestor( Gaffer.ScriptNode ) ) :
Gaffer.Metadata.deregisterValue( self.getPlug(), _columnsMetadataKey )
def __toggleColumn( self, column, visible ) :
columns = list( self.__getColumns() )
if visible :
columns.append( column )
else :
columns.remove( column )
self.__setColumns( columns )
def __listingColumns( self ) :
columns = []
for name in self.__getColumns() :
definition = column( name )
if not definition :
IECore.msg(
IECore.Msg.Level.Error,
"GafferImageUI.CatalogueUI", "No column registered with name '%s'" % name
)
continue
if isinstance( definition, IconColumn ) :
c = GafferUI.PathListingWidget.IconColumn( definition.title(), "", name )
else :
c = GafferUI.PathListingWidget.StandardColumn( definition.title(), name )
columns.append( c )
return columns
def __catalogue( self ) :
return _findSourceCatalogue( self.getPlug() )
def __images( self ) :
return self.__catalogue()["images"].source()
def __orderedImages( self ) :
return _ImagesPath( self.__images(), [] )._orderedImages()
def __indicesFromSelection( self ) :
indices = []
selection = self.__pathListing.getSelection()
for i, image in enumerate( self.__images() ) :
if selection.match( "/" + image.getName() ) & selection.Result.ExactMatch :
indices.append( i )
return indices
def __pathListingSelectionChanged( self, pathListing ) :
indices = self.__indicesFromSelection()
self.__removeButton.setEnabled( bool( indices ) )
self.__extractButton.setEnabled( bool( indices ) )
self.__exportButton.setEnabled( len( indices ) == 1 )
self.__duplicateButton.setEnabled( bool( indices ) )
if not indices :
# No selection. This happens when the user renames
# an image, because the selection is name based.
# Calling _updateFromPlug() causes us to reselect
# the correct image based on the value of the index
# plug.
self._updateFromPlug()
else :
# Deliberately not using an UndoScope as the user thinks
# of this as making a selection, not changing a plug value.
if self._editable() :
if self.getPlug().getValue() not in indices :
self.getPlug().setValue( indices[0] )
def __addClicked( self, *unused ) :
bookmarks = GafferUI.Bookmarks.acquire( self, category="image" )
path = Gaffer.FileSystemPath( bookmarks.getDefault( self ) )
path.setIncludeSequences( True )
path.setFilter(
Gaffer.FileSystemPath.createStandardFilter(
GafferImage.ImageReader.supportedExtensions(),
"Show only image files",
includeSequenceFilter = True,
)
)
dialogue = GafferUI.PathChooserDialogue( path, title = "Add image", confirmLabel = "Add", valid = True, leaf = True, bookmarks = bookmarks )
dialogue.pathChooserWidget().pathListingWidget().setColumns(
dialogue.pathChooserWidget().pathListingWidget().getColumns() +
[ GafferUI.PathListingWidget.StandardColumn( "Frame Range", "fileSystem:frameRange" ) ]
)
path = dialogue.waitForPath( parentWindow = self.ancestor( GafferUI.Window ) )
if not path :
return
with Gaffer.UndoScope( self.getPlug().ancestor( Gaffer.ScriptNode ) ) :
self.__images().addChild( GafferImage.Catalogue.Image.load( str( path ) ) )
self.getPlug().setValue( len( self.__images() ) - 1 )
def __uiIndexToIndex( self, index ) :
target = self.__orderedImages()[ index ]
for i, image in enumerate( self.__images() ) :
if image.isSame( target ) :
return i
def __removeClicked( self, *unused ) :
indices = self.__indicesFromSelection()
# If the user repeatedly clicks the delete button, we might end up in a
# state, where selection hasn't been restored yet. In that case we
# can't delete anything and will ignore the request.
if not indices :
return
with Gaffer.UndoScope( self.getPlug().ancestor( Gaffer.ScriptNode ) ) :
orderedImages = self.__orderedImages()
reselectionIndex = len( orderedImages )
for index in reversed( sorted( indices ) ) :
image = self.__images()[ index ]
uiIndex = orderedImages.index( image )
reselectionIndex = min( max( 0, uiIndex - 1 ), reselectionIndex )
self.__images().removeChild( image )
orderedImages.remove( image )
_ImagesPath._updateUIIndices( orderedImages )
# Figure out new selection
if orderedImages :
selectionIndex = self.__uiIndexToIndex( reselectionIndex )
self.getPlug().setValue( selectionIndex )
def __extractClicked( self, *unused ) :
node = self.getPlug().node()
catalogue = self.__catalogue()
outPlug = next( p for p in node.children( GafferImage.ImagePlug ) if catalogue.isAncestorOf( p.source() ) )
for index in self.__indicesFromSelection() :
image = self.__images()[index]
extractNode = GafferImage.CatalogueSelect()
extractNode["in"].setInput( outPlug )
extractNode["imageName"].setValue( image.getName() )
node.parent().addChild( extractNode )
def __exportClicked( self, *unused ) :
bookmarks = GafferUI.Bookmarks.acquire( self, category="image" )
path = Gaffer.FileSystemPath( bookmarks.getDefault( self ) )
path.setFilter(
Gaffer.FileSystemPath.createStandardFilter(
GafferImage.ImageReader.supportedExtensions(),
"Show only image files",
includeSequenceFilter = True,
)
)
dialogue = GafferUI.PathChooserDialogue( path, title = "Export image", confirmLabel = "Export", leaf = True, bookmarks = bookmarks )
path = dialogue.waitForPath( parentWindow = self.ancestor( GafferUI.Window ) )
if not path :
return
index = self.__indicesFromSelection()[0] # button is disabled unless exactly one image is selected
with GafferUI.ErrorDialogue.ErrorHandler( parentWindow = self.ancestor( GafferUI.Window ) ) :
self.__images()[index].save( str( path ) )
def __duplicateClicked( self, *unused ) :
# These are plug indices, rather than ui indices, so need to be
# used directly with self.__images() without remapping.
indices = self.__indicesFromSelection()
# As we may be inserting more than one image, keep a copy of the original
# list so the selection indices remain valid
sourceImages = [ i for i in self.__images().children() ]
# We need to insert the duplicate before the source, as it's usually
# used to snapshot in-progress renders.
orderedImages = self.__orderedImages()
with Gaffer.UndoScope( self.getPlug().ancestor( Gaffer.ScriptNode ) ) :
insertionIndex = None
for index in indices :
image = sourceImages[ index ]
uiInsertionIndex = orderedImages.index( image )
imageCopy = GafferImage.Catalogue.Image( image.getName() + "Copy", flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
self.__images().addChild( imageCopy )
imageCopy.copyFrom( image )
orderedImages.insert( uiInsertionIndex, imageCopy )
_ImagesPath._updateUIIndices( orderedImages )
# Only switch to the last duplicate if alt is held
altHeld = GafferUI.Widget.currentModifiers() & GafferUI.ModifiableEvent.Modifiers.Alt
if altHeld and uiInsertionIndex is not None :
self.getPlug().setValue( self.__uiIndexToIndex( uiInsertionIndex ) )
def __dropImage( self, eventData ) :
if not self.__catalogue()["directory"].getValue() :
return None
if isinstance( eventData, GafferImage.ImagePlug ) :
return eventData
elif isinstance( eventData, Gaffer.Node ) :
return next( iter( eventData.children( GafferImage.ImagePlug ) ), None )
elif isinstance( eventData, Gaffer.Set ) and len( eventData ) == 1 :
return self.__dropImage( eventData[0] )
else :
return None
def __pathListingDragEnter( self, widget, event ) :
if isinstance( event.data, IECore.StringVectorData ) :
# Allow reordering of images
self.__moveToPath = None
return True
if self.__dropImage( event.data ) is None :
return False
self.__pathListing.setHighlighted( True )
GafferUI.Pointer.setCurrent( "plus" )
return True
def __pathListingDragLeave( self, widget, event ) :
self.__pathListing.setHighlighted( False )
GafferUI.Pointer.setCurrent( None )
return True
def __pathListingDragMove( self, listing, event ) :
if not event.data or not isinstance( event.data, IECore.StringVectorData ) :
return
targetPath = self.__pathListing.pathAt( event.line.p0 )
if targetPath and targetPath == self.__moveToPath :
# We have done the work already, the mouse is just still over the same path
return
self.__moveToPath = targetPath
images = self.__orderedImages()
imagesToMove = [image for image in images if '/'+image.getName() in event.data]
# Because of multi-selection it's possible to move the mouse over a selected image.
# That's not a valid image we want to replace with the current selection - do nothing.
if str( targetPath )[1:] in [image.getName() for image in imagesToMove] :
return
imageToReplace = None
if targetPath is not None :
targetName = str( targetPath )[1:]
for image in images :
if not image.getName() == targetName :
continue
imageToReplace = image
break
else :
# Drag has gone above or below all listed items. Use closest image.
imageToReplace = images[0] if event.line.p0.y < 1 else images[-1]
if not imageToReplace or imageToReplace in imagesToMove :
return
# Reorder images and reassign indices accordingly.
previous = None
for image in imagesToMove :
currentIndex = images.index( image )
images[currentIndex] = None # Add placeholder so we don't mess with indices
if previous :
# Just insert after previous image to preserve odering of selected images
newIndex = images.index( previous ) + 1
else :
newIndex = images.index( imageToReplace )
if currentIndex < newIndex : # Make up for the placeholder
newIndex += 1
images.insert( newIndex, image )
previous = image
_ImagesPath._updateUIIndices( [image for image in images if image ] )
self.__pathListing.getPath().pathChangedSignal()( self.__pathListing.getPath() )
def __pathListingDrop( self, widget, event ) :
image = self.__dropImage( event.data )
if image is None :
return False
with self.getContext() :
fileName = self.__catalogue().generateFileName( image )
imageWriter = GafferImage.ImageWriter()
imageWriter["in"].setInput( image )
imageWriter["fileName"].setValue( fileName )
imageWriter["task"].execute()
with Gaffer.UndoScope( self.getPlug().ancestor( Gaffer.ScriptNode ) ) :
loadedImage = GafferImage.Catalogue.Image.load( fileName )
loadedImage.setName( image.node().getName() )
self.__images().addChild( loadedImage )
self.getPlug().setValue( len( self.__images() ) - 1 )
self.__pathListing.setHighlighted( False )
GafferUI.Pointer.setCurrent( None )
return True
def __keyPress( self, imageListing, keyEvent ) :
if keyEvent.key in ['Delete', 'Backspace'] :
self.__removeClicked()
return True
return False
def __columnContextMenuDefinition( self ) :

	# Build the header context menu used to toggle column visibility.
	visibleColumns = self.__getColumns()

	menuDefinition = IECore.MenuDefinition()
	menuDefinition.append( "/Reset", { "command" : Gaffer.WeakMethod( self.__resetColumns ) } )
	menuDefinition.append( "/__resetDivider__", { "divider" : True } )

	for columnName in sorted( registeredColumns() ) :
		isVisible = columnName in visibleColumns
		menuDefinition.append( "/%s" % columnName, {
			"checkBox" : isVisible,
			"command" : functools.partial( Gaffer.WeakMethod( self.__toggleColumn ), columnName ),
			# Disable the toggle for the final visible column so that
			# at least one column always remains.
			"active" : not ( isVisible and len( visibleColumns ) == 1 )
		} )

	return menuDefinition
def __contextMenu( self, *unused ) :

	# Pops up the column-visibility menu, but only for clicks on the header row.
	if self.getPlug() is None :
		return False

	# The signal fires anywhere in the listing; bail out unless the
	# mouse is actually over the header.
	mousePosition = GafferUI.Widget.mousePosition( relativeTo = self.__pathListing )
	header = self.__pathListing._qtWidget().header()
	if not header.rect().contains( mousePosition[0], mousePosition[1] ) :
		return False

	# Hold a reference so the menu isn't garbage collected while shown.
	self.__popupMenu = GafferUI.Menu( self.__columnContextMenuDefinition(), "Columns" )
	self.__popupMenu.popup( parent = self )

	return True
GafferUI.Pointer.registerPointer( "plus", GafferUI.Pointer( "plus.png" ) )
| {
"content_hash": "35777906496f6ffbcccfe25be9870670",
"timestamp": "",
"source": "github",
"line_count": 1049,
"max_line_length": 162,
"avg_line_length": 32.30409914204004,
"alnum_prop": 0.6923008823442618,
"repo_name": "lucienfostier/gaffer",
"id": "a52fda3d087bd45c90a7e24b5e6ad2963b37464f",
"size": "35756",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/GafferImageUI/CatalogueUI.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "41979"
},
{
"name": "C++",
"bytes": "7610953"
},
{
"name": "CMake",
"bytes": "85201"
},
{
"name": "GLSL",
"bytes": "6236"
},
{
"name": "Python",
"bytes": "7892655"
},
{
"name": "Shell",
"bytes": "15031"
}
],
"symlink_target": ""
} |
import logging
from datetime import datetime
from django.conf import settings
from django.core.serializers.json import DjangoJSONEncoder
from django.db import models
from django.utils import simplejson
from django.utils.encoding import smart_unicode
# JSONField forked from djblets project
# See LICENSE in courant.core.siteconfig
class JSONField(models.TextField):
    """
    A field for storing JSON-encoded data. The data is accessible as standard
    Python data types and is transparently encoded/decoded to/from a JSON
    string in the database.
    """

    # Tells Django's serialization machinery to serialize this field through
    # value_to_string() rather than the raw attribute value.
    serialize_to_string = True

    def __init__(self, verbose_name=None, name=None,
                 encoder=DjangoJSONEncoder(), **kwargs):
        # blank=True because an empty JSON payload is a valid stored value
        # (post_init normalises it to {}).
        models.TextField.__init__(self, verbose_name, name, blank=True,
                                  **kwargs)
        self.encoder = encoder

    def db_type(self):
        # Stored as a plain text column; encoding/decoding happens in Python.
        return "text"

    def contribute_to_class(self, cls, name):
        # Expose get_<field>_json()/set_<field>_json() helpers on the model so
        # callers can work with the raw JSON string directly.
        def get_json(model_instance):
            return self.dumps(getattr(model_instance, self.attname, None))

        def set_json(model_instance, json):
            setattr(model_instance, self.attname, self.loads(json))

        super(JSONField, self).contribute_to_class(cls, name)
        setattr(cls, "get_%s_json" % self.name, get_json)
        setattr(cls, "set_%s_json" % self.name, set_json)
        # Decode the stored string into Python data every time an instance is
        # created/loaded.
        models.signals.post_init.connect(self.post_init, sender=cls)

    def pre_save(self, model_instance, add):
        # Serialize the in-memory Python value just before saving.
        return self.dumps(getattr(model_instance, self.attname, None))

    def post_init(self, instance=None, **kwargs):
        # Replace the raw DB string on the instance with decoded Python data;
        # empty/missing values become an empty dict.
        value = self.value_from_object(instance)
        if value:
            value = self.loads(value)
        else:
            value = {}
        setattr(instance, self.attname, value)

    def get_db_prep_save(self, value):
        # Values may already be JSON strings (e.g. via set_<field>_json).
        if not isinstance(value, basestring):
            value = self.dumps(value)
        return super(JSONField, self).get_db_prep_save(value)

    def value_to_string(self, obj):
        return self.dumps(self.value_from_object(obj))

    def dumps(self, data):
        return self.encoder.encode(data)

    def loads(self, val):
        try:
            val = simplejson.loads(val, encoding=settings.DEFAULT_CHARSET)

            # XXX We need to investigate why this is happening once we have
            # a solid repro case.
            if isinstance(val, basestring):
                # Bug fix: this previously interpolated the undefined name
                # `s`, raising NameError whenever this path was hit.
                logging.warning("JSONField decode error. Expected dictionary, "
                                "got string for input '%s'" % val)

                # For whatever reason, we may have gotten back a
                # double-encoded string; decode it a second time.
                val = simplejson.loads(val, encoding=settings.DEFAULT_CHARSET)
        except ValueError:
            # There's probably embedded unicode markers (like u'foo') in the
            # string. We have to eval it.
            # SECURITY NOTE(review): eval() of database content executes
            # arbitrary code if the column can ever hold attacker-controlled
            # data. Kept for backwards compatibility with legacy rows, but
            # consider ast.literal_eval() instead.
            val = eval(val)
        return val
| {
"content_hash": "32f54abf836ca2e2f4e4150087038a02",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 79,
"avg_line_length": 35.333333333333336,
"alnum_prop": 0.6095013477088949,
"repo_name": "maxcutler/Courant-News",
"id": "991c0075ed04c148203f7918fdc16f455e14646f",
"size": "2968",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "courant/core/utils/fields.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "47452"
},
{
"name": "Python",
"bytes": "487441"
}
],
"symlink_target": ""
} |
import sys
from setuptools import setup, find_packages
# Dependency groups.
# pytest-runner is only needed when a test command is actually being run.
_test_deps = ['pytest', 'pytest-pep8', 'pytest-flakes']
_setup_deps = ['pytest-runner'] if {'pytest', 'test', 'ptr'}.intersection(sys.argv) else []
_install_deps = ['ipykernel', 'pyreadline; platform_system == "Windows"']

# Use the README as the long description shown on PyPI.
with open('README.rst', 'r') as f:
    _readme_text = f.read()

setup(
    name='pyprinter',
    version='1.5.3',
    description='Print Everything!',
    long_description=_readme_text,
    author='Ofir Brukner',
    author_email='ofirbrukner@gmail.com',
    url='https://github.com/ofir123/py-printer',
    download_url='https://github.com/ofir123/py-printer/archive/1.5.3.tar.gz',
    license="MIT",
    packages=find_packages(),
    setup_requires=_setup_deps,
    install_requires=_install_deps,
    tests_require=_test_deps,
    extras_require={'test': _test_deps},
    include_package_data=True,
    keywords='Python, Python3, color, print, unicode, encoding',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Topic :: Software Development :: Libraries',
        'Topic :: Utilities',
    ],
)
| {
"content_hash": "13cf071556684bde895bad570a054022",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 98,
"avg_line_length": 40.390243902439025,
"alnum_prop": 0.6056763285024155,
"repo_name": "ofir123/PyPrinter",
"id": "254fd0c12e3599ecd65265059cc1e552cd593825",
"size": "1678",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "43703"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.