| code (stringlengths 2–1.05M) | repo_name (stringlengths 5–104) | path (stringlengths 4–251) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int32 2–1.05M) |
|---|---|---|---|---|---|
from django.contrib import admin
from video.models import Season, Video, VideoLiked
admin.site.register((Season, Video, VideoLiked))
| pobear/restless | examples/statmap/video/admin.py | Python | bsd-3-clause | 134 |
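The three-line admin module above registers Season, Video, and VideoLiked with Django's default ModelAdmin. As a point of comparison, here is a hedged sketch of the equivalent explicit registration for one of those models using the `@admin.register` decorator; the `list_display` choice is an assumption for illustration (only the implicit `id` field is guaranteed), and it would replace, not accompany, the tuple-based call for that model.

```python
# Illustrative alternative to admin.site.register for one of the models above.
# 'id' is the implicit primary key every Django model has; richer list_display
# fields would be assumptions about the Video model.
from django.contrib import admin
from video.models import Video


@admin.register(Video)
class VideoAdmin(admin.ModelAdmin):
    list_display = ('id',)
```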
from __future__ import division
import re
import glob
import os
import sys
import unittest
from unittest import TestCase as BaseTestCase
suites = []
add = suites.append
class TestCase(BaseTestCase):
def failUnlessRaisesRegexp(self, exc, re_, fun, *args, **kwargs):
def wrapped(*args, **kwargs):
try:
fun(*args, **kwargs)
except Exception, e:
self.failUnless(re.search(re_, str(e)))
raise
self.failUnlessRaises(exc, wrapped, *args, **kwargs)
# silence deprec warnings about useless renames
failUnless = BaseTestCase.assertTrue
failIf = BaseTestCase.assertFalse
failUnlessEqual = BaseTestCase.assertEqual
failUnlessRaises = BaseTestCase.assertRaises
failUnlessAlmostEqual = BaseTestCase.assertAlmostEqual
failIfEqual = BaseTestCase.assertNotEqual
failIfAlmostEqual = BaseTestCase.assertNotAlmostEqual
def assertReallyEqual(self, a, b):
self.assertEqual(a, b)
self.assertEqual(b, a)
self.assertTrue(a == b)
self.assertTrue(b == a)
self.assertFalse(a != b)
self.assertFalse(b != a)
self.assertEqual(0, cmp(a, b))
self.assertEqual(0, cmp(b, a))
def assertReallyNotEqual(self, a, b):
self.assertNotEqual(a, b)
self.assertNotEqual(b, a)
self.assertFalse(a == b)
self.assertFalse(b == a)
self.assertTrue(a != b)
self.assertTrue(b != a)
self.assertNotEqual(0, cmp(a, b))
self.assertNotEqual(0, cmp(b, a))
for name in glob.glob(os.path.join(os.path.dirname(__file__), "test_*.py")):
module = "tests." + os.path.basename(name)
__import__(module[:-3], {}, {}, [])
class Result(unittest.TestResult):
separator1 = '=' * 70
separator2 = '-' * 70
def addSuccess(self, test):
unittest.TestResult.addSuccess(self, test)
sys.stdout.write('.')
def addError(self, test, err):
unittest.TestResult.addError(self, test, err)
sys.stdout.write('E')
def addFailure(self, test, err):
unittest.TestResult.addFailure(self, test, err)
sys.stdout.write('F')
def printErrors(self):
succ = self.testsRun - (len(self.errors) + len(self.failures))
v = "%3d" % succ
count = 50 - self.testsRun
sys.stdout.write((" " * count) + v + "\n")
self.printErrorList('ERROR', self.errors)
self.printErrorList('FAIL', self.failures)
def printErrorList(self, flavour, errors):
for test, err in errors:
sys.stdout.write(self.separator1 + "\n")
sys.stdout.write("%s: %s\n" % (flavour, str(test)))
sys.stdout.write(self.separator2 + "\n")
sys.stdout.write("%s\n" % err)
class Runner(object):
def run(self, test):
suite = unittest.makeSuite(test)
pref = '%s (%d): ' % (test.__name__, len(suite._tests))
print pref + " " * (25 - len(pref)),
result = Result()
suite(result)
result.printErrors()
return bool(result.failures + result.errors)
def unit(run=[], quick=False):
import mmap
runner = Runner()
failures = False
tests = [t for t in suites if not run or t.__name__ in run]
# normal run, trace mmap calls
orig_mmap = mmap.mmap
uses_mmap = []
print "Running tests with real mmap."
for test in tests:
def new_mmap(*args, **kwargs):
if test not in uses_mmap:
uses_mmap.append(test)
return orig_mmap(*args, **kwargs)
mmap.mmap = new_mmap
failures |= runner.run(test)
mmap.mmap = orig_mmap
# make sure the above works
if not run:
assert len(uses_mmap) > 1
if quick:
return failures
# run mmap using tests with mocked lockf
try:
import fcntl
except ImportError:
print "Unable to run mocked fcntl.lockf tests."
else:
def MockLockF(*args, **kwargs):
raise IOError
lockf = fcntl.lockf
fcntl.lockf = MockLockF
print "Running tests with mocked failing fcntl.lockf."
for test in uses_mmap:
failures |= runner.run(test)
fcntl.lockf = lockf
# failing mmap.move
class MockMMap(object):
def __init__(self, *args, **kwargs):
pass
def move(self, dest, src, count):
raise ValueError
def close(self):
pass
print "Running tests with mocked failing mmap.move."
mmap.mmap = MockMMap
for test in uses_mmap:
failures |= runner.run(test)
# failing mmap.mmap
def MockMMap2(*args, **kwargs):
raise EnvironmentError
mmap.mmap = MockMMap2
print "Running tests with mocked failing mmap.mmap."
for test in uses_mmap:
failures |= runner.run(test)
return failures
| hanvo/MusicCloud | Crawler/Install Files/mutagen-1.22/tests/__init__.py | Python | bsd-3-clause | 4,882 |
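The tests/__init__.py harness above globs `test_*.py` modules from its own directory, imports them, and runs every class registered through `add` (an alias for `suites.append`). A minimal sketch of a hypothetical module it would pick up follows; the file name `tests/test_example.py` and the `TExample` class are illustrative, not part of mutagen.

```python
# Hypothetical tests/test_example.py, discovered by the glob in the harness above.
from tests import TestCase, add


class TExample(TestCase):
    def test_addition(self):
        # assertReallyEqual checks ==, != and cmp() in both directions
        self.assertReallyEqual(1 + 1, 2)

add(TExample)
```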
import pandas as pd
import redcap as rc
"""
## FIXME
- There's multiple functions doing mostly the same thing, but not quite. I need to go through it.
"""
def get_items_matching_regex(regex, haystack):
import re
return list(filter(lambda x: re.search(regex, x), haystack))
def get_notnull_entries(row, ignore_always_notna=True):
# FIXME: should accept columns to ignore as an argument
try:
columns = row.columns.tolist()
except:
columns = row.index.tolist()
if ignore_always_notna:
cols_complete = get_items_matching_regex("_complete$", columns)
cols_ignore = get_items_matching_regex("^visit_ignore", columns)
cols_missing = get_items_matching_regex("_missing(_why(_other)?)?$",
columns)
#cols_clinical = get_items_matching_regex("^fh_|^ssq_", columns)
# cols_missing_rationale = get_items_matching_regex("missing_why(_other)?$",
# columns)
cols_checklist = get_items_matching_regex("___", columns)
non_nan_items = row.drop(cols_complete + cols_ignore + cols_missing +
#cols_clinical + # cols_missing_rationale +
cols_checklist).notnull()
else:
non_nan_items = row.notnull()
return non_nan_items
def count_non_nan_rowwise(df, form_name=None, drop_columns=None):
    """ A more efficient method of checking non-NaN values """
    if drop_columns is None:
        drop_columns = []
    # 1. check complete
    if form_name:
        complete_field = form_name + '_complete'
        drop_columns.append(complete_field)
    # 2. count up NaNs
    return df.drop(drop_columns, axis=1).notnull().sum(axis=1)
def count_notnull_entries(row):
try:
return get_notnull_entries(row).sum(axis=1)
except ValueError:
return get_notnull_entries(row).sum()
def has_notnull_entries(row):
return get_notnull_entries(row).any()
def form_has_content(row):
""" If the form is knowledgeably not empty (e.g. marked missing or
marked complete) or it has content in it, it is considered to have content. """
try:
columns = row.columns.tolist()
except:
columns = row.index.tolist()
cols_complete = get_items_matching_regex("complete$", columns)
cols_missing = get_items_matching_regex("missing$", columns)
cols_checklist = get_items_matching_regex("___", columns)
if cols_missing:
missing = (row[cols_missing] == 1).any()
else:
        missing = False  # use False (not None) so the bitwise-or below stays valid
if cols_complete:
complete = (row[cols_complete].isin([2])).any()
else:
complete = False
non_nan_items = row.drop(cols_complete + cols_missing + cols_checklist).notnull().any()
return missing | complete | non_nan_items
def form_has_content_and_is_not_missing(row):
""" If the form is *not* marked missing *and* has any non-empty non-meta
fields (which count_notnull_entries gets), then it's considered to have content.
"""
try:
columns = row.columns.tolist()
except:
columns = row.index.tolist()
cols_missing = get_items_matching_regex("missing$", columns)
if cols_missing:
missing = (row[cols_missing] == 1).any()
else:
missing = None
notnull_count = count_notnull_entries(row)
return (not missing) and (notnull_count > 0)
# Taken from http://pycap.readthedocs.io/en/latest/deep.html#dealing-with-large-exports
# and adapted to scope down to forms
# FIXME: Possibly duplicates chunk edges? Need to check it out
def chunked_form_export(project, forms, events=None, include_dag=False, chunk_size=100, fields=[]):
if isinstance(forms, str):
forms = [forms]
if isinstance(events, str):
events = [events]
def chunks(l, n):
"""Yield successive n-sized chunks from list l"""
for i in range(0, len(l), n):
yield l[i:i+n]
record_list = project.export_records(fields=[project.def_field])
records = [r[project.def_field] for r in record_list]
try:
response = None
record_count = 0
for record_chunk in chunks(records, chunk_size):
record_count = record_count + chunk_size
#print record_count
try:
chunked_response = project.export_records(records=record_chunk,
fields=[project.def_field] + fields,
forms=forms,
events=events,
export_data_access_groups=include_dag,
format='df',
df_kwargs={'low_memory': False})
except pd.errors.EmptyDataError:
print("Empty DataFrame error for event {}, fields {}, forms {}"
.format(events, fields, forms))
continue
if response is not None:
response = pd.concat([response, chunked_response], axis=0)
else:
response = chunked_response
except rc.RedcapError:
msg = "Chunked export failed for chunk_size={:d}".format(chunk_size)
raise ValueError(msg)
else:
if project.is_longitudinal:
response.set_index([project.def_field, 'redcap_event_name'], inplace=True)
else:
response.set_index([project.def_field], inplace=True)
return response
| sibis-platform/ncanda-data-integration | scripts/qc/qa_utils.py | Python | bsd-3-clause | 5,817 |
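A minimal usage sketch for the `chunked_form_export` helper defined above, assuming a PyCap `redcap.Project`; the API URL, token, and form name are placeholders, not values from the NCANDA project.

```python
# Illustrative only: pull one form in 100-record chunks and get back a
# DataFrame indexed by the project's record id (plus redcap_event_name for
# longitudinal projects), as set up inside chunked_form_export above.
import redcap as rc

project = rc.Project('https://redcap.example.org/api/', 'MY_API_TOKEN')
demographics = chunked_form_export(project, forms=['demographics'],
                                   chunk_size=100)
print(demographics.shape)
```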
import tests.periodicities.period_test as per
per.buildModel((120 , 'BH' , 200));
| antoinecarme/pyaf | tests/periodicities/Business_Hour/Cycle_Business_Hour_200_BH_120.py | Python | bsd-3-clause | 84 |
#!/usr/bin/env python
from ATK.Core import DoubleInPointerFilter, DoubleOutPointerFilter
from ATK.Adaptive import DoubleLMSFilter
from nose.tools import raises
def filter(input, reference):
import numpy as np
output = np.zeros(input.shape, dtype=np.float64)
infilter = DoubleInPointerFilter(input, False)
infilter.input_sampling_rate = 48000
reffilter = DoubleInPointerFilter(reference, False)
reffilter.input_sampling_rate = 48000
rls = DoubleLMSFilter(10)
rls.input_sampling_rate = 48000
rls.memory = 0.99
rls.set_input_port(0, infilter, 0)
rls.set_input_port(1, reffilter, 0)
outfilter = DoubleOutPointerFilter(output, False)
outfilter.input_sampling_rate = 48000
outfilter.set_input_port(0, rls, 0)
outfilter.process(input.shape[1])
return output
def LMS_test():
import numpy as np
from numpy.testing import assert_almost_equal
import os
dirname = os.path.dirname(__file__)
d = np.fromfile(dirname + os.sep + "input_lms.dat", dtype=np.float64).reshape(1, -1)
ref = np.fromfile(dirname + os.sep + "output_lms.dat", dtype=np.float64).reshape(1, -1)
out = filter(d, d)
assert_almost_equal(out, ref)
@raises(ValueError)
def LMS_bad_dim_test():
import numpy as np
lms = DoubleLMSFilter(100)
lms.w = np.array(())
@raises(ValueError)
def LMS_bad_size_test():
import numpy as np
lms = DoubleLMSFilter(100)
lms.w = np.ones((10,))
if __name__ == "__main__":
import numpy as np
size = 1200
x = np.arange(size).reshape(1, -1) / 48000.
d = np.sin(x * 2 * np.pi * 100, dtype=np.float64)
d.tofile("input_lms.dat")
out = filter(d, d)
out.tofile("output_lms.dat")
import matplotlib.pyplot as plt
plt.plot(x[0], d[0], label="input")
plt.plot(x[0], out[0], label="output")
plt.legend()
plt.show()
| mbrucher/AudioTK | tests/Python/Adaptive/PyATKAdaptive_lms_test.py | Python | bsd-3-clause | 1,784 |
from __future__ import absolute_import
import os
import fcntl
import pwd
import re
import shutil
import sys
import time
import random
import smtplib
import traceback
import time
from collections import defaultdict
from subprocess import check_output
from datetime import datetime, time as dtime
from metatlas.mzml_loader import VERSION_TIMESTAMP
from metatlas.datastructures.metatlas_objects import LcmsRun, retrieve, store
from metatlas.io.mzml_loader import mzml_to_hdf
ADMIN = 'bpb'
def send_mail(subject, username, body, force=False):
"""Send the mail only once per day."""
now = datetime.now()
if force or dtime(00, 00) <= now.time() <= dtime(00, 10):
sender = 'pasteur@nersc.gov'
receivers = ['%s@nersc.gov' % username, '%s@nersc.gov' % ADMIN]
message = """\
From: %s
To: %s
Subject: %s
%s
""" % (sender, ", ".join(receivers), subject, body)
try:
smtpObj = smtplib.SMTP('localhost')
smtpObj.sendmail(sender, receivers, message)
sys.stdout.write("Successfully sent email to %s\n" % username)
sys.stdout.flush()
except smtplib.SMTPException:
sys.stderr.write("Error: unable to send email to %s\n" % username)
sys.stdout.flush()
def get_acqtime_from_mzml(mzml_file):
startTimeStamp=None
with open(mzml_file) as mzml:
for line in mzml:
if 'startTimeStamp' in line:
startTimeStamp = line.split('startTimeStamp="')[1].split('"')[0].replace('T',' ').rstrip('Z')
break
# print startTimeStamp
if not '-infinity' in startTimeStamp:
date_object = datetime.strptime(startTimeStamp, '%Y-%m-%d %H:%M:%S')
utc_timestamp = int(time.mktime(date_object.timetuple()))
else:
utc_timestamp = int(0)
return utc_timestamp
def update_metatlas(directory):
readonly_files = defaultdict(set)
other_errors = defaultdict(list)
directory = os.path.abspath(directory)
# Sleep a random amount of time to avoid running at the same time as
# other processes.
time.sleep(random.random() * 2)
mzml_files = check_output('find %s -name "*.mzML"' % directory, shell=True)
mzml_files = mzml_files.decode('utf-8').splitlines()
# Find valid h5 files newer than the format version timestamp.
delta = int((time.time() - VERSION_TIMESTAMP) / 60)
check = 'find %s -name "*.h5" -mmin -%s -size +2k' % (directory, delta)
valid_files = check_output(check, shell=True).decode('utf-8').splitlines()
valid_files = set(valid_files)
new_files = []
for mzml_file in mzml_files:
if mzml_file.replace('.mzML', '.h5') not in valid_files:
new_files.append(mzml_file)
patt = re.compile(r".+\/raw_data\/(?P<username>[^/]+)\/(?P<experiment>[^/]+)\/(?P<path>.+)")
sys.stdout.write('Found %s files\n' % len(new_files))
sys.stdout.flush()
for (ind, fname) in enumerate(new_files):
sys.stdout.write('(%s of %s): %s\n' % (ind + 1, len(new_files), fname))
sys.stdout.flush()
# Get relevant information about the file.
info = patt.match(os.path.abspath(fname))
if info:
info = info.groupdict()
else:
sys.stdout.write("Invalid path name: %s\n" % fname)
sys.stdout.flush()
continue
dirname = os.path.dirname(fname)
try:
username = pwd.getpwuid(os.stat(fname).st_uid).pw_name
except OSError:
try:
username = pwd.getpwuid(os.stat(dirname).st_uid).pw_name
except Exception:
username = info['username']
# Change to read only.
try:
os.chmod(fname, 0o660)
except Exception as e:
sys.stderr.write(str(e) + '\n')
sys.stderr.flush()
# Copy the original file to a pasteur backup.
if os.environ['USER'] == 'pasteur':
pasteur_path = fname.replace('raw_data', 'pasteur_backup')
dname = os.path.dirname(pasteur_path)
if not os.path.exists(dname):
os.makedirs(dname)
try:
shutil.copy(fname, pasteur_path)
except IOError as e:
readonly_files[username].add(dirname)
continue
# Get a lock on the mzml file to prevent interference.
try:
fid = open(fname, 'r')
fcntl.flock(fid, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
fid.close()
msg = '%s already converting in another process\n' % fname
sys.stderr.write(msg)
sys.stderr.flush()
continue
# Convert to HDF and store the entry in the database.
try:
hdf5_file = fname.replace('mzML', 'h5')
#Get Acquisition Time Here
acquisition_time = get_acqtime_from_mzml(fname)
mzml_to_hdf(fname, hdf5_file, True)
os.chmod(hdf5_file, 0o660)
description = info['experiment'] + ' ' + info['path']
ctime = os.stat(fname).st_ctime
# Add this to the database unless it is already there
try:
runs = retrieve('lcmsrun', username='*', mzml_file=fname)
except Exception:
runs = list()
if not len(runs):
run = LcmsRun(name=info['path'], description=description,
username=info['username'],
experiment=info['experiment'],
creation_time=ctime, last_modified=ctime,
mzml_file=fname, hdf5_file=hdf5_file, acquisition_time = acquisition_time)
store(run)
except Exception as e:
if 'exists but it can not be written' in str(e):
readonly_files[username].add(dirname)
else:
msg = traceback.format_exception(*sys.exc_info())
msg.insert(0, 'Cannot convert %s' % fname)
other_errors[info['username']].append('\n'.join(msg))
sys.stderr.write(str(e) + '\n')
sys.stderr.flush()
try:
os.remove(hdf5_file)
except:
pass
finally:
fid.close()
# Handle errors.
from metatlas.metatlas_objects import find_invalid_runs
invalid_runs = find_invalid_runs(_override=True)
if readonly_files:
for (username, dirnames) in readonly_files.items():
body = ("Please log in to NERSC and run 'chmod 777' on the "
"following directories:\n%s" % ('\n'.join(dirnames)))
send_mail('Metatlas Files are Inaccessible', username, body)
if invalid_runs:
grouped = defaultdict(list)
for run in invalid_runs:
grouped[run.username].append(run.mzml_file)
for (username, filenames) in grouped.items():
            body = 'You have runs that are no longer accessible\n'
body += 'To remove them from the database, run the following on ipython.nersc.gov:\n\n'
body += 'from metatlas.metatlas_objects import find_invalid_runs, remove_objects\n'
body += 'remove_objects(find_invalid_runs())\n\n'
body += 'The invalid runs are:\n%s' % ('\n'.join(filenames))
send_mail('Metatlas Runs are Invalid', username, body)
if other_errors:
for (username, errors) in other_errors.items():
body = 'Errored files found while loading in Metatlas files:\n\n%s' % '\n********************************\n'.join(errors)
send_mail('Errors loading Metatlas files', username, body)
sys.stdout.write('Done!\n')
sys.stdout.flush()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description="Watchdog to monitor directory for new files")
parser.add_argument("directory", type=str, nargs=1, help="Directory to watch")
args = parser.parse_args()
sys.stdout.write(str(args) + '\n')
sys.stdout.flush()
update_metatlas(args.directory[0])
| biorack/metatlas | metatlas/io/directory_watcher.py | Python | bsd-3-clause | 8,146 |
import random
import string
import factory
from models import Position
class PositionFactory(factory.Factory):
class Meta:
model = Position
title = factory.Sequence(lambda n: 'Position Title %d' % n)
short_title = factory.Sequence(lambda n: 'Position Short Title %d' % n)
    # evaluate per built instance so roughly 30% of positions come out enabled
    enabled = factory.LazyFunction(lambda: random.random() < 0.3)
| publica-io/django-publica-positions | positions/factories.py | Python | bsd-3-clause | 329 |
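A short usage sketch for the factory above, assuming factory_boy and the Position model are importable; `build()` creates in-memory instances without touching the database, and the printed titles follow the `Sequence` declarations (the exact numbers depend on the current sequence counter).

```python
# Illustrative usage of PositionFactory from positions/factories.py above.
from positions.factories import PositionFactory

position = PositionFactory.build()
print(position.title)        # e.g. "Position Title 0"
print(position.short_title)  # e.g. "Position Short Title 0"

# build_batch returns several unsaved instances with consecutive sequence numbers
batch = PositionFactory.build_batch(3)
```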
from base64 import b64decode
from .packet import construct_packet
from .utils import PgpdumpException, crc24
class BinaryData(object):
'''The base object used for extracting PGP data packets. This expects fully
    binary data as input, such as that read from a .sig or .gpg file.'''
binary_tag_flag = 0x80
def __init__(self, data):
if not data:
raise PgpdumpException("no data to parse")
if len(data) <= 1:
raise PgpdumpException("data too short")
data = bytearray(data)
# 7th bit of the first byte must be a 1
if not bool(data[0] & self.binary_tag_flag):
raise PgpdumpException("incorrect binary data")
self.data = data
self.length = len(data)
def packets(self):
'''A generator function returning PGP data packets.'''
offset = 0
while offset < self.length:
total_length, packet = construct_packet(self.data, offset)
offset += total_length
yield packet
def __repr__(self):
return "<%s: length %d>" % (
self.__class__.__name__, self.length)
class AsciiData(BinaryData):
'''A wrapper class that supports ASCII-armored input. It searches for the
first PGP magic header and extracts the data contained within.'''
def __init__(self, data):
self.original_data = data
data = self.strip_magic(data)
data, known_crc = self.split_data_crc(data)
data = bytearray(b64decode(data))
if known_crc:
# verify it if we could find it
actual_crc = crc24(data)
if known_crc != actual_crc:
raise PgpdumpException(
"CRC failure: known 0x%x, actual 0x%x" % (
known_crc, actual_crc))
super(AsciiData, self).__init__(data)
@staticmethod
def strip_magic(data):
'''Strip away the '-----BEGIN PGP SIGNATURE-----' and related cruft so
we can safely base64 decode the remainder.'''
idx = 0
magic = b'-----BEGIN PGP '
ignore = b'-----BEGIN PGP SIGNED '
        # find our magic string, skipping our ignored string
        while True:
            idx = data.find(magic, idx)
            if data[idx:idx + len(ignore)] != ignore:
break
idx += 1
if idx >= 0:
# find the start of the actual data. it always immediately follows
# a blank line, meaning headers are done.
nl_idx = data.find(b'\n\n', idx)
if nl_idx < 0:
nl_idx = data.find(b'\r\n\r\n', idx)
if nl_idx < 0:
raise PgpdumpException(
"found magic, could not find start of data")
# now find the end of the data.
end_idx = data.find(b'-----', nl_idx)
if end_idx:
data = data[nl_idx:end_idx]
else:
data = data[nl_idx:]
return data
@staticmethod
def split_data_crc(data):
'''The Radix-64 format appends any CRC checksum to the end of the data
block, in the form '=alph', where there are always 4 ASCII characters
        corresponding to 3 digits (24 bits). Look for this special case.'''
# don't let newlines trip us up
data = data.rstrip()
# this funkyness makes it work without changes in Py2 and Py3
if data[-5] in (b'=', ord(b'=')):
# CRC is returned without the = and converted to a decimal
crc = b64decode(data[-4:])
# same noted funkyness as above, due to bytearray implementation
crc = [ord(c) if isinstance(c, str) else c for c in crc]
crc = (crc[0] << 16) + (crc[1] << 8) + crc[2]
return (data[:-5], crc)
return (data, None)
| toofishes/python-pgpdump | pgpdump/data.py | Python | bsd-3-clause | 3,846 |
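An illustrative sketch of how the two classes above are meant to be used: `AsciiData` for armored input (`BinaryData` would take raw .sig/.gpg bytes directly), with `packets()` walking the parsed packets. The file name is a placeholder.

```python
# Illustrative only; example.asc stands in for any ASCII-armored signature file.
with open('example.asc', 'rb') as f:
    armored = AsciiData(f.read())

for packet in armored.packets():
    # each item is a parsed PGP packet object built by construct_packet
    print(type(packet).__name__)
```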
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import warnings
from operator import or_, itemgetter
from copy import deepcopy
from itertools import combinations
from functools import reduce
from collections import defaultdict
import numpy as np
from scipy.stats import pearsonr
from skbio._base import SkbioObject
from skbio.stats.distance import DistanceMatrix
from ._exception import (NoLengthError, DuplicateNodeError, NoParentError,
MissingNodeError, TreeError)
from skbio.util import RepresentationWarning
from skbio.util._decorator import experimental, classonlymethod
def distance_from_r(m1, m2):
r"""Estimates distance as (1-r)/2: neg correl = max distance
Parameters
----------
m1 : DistanceMatrix
a distance matrix to compare
m2 : DistanceMatrix
a distance matrix to compare
Returns
-------
float
The distance between m1 and m2
"""
return (1-pearsonr(m1.data.flat, m2.data.flat)[0])/2
class TreeNode(SkbioObject):
r"""Representation of a node within a tree
A `TreeNode` instance stores links to its parent and optional children
nodes. In addition, the `TreeNode` can represent a `length` (e.g., a
branch length) between itself and its parent. Within this object, the use
of "children" and "descendants" is frequent in the documentation. A child
is a direct descendant of a node, while descendants are all nodes that are
below a given node (e.g., grand-children, etc).
Parameters
----------
name : str or None
A node can have a name. It is common for tips in particular to have
names, for instance, in a phylogenetic tree where the tips correspond
to species.
length : float, int, or None
Length of the branch connecting this node to its parent. Can represent
        elapsed time, number of mutations, or other measures of evolutionary
distance.
support : float, int, or None
Support value of the branch connecting this node to its parent. Can be
bootstrap value, posterior probability, or other metrics measuring the
confidence or frequency of this branch.
parent : TreeNode or None
Connect this node to a parent
children : list of TreeNode or None
Connect this node to existing children
"""
default_write_format = 'newick'
_exclude_from_copy = set(['parent', 'children', '_tip_cache',
'_non_tip_cache'])
@experimental(as_of="0.4.0")
def __init__(self, name=None, length=None, support=None, parent=None,
children=None):
self.name = name
self.length = length
self.support = support
self.parent = parent
self._tip_cache = {}
self._non_tip_cache = {}
self._registered_caches = set()
self.children = []
self.id = None
if children is not None:
self.extend(children)
@experimental(as_of="0.4.0")
def __repr__(self):
r"""Returns summary of the tree
Returns
-------
str
A summary of this node and all descendants
Notes
-----
This method returns the name of the node and a count of tips and the
number of internal nodes in the tree
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read(["((a,b)c, d)root;"])
>>> repr(tree)
'<TreeNode, name: root, internal node count: 1, tips count: 3>'
"""
nodes = [n for n in self.traverse(include_self=False)]
n_tips = sum([n.is_tip() for n in nodes])
n_nontips = len(nodes) - n_tips
classname = self.__class__.__name__
name = self.name if self.name is not None else "unnamed"
return "<%s, name: %s, internal node count: %d, tips count: %d>" % \
(classname, name, n_nontips, n_tips)
@experimental(as_of="0.4.0")
def __str__(self):
r"""Returns string version of self, with names and distances
Returns
-------
str
Returns a Newick representation of the tree
See Also
--------
read
write
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read(["((a,b)c);"])
>>> str(tree)
'((a,b)c);\n'
"""
return str(''.join(self.write([])))
@experimental(as_of="0.4.0")
def __iter__(self):
r"""Node iter iterates over the `children`."""
return iter(self.children)
@experimental(as_of="0.4.0")
def __len__(self):
return len(self.children)
@experimental(as_of="0.4.0")
def __getitem__(self, i):
r"""Node delegates slicing to `children`."""
return self.children[i]
@experimental(as_of="0.4.0")
def _adopt(self, node):
r"""Update `parent` references but does NOT update `children`."""
self.invalidate_caches()
if node.parent is not None:
node.parent.remove(node)
node.parent = self
return node
@experimental(as_of="0.4.0")
def append(self, node):
r"""Appends a node to `children`, in-place, cleaning up refs
`append` will invalidate any node lookup caches, remove an existing
parent on `node` if one exists, set the parent of `node` to self
and add the `node` to `self` `children`.
Parameters
----------
node : TreeNode
An existing TreeNode object
See Also
--------
extend
Examples
--------
>>> from skbio import TreeNode
>>> root = TreeNode(name="root")
>>> child1 = TreeNode(name="child1")
>>> child2 = TreeNode(name="child2")
>>> root.append(child1)
>>> root.append(child2)
>>> print(root)
(child1,child2)root;
<BLANKLINE>
"""
self.children.append(self._adopt(node))
@experimental(as_of="0.4.0")
def extend(self, nodes):
r"""Append a `list` of `TreeNode` to `self`.
`extend` will invalidate any node lookup caches, remove existing
parents of the `nodes` if they have any, set their parents to self
and add the nodes to `self` `children`.
Parameters
----------
nodes : list of TreeNode
A list of TreeNode objects
See Also
--------
append
Examples
--------
>>> from skbio import TreeNode
>>> root = TreeNode(name="root")
>>> root.extend([TreeNode(name="child1"), TreeNode(name="child2")])
>>> print(root)
(child1,child2)root;
<BLANKLINE>
"""
self.children.extend([self._adopt(n) for n in nodes[:]])
@experimental(as_of="0.4.0")
def pop(self, index=-1):
r"""Remove a `TreeNode` from `self`.
Remove a child node by its index position. All node lookup caches
are invalidated, and the parent reference for the popped node will be
set to `None`.
Parameters
----------
index : int
The index position in `children` to pop
Returns
-------
TreeNode
The popped child
See Also
--------
remove
remove_deleted
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read(["(a,b)c;"])
>>> print(tree.pop(0))
a;
<BLANKLINE>
"""
return self._remove_node(index)
def _remove_node(self, idx):
r"""The actual (and only) method that performs node removal"""
self.invalidate_caches()
node = self.children.pop(idx)
node.parent = None
return node
@experimental(as_of="0.4.0")
def remove(self, node):
r"""Remove a node from self
Remove a `node` from `self` by identity of the node.
Parameters
----------
node : TreeNode
The node to remove from self's children
Returns
-------
bool
`True` if the node was removed, `False` otherwise
See Also
--------
pop
remove_deleted
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read(["(a,b)c;"])
>>> tree.remove(tree.children[0])
True
"""
for (i, curr_node) in enumerate(self.children):
if curr_node is node:
self._remove_node(i)
return True
return False
@experimental(as_of="0.4.0")
def remove_deleted(self, func):
r"""Delete nodes in which `func(node)` evaluates `True`.
Remove all descendants from `self` that evaluate `True` from `func`.
This has the potential to drop clades.
Parameters
----------
func : a function
A function that evaluates `True` when a node should be deleted
See Also
--------
pop
remove
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read(["(a,b)c;"])
>>> tree.remove_deleted(lambda x: x.name == 'b')
>>> print(tree)
(a)c;
<BLANKLINE>
"""
for node in self.traverse(include_self=False):
if func(node):
node.parent.remove(node)
@experimental(as_of="0.4.0")
def prune(self):
r"""Reconstructs correct topology after nodes have been removed.
Internal nodes with only one child will be removed and new connections
will be made to reflect change. This method is useful to call
following node removals as it will clean up nodes with singular
children.
Names and properties of singular children will override the names and
properties of their parents following the prune.
Node lookup caches are invalidated.
See Also
--------
shear
remove
pop
remove_deleted
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read(["((a,b)c,(d,e)f)root;"])
>>> to_delete = tree.find('b')
>>> tree.remove_deleted(lambda x: x == to_delete)
>>> print(tree)
((a)c,(d,e)f)root;
<BLANKLINE>
>>> tree.prune()
>>> print(tree)
((d,e)f,a)root;
<BLANKLINE>
"""
# build up the list of nodes to remove so the topology is not altered
# while traversing
nodes_to_remove = []
for node in self.traverse(include_self=False):
if len(node.children) == 1:
nodes_to_remove.append(node)
# clean up the single children nodes
for node in nodes_to_remove:
child = node.children[0]
if child.length is None or node.length is None:
child.length = child.length or node.length
else:
child.length += node.length
if node.parent is None:
continue
node.parent.append(child)
node.parent.remove(node)
        # if a single descendant from the root, the root adopts the child's
# properties. we can't "delete" the root as that would be deleting
# self.
if len(self.children) == 1:
node_to_copy = self.children[0]
efc = self._exclude_from_copy
for key in node_to_copy.__dict__:
if key not in efc:
self.__dict__[key] = deepcopy(node_to_copy.__dict__[key])
self.remove(node_to_copy)
self.extend(node_to_copy.children)
@experimental(as_of="0.4.0")
def shear(self, names):
"""Lop off tips until the tree just has the desired tip names.
Parameters
----------
names : Iterable of str
The tip names on the tree to keep
Returns
-------
TreeNode
The resulting tree
Raises
------
ValueError
If the names do not exist in the tree
See Also
--------
prune
remove
pop
remove_deleted
Examples
--------
>>> from skbio import TreeNode
>>> t = TreeNode.read(['((H:1,G:1):2,(R:0.5,M:0.7):3);'])
>>> sheared = t.shear(['G', 'M'])
>>> print(sheared)
(G:3.0,M:3.7);
<BLANKLINE>
"""
tcopy = self.deepcopy()
all_tips = {n.name for n in tcopy.tips()}
ids = set(names)
if not ids.issubset(all_tips):
raise ValueError("ids are not a subset of the tree.")
marked = set()
for tip in tcopy.tips():
if tip.name in ids:
marked.add(tip)
for anc in tip.ancestors():
if anc in marked:
break
else:
marked.add(anc)
for node in list(tcopy.traverse()):
if node not in marked:
node.parent.remove(node)
tcopy.prune()
return tcopy
@experimental(as_of="0.4.0")
def copy(self):
r"""Returns a copy of self using an iterative approach
Perform an iterative deepcopy of self. It is not assured that the copy
of node attributes will be performed iteratively as that depends on
the copy method of the types being copied
Returns
-------
TreeNode
A new copy of self
See Also
--------
unrooted_deepcopy
unrooted_copy
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read(["((a,b)c,(d,e)f)root;"])
>>> tree_copy = tree.copy()
>>> tree_nodes = set([id(n) for n in tree.traverse()])
>>> tree_copy_nodes = set([id(n) for n in tree_copy.traverse()])
>>> print(len(tree_nodes.intersection(tree_copy_nodes)))
0
"""
def __copy_node(node_to_copy):
r"""Helper method to copy a node"""
# this is _possibly_ dangerous, we're assuming the node to copy is
# of the same class as self, and has the same exclusion criteria.
# however, it is potentially dangerous to mix TreeNode subclasses
# within a tree, so...
result = self.__class__()
efc = self._exclude_from_copy
for key in node_to_copy.__dict__:
if key not in efc:
result.__dict__[key] = deepcopy(node_to_copy.__dict__[key])
return result
root = __copy_node(self)
nodes_stack = [[root, self, len(self.children)]]
while nodes_stack:
# check the top node, any children left unvisited?
top = nodes_stack[-1]
new_top_node, old_top_node, unvisited_children = top
if unvisited_children:
top[2] -= 1
old_child = old_top_node.children[-unvisited_children]
new_child = __copy_node(old_child)
new_top_node.append(new_child)
nodes_stack.append([new_child, old_child,
len(old_child.children)])
else: # no unvisited children
nodes_stack.pop()
return root
__copy__ = copy
__deepcopy__ = deepcopy = copy
@experimental(as_of="0.4.0")
def unrooted_deepcopy(self, parent=None):
r"""Walks the tree unrooted-style and returns a new copy
Perform a deepcopy of self and return a new copy of the tree as an
unrooted copy. This is useful for defining new roots of the tree as
the `TreeNode`.
This method calls `TreeNode.unrooted_copy` which is recursive.
Parameters
----------
parent : TreeNode or None
Used to avoid infinite loops when performing the unrooted traverse
Returns
-------
TreeNode
A new copy of the tree
See Also
--------
copy
unrooted_copy
root_at
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read(["((a,(b,c)d)e,(f,g)h)i;"])
>>> new_tree = tree.find('d').unrooted_deepcopy()
>>> print(new_tree)
(b,c,(a,((f,g)h)e)d)root;
<BLANKLINE>
"""
root = self.root()
root.assign_ids()
new_tree = root.copy()
new_tree.assign_ids()
new_tree_self = new_tree.find_by_id(self.id)
return new_tree_self.unrooted_copy(parent)
@experimental(as_of="0.4.0")
def unrooted_copy(self, parent=None):
r"""Walks the tree unrooted-style and returns a copy
Perform a copy of self and return a new copy of the tree as an
unrooted copy. This is useful for defining new roots of the tree as
the `TreeNode`.
This method is recursive.
Warning, this is _NOT_ a deepcopy
Parameters
----------
parent : TreeNode or None
Used to avoid infinite loops when performing the unrooted traverse
Returns
-------
TreeNode
A new copy of the tree
See Also
--------
copy
unrooted_deepcopy
root_at
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read(["((a,(b,c)d)e,(f,g)h)i;"])
>>> new_tree = tree.find('d').unrooted_copy()
>>> print(new_tree)
(b,c,(a,((f,g)h)e)d)root;
<BLANKLINE>
"""
neighbors = self.neighbors(ignore=parent)
children = [c.unrooted_copy(parent=self) for c in neighbors]
# we might be walking UP the tree, so:
if parent is None:
# base edge
edgename = None
length = None
elif parent.parent is self:
# self's parent is becoming self's child
edgename = parent.name
length = parent.length
else:
assert parent is self.parent
edgename = self.name
length = self.length
result = self.__class__(name=edgename, children=children,
length=length)
if parent is None:
result.name = "root"
return result
@experimental(as_of="0.4.0")
def count(self, tips=False):
"""Get the count of nodes in the tree
Parameters
----------
tips : bool
If `True`, only return the count of the number of tips
Returns
-------
int
The number of nodes or tips
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read(["((a,(b,c)d)e,(f,g)h)i;"])
>>> print(tree.count())
9
>>> print(tree.count(tips=True))
5
"""
if tips:
return len(list(self.tips()))
else:
return len(list(self.traverse(include_self=True)))
@experimental(as_of="0.4.1")
def observed_node_counts(self, tip_counts):
"""Returns counts of node observations from counts of tip observations
Parameters
----------
tip_counts : dict of ints
Counts of observations of tips. Keys correspond to tip names in
``self``, and counts are unsigned ints.
Returns
-------
dict
Counts of observations of nodes. Keys correspond to node names
(internal nodes or tips), and counts are unsigned ints.
Raises
------
ValueError
If a count less than one is observed.
MissingNodeError
If a count is provided for a tip not in the tree, or for an
internal node.
"""
result = defaultdict(int)
for tip_name, count in tip_counts.items():
if count < 1:
raise ValueError("All tip counts must be greater than zero.")
else:
t = self.find(tip_name)
if not t.is_tip():
raise MissingNodeError(
"Counts can only be for tips in the tree. %s is an "
"internal node." % t.name)
result[t] += count
for internal_node in t.ancestors():
result[internal_node] += count
return result
@experimental(as_of="0.4.0")
def subtree(self, tip_list=None):
r"""Make a copy of the subtree"""
raise NotImplementedError()
@experimental(as_of="0.4.0")
def subset(self):
r"""Returns set of names that descend from specified node
Get the set of `name` on tips that descend from this node.
Returns
-------
frozenset
The set of names at the tips of the clade that descends from self
See Also
--------
subsets
compare_subsets
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read(["((a,(b,c)d)e,(f,g)h)i;"])
>>> sorted(tree.subset())
['a', 'b', 'c', 'f', 'g']
"""
return frozenset({i.name for i in self.tips()})
@experimental(as_of="0.4.0")
def subsets(self):
r"""Return all sets of names that come from self and its descendants
Compute all subsets of tip names over `self`, or, represent a tree as a
set of nested sets.
Returns
-------
frozenset
A frozenset of frozensets of str
See Also
--------
subset
compare_subsets
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read(["(((a,b)c,(d,e)f)h)root;"])
>>> subsets = tree.subsets()
>>> len(subsets)
3
"""
sets = []
for i in self.postorder(include_self=False):
if not i.children:
i.__leaf_set = frozenset([i.name])
else:
leaf_set = reduce(or_, [c.__leaf_set for c in i.children])
if len(leaf_set) > 1:
sets.append(leaf_set)
i.__leaf_set = leaf_set
return frozenset(sets)
@experimental(as_of="0.4.0")
def root_at(self, node):
r"""Return a new tree rooted at the provided node.
This can be useful for drawing unrooted trees with an orientation that
reflects knowledge of the true root location.
Parameters
----------
node : TreeNode or str
The node to root at
Returns
-------
TreeNode
A new copy of the tree
Raises
------
TreeError
Raises a `TreeError` if a tip is specified as the new root
See Also
--------
root_at_midpoint
unrooted_deepcopy
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read(["(((a,b)c,(d,e)f)g,h)i;"])
>>> print(tree.root_at('c'))
(a,b,((d,e)f,(h)g)c)root;
<BLANKLINE>
"""
if isinstance(node, str):
node = self.find(node)
if not node.children:
raise TreeError("Can't use a tip (%s) as the root" %
repr(node.name))
return node.unrooted_deepcopy()
@experimental(as_of="0.4.0")
def root_at_midpoint(self):
r"""Return a new tree rooted at midpoint of the two tips farthest apart
This method doesn't preserve the internal node naming or structure,
but does keep tip to tip distances correct. Uses `unrooted_copy` but
operates on a full copy of the tree.
Raises
------
TreeError
If a tip ends up being the mid point
Returns
-------
TreeNode
A tree rooted at its midpoint
LengthError
Midpoint rooting requires `length` and will raise (indirectly) if
evaluated nodes don't have length.
See Also
--------
root_at
unrooted_deepcopy
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read(["(((d:1,e:1,(g:1)f:1)c:1)b:1,h:1)a:1;"])
>>> print(tree.root_at_midpoint())
((d:1.0,e:1.0,(g:1.0)f:1.0)c:0.5,((h:1.0)b:1.0):0.5)root;
<BLANKLINE>
"""
tree = self.copy()
max_dist, tips = tree.get_max_distance()
half_max_dist = max_dist / 2.0
if max_dist == 0.0: # only pathological cases with no lengths
return tree
tip1 = tree.find(tips[0])
tip2 = tree.find(tips[1])
lca = tree.lowest_common_ancestor([tip1, tip2])
if tip1.accumulate_to_ancestor(lca) > half_max_dist:
climb_node = tip1
else:
climb_node = tip2
dist_climbed = 0.0
while dist_climbed + climb_node.length < half_max_dist:
dist_climbed += climb_node.length
climb_node = climb_node.parent
        # now midpt is either on the branch to climb_node's parent
# or midpt is at climb_node's parent
if dist_climbed + climb_node.length == half_max_dist:
# climb to midpoint spot
climb_node = climb_node.parent
if climb_node.is_tip():
raise TreeError('error trying to root tree at tip')
else:
return climb_node.unrooted_copy()
else:
# make a new node on climb_node's branch to its parent
old_br_len = climb_node.length
new_root = tree.__class__()
climb_node.parent.append(new_root)
new_root.append(climb_node)
climb_node.length = half_max_dist - dist_climbed
new_root.length = old_br_len - climb_node.length
return new_root.unrooted_copy()
@experimental(as_of="0.4.0")
def is_tip(self):
r"""Returns `True` if the current node has no `children`.
Returns
-------
bool
`True` if the node is a tip
See Also
--------
is_root
has_children
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read(["((a,b)c);"])
>>> print(tree.is_tip())
False
>>> print(tree.find('a').is_tip())
True
"""
return not self.children
@experimental(as_of="0.4.0")
def is_root(self):
r"""Returns `True` if the current is a root, i.e. has no `parent`.
Returns
-------
bool
`True` if the node is the root
See Also
--------
is_tip
has_children
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read(["((a,b)c);"])
>>> print(tree.is_root())
True
>>> print(tree.find('a').is_root())
False
"""
return self.parent is None
@experimental(as_of="0.4.0")
def has_children(self):
r"""Returns `True` if the node has `children`.
Returns
-------
bool
`True` if the node has children.
See Also
--------
is_tip
is_root
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read(["((a,b)c);"])
>>> print(tree.has_children())
True
>>> print(tree.find('a').has_children())
False
"""
return not self.is_tip()
@experimental(as_of="0.4.0")
def traverse(self, self_before=True, self_after=False, include_self=True):
r"""Returns iterator over descendants
This is a depth-first traversal. Since the trees are not binary,
preorder and postorder traversals are possible, but inorder traversals
would depend on the data in the tree and are not handled here.
Parameters
----------
self_before : bool
includes each node before its descendants if True
self_after : bool
includes each node after its descendants if True
include_self : bool
include the initial node if True
`self_before` and `self_after` are independent. If neither is `True`,
only terminal nodes will be returned.
Note that if self is terminal, it will only be included once even if
`self_before` and `self_after` are both `True`.
Yields
------
TreeNode
Traversed node.
See Also
--------
preorder
postorder
pre_and_postorder
levelorder
tips
non_tips
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read(["((a,b)c);"])
>>> for node in tree.traverse():
... print(node.name)
None
c
a
b
"""
if self_before:
if self_after:
return self.pre_and_postorder(include_self=include_self)
else:
return self.preorder(include_self=include_self)
else:
if self_after:
return self.postorder(include_self=include_self)
else:
return self.tips(include_self=include_self)
@experimental(as_of="0.4.0")
def preorder(self, include_self=True):
r"""Performs preorder iteration over tree
Parameters
----------
include_self : bool
include the initial node if True
Yields
------
TreeNode
Traversed node.
See Also
--------
traverse
postorder
pre_and_postorder
levelorder
tips
non_tips
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read(["((a,b)c);"])
>>> for node in tree.preorder():
... print(node.name)
None
c
a
b
"""
stack = [self]
while stack:
curr = stack.pop()
if include_self or (curr is not self):
yield curr
if curr.children:
stack.extend(curr.children[::-1])
@experimental(as_of="0.4.0")
def postorder(self, include_self=True):
r"""Performs postorder iteration over tree.
This is somewhat inelegant compared to saving the node and its index
on the stack, but is 30% faster in the average case and 3x faster in
the worst case (for a comb tree).
Parameters
----------
include_self : bool
include the initial node if True
Yields
------
TreeNode
Traversed node.
See Also
--------
traverse
preorder
pre_and_postorder
levelorder
tips
non_tips
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read(["((a,b)c);"])
>>> for node in tree.postorder():
... print(node.name)
a
b
c
None
"""
child_index_stack = [0]
curr = self
curr_children = self.children
curr_children_len = len(curr_children)
while 1:
curr_index = child_index_stack[-1]
# if there are children left, process them
if curr_index < curr_children_len:
curr_child = curr_children[curr_index]
# if the current child has children, go there
if curr_child.children:
child_index_stack.append(0)
curr = curr_child
curr_children = curr.children
curr_children_len = len(curr_children)
curr_index = 0
# otherwise, yield that child
else:
yield curr_child
child_index_stack[-1] += 1
# if there are no children left, return self, and move to
# self's parent
else:
if include_self or (curr is not self):
yield curr
if curr is self:
break
curr = curr.parent
curr_children = curr.children
curr_children_len = len(curr_children)
child_index_stack.pop()
child_index_stack[-1] += 1
@experimental(as_of="0.4.0")
def pre_and_postorder(self, include_self=True):
r"""Performs iteration over tree, visiting node before and after
Parameters
----------
include_self : bool
include the initial node if True
Yields
------
TreeNode
Traversed node.
See Also
--------
traverse
postorder
preorder
levelorder
tips
non_tips
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read(["((a,b)c);"])
>>> for node in tree.pre_and_postorder():
... print(node.name)
None
c
a
b
c
None
"""
# handle simple case first
if not self.children:
if include_self:
yield self
return
child_index_stack = [0]
curr = self
curr_children = self.children
while 1:
curr_index = child_index_stack[-1]
if not curr_index:
if include_self or (curr is not self):
yield curr
# if there are children left, process them
if curr_index < len(curr_children):
curr_child = curr_children[curr_index]
# if the current child has children, go there
if curr_child.children:
child_index_stack.append(0)
curr = curr_child
curr_children = curr.children
curr_index = 0
# otherwise, yield that child
else:
yield curr_child
child_index_stack[-1] += 1
# if there are no children left, return self, and move to
# self's parent
else:
if include_self or (curr is not self):
yield curr
if curr is self:
break
curr = curr.parent
curr_children = curr.children
child_index_stack.pop()
child_index_stack[-1] += 1
@experimental(as_of="0.4.0")
def levelorder(self, include_self=True):
r"""Performs levelorder iteration over tree
Parameters
----------
include_self : bool
include the initial node if True
Yields
------
TreeNode
Traversed node.
See Also
--------
traverse
postorder
preorder
pre_and_postorder
tips
non_tips
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read(["((a,b)c,(d,e)f);"])
>>> for node in tree.levelorder():
... print(node.name)
None
c
f
a
b
d
e
"""
queue = [self]
while queue:
curr = queue.pop(0)
if include_self or (curr is not self):
yield curr
if curr.children:
queue.extend(curr.children)
@experimental(as_of="0.4.0")
def tips(self, include_self=False):
r"""Iterates over tips descended from `self`.
Node order is consistent between calls and is ordered by a
postorder traversal of the tree.
Parameters
----------
include_self : bool
include the initial node if True
Yields
------
TreeNode
Traversed node.
See Also
--------
traverse
postorder
preorder
pre_and_postorder
levelorder
non_tips
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read(["((a,b)c,(d,e)f);"])
>>> for node in tree.tips():
... print(node.name)
a
b
d
e
"""
for n in self.postorder(include_self=include_self):
if n.is_tip():
yield n
@experimental(as_of="0.4.0")
def non_tips(self, include_self=False):
r"""Iterates over nontips descended from self
`include_self`, if `True` (default is False), will return the current
node as part of non_tips if it is a non_tip. Node order is consistent
between calls and is ordered by a postorder traversal of the tree.
Parameters
----------
include_self : bool
include the initial node if True
Yields
------
TreeNode
Traversed node.
See Also
--------
traverse
postorder
preorder
pre_and_postorder
levelorder
tips
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read(["((a,b)c,(d,e)f);"])
>>> for node in tree.non_tips():
... print(node.name)
c
f
"""
for n in self.postorder(include_self):
if not n.is_tip():
yield n
@experimental(as_of="0.4.0")
def invalidate_caches(self, attr=True):
r"""Delete lookup and attribute caches
Parameters
----------
attr : bool, optional
If ``True``, invalidate attribute caches created by
`TreeNode.cache_attr`.
See Also
--------
create_caches
cache_attr
find
"""
if not self.is_root():
self.root().invalidate_caches()
else:
self._tip_cache = {}
self._non_tip_cache = {}
if self._registered_caches and attr:
for n in self.traverse():
for cache in self._registered_caches:
if hasattr(n, cache):
delattr(n, cache)
@experimental(as_of="0.4.0")
def create_caches(self):
r"""Construct an internal lookups to facilitate searching by name
This method will not cache nodes in which the .name is None. This
method will raise `DuplicateNodeError` if a name conflict in the tips
is discovered, but will not raise if on internal nodes. This is
because, in practice, the tips of a tree are required to be unique
while no such requirement holds for internal nodes.
Raises
------
DuplicateNodeError
The tip cache requires that names are unique (with the exception of
names that are None)
See Also
--------
invalidate_caches
cache_attr
find
"""
if not self.is_root():
self.root().create_caches()
else:
if self._tip_cache and self._non_tip_cache:
return
self.invalidate_caches(attr=False)
tip_cache = {}
non_tip_cache = defaultdict(list)
for node in self.postorder():
name = node.name
if name is None:
continue
if node.is_tip():
if name in tip_cache:
raise DuplicateNodeError("Tip with name '%s' already "
"exists." % name)
tip_cache[name] = node
else:
non_tip_cache[name].append(node)
self._tip_cache = tip_cache
self._non_tip_cache = non_tip_cache
@experimental(as_of="0.4.0")
def find_all(self, name):
r"""Find all nodes that match `name`
The first call to `find_all` will cache all nodes in the tree on the
assumption that additional calls to `find_all` will be made.
Parameters
----------
name : TreeNode or str
The name or node to find. If `name` is `TreeNode` then all other
nodes with the same name will be returned.
Raises
------
MissingNodeError
Raises if the node to be searched for is not found
Returns
-------
list of TreeNode
The nodes found
See Also
--------
find
find_by_id
find_by_func
Examples
--------
>>> from skbio.tree import TreeNode
>>> tree = TreeNode.read(["((a,b)c,(d,e)d,(f,g)c);"])
>>> for node in tree.find_all('c'):
... print(node.name, node.children[0].name, node.children[1].name)
c a b
c f g
>>> for node in tree.find_all('d'):
... print(node.name, str(node))
d (d,e)d;
<BLANKLINE>
d d;
<BLANKLINE>
"""
root = self.root()
# if what is being passed in looks like a node, just return it
if isinstance(name, root.__class__):
return [name]
root.create_caches()
tip = root._tip_cache.get(name, None)
nodes = root._non_tip_cache.get(name, [])
nodes.append(tip) if tip is not None else None
if not nodes:
raise MissingNodeError("Node %s is not in self" % name)
else:
return nodes
@experimental(as_of="0.4.0")
def find(self, name):
r"""Find a node by `name`.
The first call to `find` will cache all nodes in the tree on the
assumption that additional calls to `find` will be made.
`find` will first attempt to find the node in the tips. If it cannot
find a corresponding tip, then it will search through the internal
nodes of the tree. In practice, phylogenetic trees and other common
trees in biology do not have unique internal node names. As a result,
        this find method will only return the first occurrence of an internal
node encountered on a postorder traversal of the tree.
Parameters
----------
name : TreeNode or str
The name or node to find. If `name` is `TreeNode` then it is
simply returned
Raises
------
MissingNodeError
Raises if the node to be searched for is not found
Returns
-------
TreeNode
The found node
See Also
--------
find_all
find_by_id
find_by_func
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read(["((a,b)c,(d,e)f);"])
>>> print(tree.find('c').name)
c
"""
root = self.root()
# if what is being passed in looks like a node, just return it
if isinstance(name, root.__class__):
return name
root.create_caches()
node = root._tip_cache.get(name, None)
if node is None:
node = root._non_tip_cache.get(name, [None])[0]
if node is None:
raise MissingNodeError("Node %s is not in self" % name)
else:
return node
@experimental(as_of="0.4.0")
def find_by_id(self, node_id):
r"""Find a node by `id`.
This search method is based from the root.
Parameters
----------
node_id : int
The `id` of a node in the tree
Returns
-------
TreeNode
The tree node with the matching id
Notes
-----
This method does not cache id associations. A full traversal of the
tree is performed to find a node by an id on every call.
Raises
------
MissingNodeError
This method will raise if the `id` cannot be found
See Also
--------
find
find_all
find_by_func
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read(["((a,b)c,(d,e)f);"])
>>> print(tree.find_by_id(2).name)
d
"""
# if this method gets used frequently, then we should cache by ID
# as well
root = self.root()
root.assign_ids()
node = None
for n in self.traverse(include_self=True):
if n.id == node_id:
node = n
break
if node is None:
raise MissingNodeError("ID %d is not in self" % node_id)
else:
return node
@experimental(as_of="0.4.0")
def find_by_func(self, func):
r"""Find all nodes given a function
This search method is based on the current subtree, not the root.
Parameters
----------
func : a function
A function that accepts a TreeNode and returns `True` or `False`,
where `True` indicates the node is to be yielded
Yields
------
TreeNode
Node found by `func`.
See Also
--------
find
find_all
find_by_id
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read(["((a,b)c,(d,e)f);"])
>>> func = lambda x: x.parent == tree.find('c')
>>> [n.name for n in tree.find_by_func(func)]
['a', 'b']
"""
for node in self.traverse(include_self=True):
if func(node):
yield node
@experimental(as_of="0.4.0")
def ancestors(self):
r"""Returns all ancestors back to the root
This call will return all nodes in the path back to root, but does not
include the node instance that the call was made from.
Returns
-------
list of TreeNode
The path, toward the root, from self
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read(["((a,b)c,(d,e)f)root;"])
>>> [node.name for node in tree.find('a').ancestors()]
['c', 'root']
"""
result = []
curr = self
while not curr.is_root():
result.append(curr.parent)
curr = curr.parent
return result
@experimental(as_of="0.4.0")
def root(self):
r"""Returns root of the tree `self` is in
Returns
-------
TreeNode
The root of the tree
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read(["((a,b)c,(d,e)f)root;"])
>>> tip_a = tree.find('a')
>>> root = tip_a.root()
>>> root == tree
True
"""
curr = self
while not curr.is_root():
curr = curr.parent
return curr
@experimental(as_of="0.4.0")
def siblings(self):
r"""Returns all nodes that are `children` of `self` `parent`.
This call excludes `self` from the list.
Returns
-------
list of TreeNode
The list of sibling nodes relative to self
See Also
--------
neighbors
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read(["((a,b)c,(d,e,f)g)root;"])
>>> tip_e = tree.find('e')
>>> [n.name for n in tip_e.siblings()]
['d', 'f']
"""
if self.is_root():
return []
result = self.parent.children[:]
result.remove(self)
return result
@experimental(as_of="0.4.0")
def neighbors(self, ignore=None):
r"""Returns all nodes that are connected to self
This call does not include `self` in the result
Parameters
----------
ignore : TreeNode
A node to ignore
Returns
-------
list of TreeNode
The list of all nodes that are connected to self
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read(["((a,b)c,(d,e)f)root;"])
>>> node_c = tree.find('c')
>>> [n.name for n in node_c.neighbors()]
['a', 'b', 'root']
"""
nodes = [n for n in self.children + [self.parent] if n is not None]
if ignore is None:
return nodes
else:
return [n for n in nodes if n is not ignore]
@experimental(as_of="0.4.0")
def lowest_common_ancestor(self, tipnames):
r"""Lowest common ancestor for a list of tips
Parameters
----------
tipnames : list of TreeNode or str
The nodes of interest
Returns
-------
TreeNode
The lowest common ancestor of the passed in nodes
Raises
------
ValueError
If no tips could be found in the tree, or if not all tips were
found.
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read(["((a,b)c,(d,e)f)root;"])
>>> nodes = [tree.find('a'), tree.find('b')]
>>> lca = tree.lowest_common_ancestor(nodes)
>>> print(lca.name)
c
>>> nodes = [tree.find('a'), tree.find('e')]
        >>> lca = tree.lca(nodes) # lca is an alias for convenience
>>> print(lca.name)
root
"""
if len(tipnames) == 1:
return self.find(tipnames[0])
tips = [self.find(name) for name in tipnames]
if len(tips) == 0:
raise ValueError("No tips found.")
nodes_to_scrub = []
for t in tips:
if t.is_root():
# has to be the LCA...
return t
prev = t
curr = t.parent
while curr and not hasattr(curr, 'black'):
setattr(curr, 'black', [prev])
nodes_to_scrub.append(curr)
prev = curr
curr = curr.parent
# increase black count, multiple children lead to here
if curr:
curr.black.append(prev)
curr = self
while len(curr.black) == 1:
curr = curr.black[0]
# clean up tree
for n in nodes_to_scrub:
delattr(n, 'black')
return curr
lca = lowest_common_ancestor # for convenience
@classonlymethod
@experimental(as_of="0.4.0")
def from_taxonomy(cls, lineage_map):
r"""Construct a tree from a taxonomy
Parameters
----------
lineage_map : iterable of tuple
A id to lineage mapping where the first index is an ID and the
second index is an iterable of the lineage.
Returns
-------
TreeNode
The constructed taxonomy
Examples
--------
>>> from skbio.tree import TreeNode
>>> lineages = [
... ('1', ['Bacteria', 'Firmicutes', 'Clostridia']),
... ('2', ['Bacteria', 'Firmicutes', 'Bacilli']),
... ('3', ['Bacteria', 'Bacteroidetes', 'Sphingobacteria']),
... ('4', ['Archaea', 'Euryarchaeota', 'Thermoplasmata']),
... ('5', ['Archaea', 'Euryarchaeota', 'Thermoplasmata']),
... ('6', ['Archaea', 'Euryarchaeota', 'Halobacteria']),
... ('7', ['Archaea', 'Euryarchaeota', 'Halobacteria']),
... ('8', ['Bacteria', 'Bacteroidetes', 'Sphingobacteria']),
... ('9', ['Bacteria', 'Bacteroidetes', 'Cytophagia'])]
>>> tree = TreeNode.from_taxonomy(lineages)
>>> print(tree.ascii_art())
/Clostridia-1
/Firmicutes
| \Bacilli- /-2
/Bacteria|
| | /-3
| | /Sphingobacteria
| \Bacteroidetes \-8
| |
---------| \Cytophagia-9
|
| /-4
| /Thermoplasmata
| | \-5
\Archaea- /Euryarchaeota
| /-6
\Halobacteria
\-7
"""
root = cls(name=None)
root._lookup = {}
for id_, lineage in lineage_map:
cur_node = root
# for each name, see if we've seen it, if not, add that puppy on
for name in lineage:
if name in cur_node._lookup:
cur_node = cur_node._lookup[name]
else:
new_node = cls(name=name)
new_node._lookup = {}
cur_node._lookup[name] = new_node
cur_node.append(new_node)
cur_node = new_node
cur_node.append(cls(name=id_))
# scrub the lookups
for node in root.non_tips(include_self=True):
del node._lookup
return root
def _balanced_distance_to_tip(self):
"""Return the distance to tip from this node.
The distance to every tip from this node must be equal for this to
return a correct result.
Returns
-------
        float
The distance to tip of a length-balanced tree
"""
node = self
distance = 0
while node.has_children():
distance += node.children[0].length
node = node.children[0]
return distance
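    # Illustrative sketch (hypothetical values): for a length-balanced tree
    # parsed from "((a:1,b:1)c:2);", the first-child chain from the root is
    # root -> c -> a, so the root's _balanced_distance_to_tip() returns
    # 2.0 + 1.0 == 3.0, while the node named 'c' returns 1.0.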
@classonlymethod
@experimental(as_of="0.4.0")
def from_linkage_matrix(cls, linkage_matrix, id_list):
"""Return tree from SciPy linkage matrix.
Parameters
----------
linkage_matrix : ndarray
A SciPy linkage matrix as returned by
`scipy.cluster.hierarchy.linkage`
id_list : list
The indices of the `id_list` will be used in the linkage_matrix
Returns
-------
TreeNode
An unrooted bifurcated tree
See Also
--------
scipy.cluster.hierarchy.linkage
"""
tip_width = len(id_list)
cluster_count = len(linkage_matrix)
lookup_len = cluster_count + tip_width
node_lookup = np.empty(lookup_len, dtype=cls)
for i, name in enumerate(id_list):
node_lookup[i] = cls(name=name)
for i in range(tip_width, lookup_len):
node_lookup[i] = cls()
newest_cluster_index = cluster_count + 1
for link in linkage_matrix:
child_a = node_lookup[int(link[0])]
child_b = node_lookup[int(link[1])]
path_length = link[2] / 2
child_a.length = path_length - child_a._balanced_distance_to_tip()
child_b.length = path_length - child_b._balanced_distance_to_tip()
new_cluster = node_lookup[newest_cluster_index]
new_cluster.append(child_a)
new_cluster.append(child_b)
newest_cluster_index += 1
return node_lookup[-1]
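    # Illustrative usage sketch (assumes SciPy is installed; the observation
    # matrix and ids below are hypothetical):
    #
    #   import numpy as np
    #   from scipy.cluster.hierarchy import linkage
    #   from skbio import TreeNode
    #
    #   observations = np.array([[0.0], [1.0], [4.0], [5.0]])
    #   ids = ['a', 'b', 'c', 'd']
    #   lm = linkage(observations, method='average')
    #   tree = TreeNode.from_linkage_matrix(lm, ids)
    #   print(tree.ascii_art())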
@experimental(as_of="0.4.0")
def to_taxonomy(self, allow_empty=False, filter_f=None):
"""Returns a taxonomy representation of self
Parameters
----------
allow_empty : bool, optional
            Allow gaps in the taxonomy (e.g., internal nodes without names).
filter_f : function, optional
Specify a filtering function that returns True if the lineage is
to be returned. This function must accept a ``TreeNode`` as its
first parameter, and a ``list`` that represents the lineage as the
second parameter.
Yields
------
tuple
``(tip, [lineage])`` where ``tip`` corresponds to a tip in the tree
and ``[lineage]`` is the expanded names from root to tip. ``None``
and empty strings are omitted from the lineage.
Notes
-----
If ``allow_empty`` is ``True`` and the root node does not have a name,
then that name will not be included. This is because it is common to
have multiple domains represented in the taxonomy, which would result
in a root node that does not have a name and does not make sense to
represent in the output.
Examples
--------
>>> from skbio.tree import TreeNode
>>> lineages = {'1': ['Bacteria', 'Firmicutes', 'Clostridia'],
... '2': ['Bacteria', 'Firmicutes', 'Bacilli'],
... '3': ['Bacteria', 'Bacteroidetes', 'Sphingobacteria'],
... '4': ['Archaea', 'Euryarchaeota', 'Thermoplasmata'],
... '5': ['Archaea', 'Euryarchaeota', 'Thermoplasmata'],
... '6': ['Archaea', 'Euryarchaeota', 'Halobacteria'],
... '7': ['Archaea', 'Euryarchaeota', 'Halobacteria'],
... '8': ['Bacteria', 'Bacteroidetes', 'Sphingobacteria'],
... '9': ['Bacteria', 'Bacteroidetes', 'Cytophagia']}
>>> tree = TreeNode.from_taxonomy(lineages.items())
>>> lineages = sorted([(n.name, l) for n, l in tree.to_taxonomy()])
>>> for name, lineage in lineages:
... print(name, '; '.join(lineage))
1 Bacteria; Firmicutes; Clostridia
2 Bacteria; Firmicutes; Bacilli
3 Bacteria; Bacteroidetes; Sphingobacteria
4 Archaea; Euryarchaeota; Thermoplasmata
5 Archaea; Euryarchaeota; Thermoplasmata
6 Archaea; Euryarchaeota; Halobacteria
7 Archaea; Euryarchaeota; Halobacteria
8 Bacteria; Bacteroidetes; Sphingobacteria
9 Bacteria; Bacteroidetes; Cytophagia
"""
if filter_f is None:
def filter_f(a, b):
return True
self.assign_ids()
seen = set()
lineage = []
# visit internal nodes while traversing out to the tips, and on the
# way back up
for node in self.traverse(self_before=True, self_after=True):
if node.is_tip():
if filter_f(node, lineage):
yield (node, lineage[:])
else:
if allow_empty:
if node.is_root() and not node.name:
continue
else:
if not node.name:
continue
if node.id in seen:
lineage.pop(-1)
else:
lineage.append(node.name)
seen.add(node.id)
@experimental(as_of="0.4.0")
def to_array(self, attrs=None, nan_length_value=None):
"""Return an array representation of self
Parameters
----------
attrs : list of tuple or None
The attributes and types to return. The expected form is
[(attribute_name, type)]. If `None`, then `name`, `length`, and
`id` are returned.
nan_length_value : float, optional
If provided, replaces any `nan` in the branch length vector
(i.e., ``result['length']``) with this value. `nan` branch lengths
can arise from an edge not having a length (common for the root
            node parent edge), which can make summing problematic.
Returns
-------
dict of array
{id_index: {id: TreeNode},
child_index: ((node_id, left_child_id, right_child_id)),
attr_1: array(...),
...
attr_N: array(...)}
Notes
-----
Attribute arrays are in index order such that TreeNode.id can be used
as a lookup into the array.
Examples
--------
>>> from pprint import pprint
>>> from skbio import TreeNode
>>> t = TreeNode.read(['(((a:1,b:2,c:3)x:4,(d:5)y:6)z:7);'])
>>> res = t.to_array()
>>> sorted(res.keys())
['child_index', 'id', 'id_index', 'length', 'name']
>>> res['child_index']
array([[4, 0, 2],
[5, 3, 3],
[6, 4, 5],
[7, 6, 6]])
>>> for k, v in res['id_index'].items():
... print(k, v)
...
0 a:1.0;
<BLANKLINE>
1 b:2.0;
<BLANKLINE>
2 c:3.0;
<BLANKLINE>
3 d:5.0;
<BLANKLINE>
4 (a:1.0,b:2.0,c:3.0)x:4.0;
<BLANKLINE>
5 (d:5.0)y:6.0;
<BLANKLINE>
6 ((a:1.0,b:2.0,c:3.0)x:4.0,(d:5.0)y:6.0)z:7.0;
<BLANKLINE>
7 (((a:1.0,b:2.0,c:3.0)x:4.0,(d:5.0)y:6.0)z:7.0);
<BLANKLINE>
>>> res['id']
array([0, 1, 2, 3, 4, 5, 6, 7])
>>> res['name']
array(['a', 'b', 'c', 'd', 'x', 'y', 'z', None], dtype=object)
"""
if attrs is None:
attrs = [('name', object), ('length', float), ('id', int)]
else:
for attr, dtype in attrs:
if not hasattr(self, attr):
raise AttributeError("Invalid attribute '%s'." % attr)
id_index, child_index = self.index_tree()
n = self.id + 1 # assign_ids starts at 0
tmp = [np.zeros(n, dtype=dtype) for attr, dtype in attrs]
for node in self.traverse(include_self=True):
n_id = node.id
for idx, (attr, dtype) in enumerate(attrs):
tmp[idx][n_id] = getattr(node, attr)
results = {'id_index': id_index, 'child_index': child_index}
results.update({attr: arr for (attr, dtype), arr in zip(attrs, tmp)})
if nan_length_value is not None:
length_v = results['length']
length_v[np.isnan(length_v)] = nan_length_value
return results
def _ascii_art(self, char1='-', show_internal=True, compact=False):
LEN = 10
PAD = ' ' * LEN
PA = ' ' * (LEN - 1)
namestr = self._node_label()
if self.children:
mids = []
result = []
for c in self.children:
if c is self.children[0]:
char2 = '/'
elif c is self.children[-1]:
char2 = '\\'
else:
char2 = '-'
(clines, mid) = c._ascii_art(char2, show_internal, compact)
mids.append(mid + len(result))
result.extend(clines)
if not compact:
result.append('')
if not compact:
result.pop()
(lo, hi, end) = (mids[0], mids[-1], len(result))
prefixes = [PAD] * (lo + 1) + [PA + '|'] * \
(hi - lo - 1) + [PAD] * (end - hi)
mid = np.int(np.trunc((lo + hi) / 2))
prefixes[mid] = char1 + '-' * (LEN - 2) + prefixes[mid][-1]
result = [p + l for (p, l) in zip(prefixes, result)]
if show_internal:
stem = result[mid]
result[mid] = stem[0] + namestr + stem[len(namestr) + 1:]
return (result, mid)
else:
return ([char1 + '-' + namestr], 0)
@experimental(as_of="0.4.0")
def ascii_art(self, show_internal=True, compact=False):
r"""Returns a string containing an ascii drawing of the tree
Note, this method calls a private recursive function and is not safe
for large trees.
Parameters
----------
show_internal : bool
includes internal edge names
compact : bool
use exactly one line per tip
Returns
-------
str
an ASCII formatted version of the tree
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read(["((a,b)c,(d,e)f)root;"])
>>> print(tree.ascii_art())
/-a
/c-------|
| \-b
-root----|
| /-d
\f-------|
\-e
"""
(lines, mid) = self._ascii_art(show_internal=show_internal,
compact=compact)
return '\n'.join(lines)
@experimental(as_of="0.4.0")
def accumulate_to_ancestor(self, ancestor):
r"""Return the sum of the distance between self and ancestor
Parameters
----------
ancestor : TreeNode
            The node of the ancestor to accumulate distance to
Returns
-------
float
The sum of lengths between self and ancestor
Raises
------
NoParentError
A NoParentError is raised if the ancestor is not an ancestor of
self
NoLengthError
A NoLengthError is raised if one of the nodes between self and
ancestor (including self) lacks a `length` attribute
See Also
--------
distance
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read(["((a:1,b:2)c:3,(d:4,e:5)f:6)root;"])
>>> root = tree
>>> tree.find('a').accumulate_to_ancestor(root)
4.0
"""
accum = 0.0
curr = self
while curr is not ancestor:
if curr.is_root():
raise NoParentError("Provided ancestor is not in the path")
if curr.length is None:
raise NoLengthError("No length on node %s found." %
curr.name or "unnamed")
accum += curr.length
curr = curr.parent
return accum
@experimental(as_of="0.4.0")
def distance(self, other):
"""Return the distance between self and other
        This method can be used to compute the distances between two tips;
however, it is not optimized for computing pairwise tip distances.
Parameters
----------
other : TreeNode
The node to compute a distance to
Returns
-------
float
The distance between two nodes
Raises
------
NoLengthError
A NoLengthError will be raised if a node without `length` is
encountered
See Also
--------
tip_tip_distances
accumulate_to_ancestor
compare_tip_distances
get_max_distance
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read(["((a:1,b:2)c:3,(d:4,e:5)f:6)root;"])
>>> tip_a = tree.find('a')
>>> tip_d = tree.find('d')
>>> tip_a.distance(tip_d)
14.0
"""
if self is other:
return 0.0
self_ancestors = [self] + list(self.ancestors())
other_ancestors = [other] + list(other.ancestors())
if self in other_ancestors:
return other.accumulate_to_ancestor(self)
elif other in self_ancestors:
return self.accumulate_to_ancestor(other)
else:
root = self.root()
lca = root.lowest_common_ancestor([self, other])
accum = self.accumulate_to_ancestor(lca)
accum += other.accumulate_to_ancestor(lca)
return accum
def _set_max_distance(self):
"""Propagate tip distance information up the tree
This method was originally implemented by Julia Goodrich with the
intent of being able to determine max tip to tip distances between
nodes on large trees efficiently. The code has been modified to track
the specific tips the distance is between
"""
maxkey = itemgetter(0)
for n in self.postorder():
if n.is_tip():
n.MaxDistTips = ((0.0, n), (0.0, n))
else:
if len(n.children) == 1:
raise TreeError("No support for single descedent nodes")
else:
tip_info = [(max(c.MaxDistTips, key=maxkey), c)
for c in n.children]
dists = [i[0][0] for i in tip_info]
best_idx = np.argsort(dists)[-2:]
(tip_a_d, tip_a), child_a = tip_info[best_idx[0]]
(tip_b_d, tip_b), child_b = tip_info[best_idx[1]]
tip_a_d += child_a.length or 0.0
tip_b_d += child_b.length or 0.0
n.MaxDistTips = ((tip_a_d, tip_a), (tip_b_d, tip_b))
def _get_max_distance_singledesc(self):
"""returns the max distance between any pair of tips
Also returns the tip names that it is between as a tuple"""
distmtx = self.tip_tip_distances()
idx_max = divmod(distmtx.data.argmax(), distmtx.shape[1])
max_pair = (distmtx.ids[idx_max[0]], distmtx.ids[idx_max[1]])
return distmtx[idx_max], max_pair
@experimental(as_of="0.4.0")
def get_max_distance(self):
"""Returns the max tip tip distance between any pair of tips
Returns
-------
float
The distance between the two most distant tips in the tree
tuple of TreeNode
The two most distant tips in the tree
Raises
------
NoLengthError
A NoLengthError will be thrown if a node without length is
encountered
See Also
--------
distance
tip_tip_distances
compare_tip_distances
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read(["((a:1,b:2)c:3,(d:4,e:5)f:6)root;"])
>>> dist, tips = tree.get_max_distance()
>>> dist
16.0
>>> [n.name for n in tips]
['b', 'e']
"""
if not hasattr(self, 'MaxDistTips'):
# _set_max_distance will throw a TreeError if a node with a single
# child is encountered
try:
self._set_max_distance()
            except TreeError:
return self._get_max_distance_singledesc()
longest = 0.0
tips = [None, None]
for n in self.non_tips(include_self=True):
tip_a, tip_b = n.MaxDistTips
dist = (tip_a[0] + tip_b[0])
if dist > longest:
longest = dist
tips = [tip_a[1], tip_b[1]]
return longest, tips
@experimental(as_of="0.4.0")
def tip_tip_distances(self, endpoints=None):
"""Returns distance matrix between pairs of tips, and a tip order.
By default, all pairwise distances are calculated in the tree. If
`endpoints` are specified, then only the distances between those tips
are computed.
Parameters
----------
endpoints : list of TreeNode or str, or None
A list of TreeNode objects or names of TreeNode objects
Returns
-------
DistanceMatrix
The distance matrix
Raises
------
ValueError
If any of the specified `endpoints` are not tips
See Also
--------
distance
compare_tip_distances
Notes
-----
If a node does not have an associated length, 0.0 will be used and a
``RepresentationWarning`` will be raised.
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read(["((a:1,b:2)c:3,(d:4,e:5)f:6)root;"])
>>> mat = tree.tip_tip_distances()
>>> print(mat)
4x4 distance matrix
IDs:
'a', 'b', 'd', 'e'
Data:
[[ 0. 3. 14. 15.]
[ 3. 0. 15. 16.]
[ 14. 15. 0. 9.]
[ 15. 16. 9. 0.]]
"""
all_tips = list(self.tips())
if endpoints is None:
tip_order = all_tips
else:
tip_order = [self.find(n) for n in endpoints]
for n in tip_order:
if not n.is_tip():
raise ValueError("Node with name '%s' is not a tip." %
n.name)
# linearize all tips in postorder
# .__start, .__stop compose the slice in tip_order.
for i, node in enumerate(all_tips):
node.__start, node.__stop = i, i + 1
# the result map provides index in the result matrix
result_map = {n.__start: i for i, n in enumerate(tip_order)}
num_all_tips = len(all_tips) # total number of tips
num_tips = len(tip_order) # total number of tips in result
result = np.zeros((num_tips, num_tips), float) # tip by tip matrix
distances = np.zeros((num_all_tips), float) # dist from tip to tip
def update_result():
# set tip_tip distance between tips of different child
for child1, child2 in combinations(node.children, 2):
for tip1 in range(child1.__start, child1.__stop):
if tip1 not in result_map:
continue
t1idx = result_map[tip1]
for tip2 in range(child2.__start, child2.__stop):
if tip2 not in result_map:
continue
t2idx = result_map[tip2]
result[t1idx, t2idx] = distances[
tip1] + distances[tip2]
for node in self.postorder():
if not node.children:
continue
# subtree with solved child wedges
# can possibly use np.zeros
starts, stops = [], [] # to calc ._start and ._stop for curr node
for child in node.children:
length = child.length
if length is None:
warnings.warn(
"`TreeNode.tip_tip_distances`: Node with name %r does "
"not have an associated length, so a length of 0.0 "
"will be used." % child.name, RepresentationWarning)
length = 0.0
distances[child.__start:child.__stop] += length
starts.append(child.__start)
stops.append(child.__stop)
node.__start, node.__stop = min(starts), max(stops)
if len(node.children) > 1:
update_result()
return DistanceMatrix(result + result.T, [n.name for n in tip_order])
@experimental(as_of="0.4.0")
def compare_rfd(self, other, proportion=False):
"""Calculates the Robinson and Foulds symmetric difference
Parameters
----------
other : TreeNode
A tree to compare against
proportion : bool
Return a proportional difference
Returns
-------
float
The distance between the trees
Notes
-----
        Implementation based on code by Julia Goodrich. The original
description of the algorithm can be found in [1]_.
Raises
------
ValueError
If the tip names between `self` and `other` are equal.
See Also
--------
compare_subsets
compare_tip_distances
References
----------
.. [1] Comparison of phylogenetic trees. Robinson and Foulds.
Mathematical Biosciences. 1981. 53:131-141
Examples
--------
>>> from skbio import TreeNode
>>> tree1 = TreeNode.read(["((a,b),(c,d));"])
>>> tree2 = TreeNode.read(["(((a,b),c),d);"])
>>> tree1.compare_rfd(tree2)
2.0
"""
t1names = {n.name for n in self.tips()}
t2names = {n.name for n in other.tips()}
if t1names != t2names:
if t1names < t2names:
tree1 = self
tree2 = other.shear(t1names)
else:
tree1 = self.shear(t2names)
tree2 = other
else:
tree1 = self
tree2 = other
tree1_sets = tree1.subsets()
tree2_sets = tree2.subsets()
not_in_both = tree1_sets.symmetric_difference(tree2_sets)
dist = float(len(not_in_both))
if proportion:
total_subsets = len(tree1_sets) + len(tree2_sets)
dist = dist / total_subsets
return dist
@experimental(as_of="0.4.0")
def compare_subsets(self, other, exclude_absent_taxa=False):
"""Returns fraction of overlapping subsets where self and other differ.
        Names present in only one of the two trees will count as mismatches;
if you don't want this behavior, strip out the non-matching tips first.
Parameters
----------
other : TreeNode
The tree to compare
exclude_absent_taxa : bool
Strip out names that don't occur in both trees
Returns
-------
float
The fraction of overlapping subsets that differ between the trees
See Also
--------
compare_rfd
compare_tip_distances
subsets
Examples
--------
>>> from skbio import TreeNode
>>> tree1 = TreeNode.read(["((a,b),(c,d));"])
>>> tree2 = TreeNode.read(["(((a,b),c),d);"])
>>> tree1.compare_subsets(tree2)
0.5
"""
self_sets, other_sets = self.subsets(), other.subsets()
if exclude_absent_taxa:
in_both = self.subset() & other.subset()
self_sets = (i & in_both for i in self_sets)
self_sets = frozenset({i for i in self_sets if len(i) > 1})
other_sets = (i & in_both for i in other_sets)
other_sets = frozenset({i for i in other_sets if len(i) > 1})
total_subsets = len(self_sets) + len(other_sets)
intersection_length = len(self_sets & other_sets)
if not total_subsets: # no common subsets after filtering, so max dist
return 1
return 1 - (2 * intersection_length / float(total_subsets))
@experimental(as_of="0.4.0")
def compare_tip_distances(self, other, sample=None, dist_f=distance_from_r,
shuffle_f=np.random.shuffle):
"""Compares self to other using tip-to-tip distance matrices.
Value returned is `dist_f(m1, m2)` for the two matrices. Default is
to use the Pearson correlation coefficient, with +1 giving a distance
of 0 and -1 giving a distance of +1 (the maximum possible value).
Depending on the application, you might instead want to use
distance_from_r_squared, which counts correlations of both +1 and -1
as identical (0 distance).
Note: automatically strips out the names that don't match (this is
necessary for this method because the distance between non-matching
names and matching names is undefined in the tree where they don't
match, and because we need to reorder the names in the two trees to
match up the distance matrices).
Parameters
----------
other : TreeNode
The tree to compare
sample : int or None
Randomly subsample the tips in common between the trees to
compare. This is useful when comparing very large trees.
dist_f : function
The distance function used to compare two the tip-tip distance
matrices
shuffle_f : function
The shuffling function used if `sample` is not None
Returns
-------
float
The distance between the trees
Raises
------
ValueError
            A ValueError is raised if no common tips exist
between the trees
See Also
--------
compare_subsets
compare_rfd
Examples
--------
>>> from skbio import TreeNode
>>> # note, only three common taxa between the trees
>>> tree1 = TreeNode.read(["((a:1,b:1):2,(c:0.5,X:0.7):3);"])
>>> tree2 = TreeNode.read(["(((a:1,b:1,Y:1):2,c:3):1,Z:4);"])
>>> dist = tree1.compare_tip_distances(tree2)
>>> print("%.9f" % dist)
0.000133446
"""
self_names = {i.name: i for i in self.tips()}
other_names = {i.name: i for i in other.tips()}
common_names = frozenset(self_names) & frozenset(other_names)
common_names = list(common_names)
if not common_names:
raise ValueError("No tip names in common between the two trees.")
if len(common_names) <= 2:
return 1 # the two trees must match by definition in this case
if sample is not None:
shuffle_f(common_names)
common_names = common_names[:sample]
self_nodes = [self_names[k] for k in common_names]
other_nodes = [other_names[k] for k in common_names]
self_matrix = self.tip_tip_distances(endpoints=self_nodes)
other_matrix = other.tip_tip_distances(endpoints=other_nodes)
return dist_f(self_matrix, other_matrix)
@experimental(as_of="0.4.2")
def bifurcate(self, insert_length=None):
r"""Reorders the tree into a bifurcating tree.
All nodes that have more than 2 children will
have additional intermediate nodes inserted to ensure that
every node has only 2 children.
Parameters
----------
insert_length : int, optional
The branch length assigned to all inserted nodes.
See Also
--------
prune
Notes
-----
Any nodes that have a single child can be collapsed using the
prune method to create strictly bifurcating trees.
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read(["((a,b,g,h)c,(d,e)f)root;"])
>>> print(tree.ascii_art())
/-a
|
|--b
/c-------|
| |--g
| |
-root----| \-h
|
| /-d
\f-------|
\-e
>>> tree.bifurcate()
>>> print(tree.ascii_art())
/-h
/c-------|
| | /-g
| \--------|
| | /-a
-root----| \--------|
| \-b
|
| /-d
\f-------|
\-e
"""
for n in self.traverse(include_self=True):
if len(n.children) > 2:
stack = n.children
while len(stack) > 2:
ind = stack.pop()
intermediate = self.__class__()
intermediate.length = insert_length
intermediate.extend(stack)
n.append(intermediate)
for k in stack:
n.remove(k)
n.extend([ind, intermediate])
@experimental(as_of="0.4.0")
def index_tree(self):
"""Index a tree for rapid lookups within a tree array
Indexes nodes in-place as `n._leaf_index`.
Returns
-------
dict
A mapping {node_id: TreeNode}
np.array of ints
            This array describes the IDs of every internal node, and the ID
range of the immediate descendents. The first column in the array
corresponds to node_id. The second column is the left most
descendent's ID. The third column is the right most descendent's
ID.
"""
self.assign_ids()
id_index = {}
child_index = []
for n in self.postorder():
for c in n.children:
id_index[c.id] = c
if c:
# c has children itself, so need to add to result
child_index.append((c.id,
c.children[0].id,
c.children[-1].id))
        # handle root, which should be self
id_index[self.id] = self
# only want to add to the child_index if self has children...
if self.children:
child_index.append((self.id,
self.children[0].id,
self.children[-1].id))
child_index = np.asarray(child_index, dtype=np.int64)
child_index = np.atleast_2d(child_index)
return id_index, child_index
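    # Illustrative sketch: reusing the tree from the `to_array` example above,
    # each row of `child_index` is (internal node id, leftmost child id,
    # rightmost child id) under the postorder ids set by `assign_ids`:
    #
    #   t = TreeNode.read(['(((a:1,b:2,c:3)x:4,(d:5)y:6)z:7);'])
    #   id_index, child_index = t.index_tree()
    #   # child_index -> [[4, 0, 2], [5, 3, 3], [6, 4, 5], [7, 6, 6]]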
@experimental(as_of="0.4.0")
def assign_ids(self):
"""Assign topologically stable unique ids to self
Following the call, all nodes in the tree will have their id
attribute set
"""
curr_index = 0
for n in self.postorder():
for c in n.children:
c.id = curr_index
curr_index += 1
self.id = curr_index
@experimental(as_of="0.4.0")
def descending_branch_length(self, tip_subset=None):
"""Find total descending branch length from self or subset of self tips
Parameters
----------
tip_subset : Iterable, or None
If None, the total descending branch length for all tips in the
tree will be returned. If a list of tips is provided then only the
total descending branch length associated with those tips will be
returned.
Returns
-------
float
The total descending branch length for the specified set of tips.
Raises
------
ValueError
A ValueError is raised if the list of tips supplied to tip_subset
contains internal nodes or non-tips.
Notes
-----
        This function replicates cogent's totalDescendingBranchLength method
and extends that method to allow the calculation of total descending
branch length of a subset of the tips if requested. The postorder
guarantees that the function will always be able to add the descending
branch length if the node is not a tip.
Nodes with no length will have their length set to 0. The root length
(if it exists) is ignored.
Examples
--------
>>> from skbio import TreeNode
>>> tr = TreeNode.read(["(((A:.1,B:1.2)C:.6,(D:.9,E:.6)F:.9)G:2.4,"
... "(H:.4,I:.5)J:1.3)K;"])
>>> tdbl = tr.descending_branch_length()
>>> sdbl = tr.descending_branch_length(['A','E'])
>>> print(round(tdbl, 1), round(sdbl, 1))
8.9 2.2
"""
self.assign_ids()
if tip_subset is not None:
all_tips = self.subset()
if not set(tip_subset).issubset(all_tips):
raise ValueError('tip_subset contains ids that aren\'t tip '
'names.')
lca = self.lowest_common_ancestor(tip_subset)
ancestors = {}
for tip in tip_subset:
curr = self.find(tip)
while curr is not lca:
ancestors[curr.id] = curr.length if curr.length is not \
None else 0.0
curr = curr.parent
return sum(ancestors.values())
else:
return sum(n.length for n in self.postorder(include_self=True) if
n.length is not None)
@experimental(as_of="0.4.0")
def cache_attr(self, func, cache_attrname, cache_type=list):
"""Cache attributes on internal nodes of the tree
Parameters
----------
func : function
func will be provided the node currently being evaluated and must
            return a list of items to cache from that node, or an
empty list.
cache_attrname : str
Name of the attribute to decorate on containing the cached values
cache_type : {set, frozenset, list}
The type of the cache
Notes
-----
This method is particularly useful if you need to frequently look up
attributes that would normally require a traversal of the tree.
WARNING: any cache created by this method will be invalidated if the
topology of the tree changes (e.g., if `TreeNode.invalidate_caches` is
called).
Raises
------
TypeError
            If a `cache_type` other than `list`, `set`, or `frozenset` is specified.
Examples
--------
Cache the tip names of the tree on its internal nodes
>>> from skbio import TreeNode
>>> tree = TreeNode.read(["((a,b,(c,d)e)f,(g,h)i)root;"])
>>> f = lambda n: [n.name] if n.is_tip() else []
>>> tree.cache_attr(f, 'tip_names')
>>> for n in tree.traverse(include_self=True):
... print("Node name: %s, cache: %r" % (n.name, n.tip_names))
Node name: root, cache: ['a', 'b', 'c', 'd', 'g', 'h']
Node name: f, cache: ['a', 'b', 'c', 'd']
Node name: a, cache: ['a']
Node name: b, cache: ['b']
Node name: e, cache: ['c', 'd']
Node name: c, cache: ['c']
Node name: d, cache: ['d']
Node name: i, cache: ['g', 'h']
Node name: g, cache: ['g']
Node name: h, cache: ['h']
"""
if cache_type in [set, frozenset]:
def reduce_f(a, b):
return a | b
elif cache_type == list:
def reduce_f(a, b):
return a + b
else:
raise TypeError("Only list, set and frozenset are supported.")
for node in self.postorder(include_self=True):
node._registered_caches.add(cache_attrname)
cached = [getattr(c, cache_attrname) for c in node.children]
cached.append(cache_type(func(node)))
setattr(node, cache_attrname, reduce(reduce_f, cached))
@experimental(as_of="0.4.0")
def shuffle(self, k=None, names=None, shuffle_f=np.random.shuffle, n=1):
"""Yield trees with shuffled tip names
Parameters
----------
k : int, optional
The number of tips to shuffle. If k is not `None`, k tips are
randomly selected, and only those names will be shuffled.
names : list, optional
The specific tip names to shuffle. k and names cannot be specified
at the same time.
shuffle_f : func
Shuffle method, this function must accept a list and modify
inplace.
n : int, optional
The number of iterations to perform. Value must be > 0 and `np.inf`
can be specified for an infinite number of iterations.
Notes
-----
Tip names are shuffled inplace. If neither `k` nor `names` are
provided, all tips are shuffled.
Yields
------
TreeNode
Tree with shuffled tip names.
Raises
------
ValueError
If `k` is < 2
If `n` is < 1
ValueError
If both `k` and `names` are specified
MissingNodeError
If `names` is specified but one of the names cannot be found
Examples
--------
Alternate the names on two of the tips, 'a', and 'b', and do this 5
times.
>>> from skbio import TreeNode
>>> tree = TreeNode.read(["((a,b),(c,d));"])
>>> rev = lambda items: items.reverse()
>>> shuffler = tree.shuffle(names=['a', 'b'], shuffle_f=rev, n=5)
>>> for shuffled_tree in shuffler:
... print(shuffled_tree)
((b,a),(c,d));
<BLANKLINE>
((a,b),(c,d));
<BLANKLINE>
((b,a),(c,d));
<BLANKLINE>
((a,b),(c,d));
<BLANKLINE>
((b,a),(c,d));
<BLANKLINE>
"""
if k is not None and k < 2:
raise ValueError("k must be None or >= 2")
if k is not None and names is not None:
raise ValueError("n and names cannot be specified at the sametime")
if n < 1:
raise ValueError("n must be > 0")
self.assign_ids()
if names is None:
all_tips = list(self.tips())
if n is None:
n = len(all_tips)
shuffle_f(all_tips)
names = [tip.name for tip in all_tips[:k]]
nodes = [self.find(name) for name in names]
# Since the names are being shuffled, the association between ID and
# name is no longer reliable
self.invalidate_caches()
counter = 0
while counter < n:
shuffle_f(names)
for node, name in zip(nodes, names):
node.name = name
yield self
counter += 1
@experimental(as_of="0.5.6")
def _extract_support(self):
"""Extract the support value from a node label, if available.
Returns
-------
tuple of
int, float or None
The support value extracted from the node label
str or None
The node label with the support value stripped
"""
support, label = None, None
if self.name:
# separate support value from node name by the first colon
left, _, right = self.name.partition(':')
try:
support = int(left)
except ValueError:
try:
support = float(left)
except ValueError:
pass
# strip support value from node name
label = right or None if support is not None else self.name
return support, label
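    # Illustrative sketch (hypothetical node names): a node whose name is
    # "95:speciesA" yields (95, 'speciesA'), a name of "0.95" yields
    # (0.95, None), and a plain name of "speciesA" yields (None, 'speciesA').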
@experimental(as_of="0.5.6")
def _node_label(self):
"""Generate a node label in the format of "support:name" if both exist,
or "support" or "name" if either exists.
Returns
-------
str
Generated node label
"""
lblst = []
if self.support is not None: # prevents support of NoneType
lblst.append(str(self.support))
if self.name: # prevents name of NoneType
lblst.append(self.name)
return ':'.join(lblst)
@experimental(as_of="0.5.6")
def assign_supports(self):
"""Extract support values from internal node labels of a tree.
Notes
-----
A "support value" measures the confidence or frequency of the incoming
branch (the branch from parent to self) of an internal node in a tree.
Roots and tips do not have support values. To extract a support value
from a node label, this method reads from left and stops at the first
":" (if any), and attempts to convert it to a number.
For examples: "(a,b)1.0", "(a,b)1.0:2.5", and "(a,b)'1.0:species_A'".
In these cases the support values are all 1.0.
For examples: "(a,b):1.0" and "(a,b)species_A". In these cases there
are no support values.
If a support value is successfully extracted, it will be stripped from
the node label and assigned to the `support` property.
IMPORTANT: mathematically, "support value" is a property of a branch,
        not a node. For historical reasons, support values are usually
attached to nodes in a typical tree file [1].
[1] Czech, Lucas, Jaime Huerta-Cepas, and Alexandros Stamatakis. "A
Critical Review on the Use of Support Values in Tree Viewers and
Bioinformatics Toolkits." Molecular biology and evolution 34.6
(2017): 1535-1542.
Examples
--------
>>> from skbio import TreeNode
>>> newick = "((a,b)95,(c,d):1.1,(e,f)'80:speciesA':1.0);"
>>> tree = TreeNode.read([newick])
>>> tree.assign_supports()
>>> tree.lca(['a', 'b']).support
95
>>> tree.lca(['c', 'd']).support is None
True
>>> tree.lca(['e', 'f']).support
80
>>> tree.lca(['e', 'f']).name
'speciesA'
"""
for node in self.traverse():
if node.is_root() or node.is_tip():
node.support = None
else:
node.support, node.name = node._extract_support()
@experimental(as_of="0.5.3")
def unpack(self):
"""Unpack an internal node in place.
Notes
-----
This function sequentially: 1) elongates child nodes by branch length
of self (omit if there is no branch length), 2) removes self from
parent node, and 3) grafts child nodes to parent node.
Raises
------
        TreeError
if input node is root or tip
See also
--------
unpack_by_func
prune
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read(['((c:2.0,d:3.0)a:1.0,(e:2.0,f:1.0)b:2.0);'])
>>> tree.find('b').unpack()
>>> print(tree)
((c:2.0,d:3.0)a:1.0,e:4.0,f:3.0);
<BLANKLINE>
"""
if self.is_root():
raise TreeError('Cannot unpack root.')
if self.is_tip():
raise TreeError('Cannot unpack tip.')
parent = self.parent
blen = (self.length or 0.0)
for child in self.children:
clen = (child.length or 0.0)
child.length = (clen + blen or None)
parent.remove(self)
parent.extend(self.children)
@experimental(as_of="0.5.3")
def unpack_by_func(self, func):
"""Unpack internal nodes of a tree that meet certain criteria.
Parameters
----------
func : function
a function that accepts a TreeNode and returns `True` or `False`,
where `True` indicates the node is to be unpacked
See also
--------
unpack
prune
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read(['((c:2,d:3)a:1,(e:1,f:2)b:2);'])
>>> tree.unpack_by_func(lambda x: x.length <= 1)
>>> print(tree)
((e:1.0,f:2.0)b:2.0,c:3.0,d:4.0);
<BLANKLINE>
>>> tree = TreeNode.read(['(((a,b)85,(c,d)78)75,(e,(f,g)64)80);'])
>>> tree.assign_supports()
>>> tree.unpack_by_func(lambda x: x.support < 75)
>>> print(tree)
(((a,b)85,(c,d)78)75,(e,f,g)80);
<BLANKLINE>
"""
nodes_to_unpack = []
for node in self.non_tips(include_self=False):
if func(node):
nodes_to_unpack.append(node)
for node in nodes_to_unpack:
node.unpack()
| gregcaporaso/scikit-bio | skbio/tree/_tree.py | Python | bsd-3-clause | 100,088 |
import os
import shutil
import zipfile
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.files.base import ContentFile
from django.core.files.storage import default_storage
from django.core.signals import request_finished
from django.db import models
from uploadtemplate.utils import list_files
class ThemeManager(models.Manager):
def __init__(self):
super(ThemeManager, self).__init__()
self._cache = {}
def get_cached(self, site, using):
if isinstance(site, Site):
site = site.pk
site_pk = int(site)
if (using, site_pk) not in self._cache:
try:
theme = self.get(site=site_pk, default=True)
except self.model.DoesNotExist:
theme = None
self._cache[(using, site_pk)] = theme
theme = self._cache[(using, site_pk)]
if theme is None:
raise self.model.DoesNotExist
return theme
def get_current(self):
"""
Shortcut for getting the currently-active instance from the cache.
"""
site = settings.SITE_ID
using = self._db if self._db is not None else 'default'
return self.get_cached(site, using)
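    # Illustrative usage sketch (assumes a Theme marked as default has been
    # saved for the current settings.SITE_ID; the directory shown is
    # hypothetical):
    #
    #   theme = Theme.objects.get_current()
    #   theme.theme_files_dir  # e.g. 'uploadtemplate/themes/1/'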
def clear_cache(self):
self._cache = {}
def _post_save(self, sender, instance, created, raw, using, **kwargs):
if instance.default:
self._cache[(using, instance.site_id)] = instance
elif ((using, instance.site_id) in self._cache and
self._cache[(using, instance.site_id)] == instance):
self._cache[(using, instance.site_id)] = None
def contribute_to_class(self, model, name):
# In addition to the normal contributions, we also attach a post-save
# listener to cache newly-saved instances immediately. This is
# post-save to make sure that we don't cache anything invalid.
super(ThemeManager, self).contribute_to_class(model, name)
if not model._meta.abstract:
models.signals.post_save.connect(self._post_save, sender=model)
class Theme(models.Model):
site = models.ForeignKey('sites.Site')
name = models.CharField(max_length=255)
theme_files_zip = models.FileField(upload_to='uploadtemplate/files/%Y/%m/%d',
blank=True)
thumbnail = models.ImageField(
upload_to='uploadtemplate/thumbnails/%Y/%m/%d',
blank=True)
description = models.TextField(blank=True)
default = models.BooleanField(default=False)
objects = ThemeManager()
def __unicode__(self):
if self.default:
return u'%s (default)' % self.name
else:
return self.name
@models.permalink
def get_absolute_url(self):
return ['uploadtemplate-set_default', (self.pk,)]
@property
def theme_files_dir(self):
if self.pk is None:
raise AttributeError("Themes with no pk have no theme files directory.")
return 'uploadtemplate/themes/{pk}/'.format(pk=self.pk)
def save_files(self):
if not self.theme_files_zip:
return
zip_file = zipfile.ZipFile(self.theme_files_zip)
# Unzip and replace any files.
for filename in zip_file.namelist():
# skip any zipped directories.
if filename.endswith('/'):
continue
name = os.path.join(self.theme_files_dir, filename)
fp = ContentFile(zip_file.read(filename))
if default_storage.exists(name):
default_storage.delete(name)
default_storage.save(name, fp)
def list_files(self):
return list_files(self.theme_files_dir)
def prune_files(self):
"""
Removes files from the theme's directory that aren't in the theme's
zipfile.
"""
if self.theme_files_zip:
zip_file = zipfile.ZipFile(self.theme_files_zip)
expected_files = set((os.path.join(self.theme_files_dir, name)
for name in zip_file.namelist()))
else:
expected_files = set()
found_files = set(self.list_files())
to_prune = found_files - expected_files
for name in to_prune:
default_storage.delete(name)
def delete_files(self):
"""
Removes all files from the theme's directory.
"""
for name in self.list_files():
default_storage.delete(name)
def delete(self, *args, **kwargs):
self.delete_files()
# Backwards-compat: Try to delete the old directories too.
try:
shutil.rmtree(self.static_root())
except OSError, e:
if e.errno == 2: # no such file:
pass
else:
raise
try:
shutil.rmtree(self.template_dir())
except OSError, e:
if e.errno == 2: # no such file
pass
else:
raise
Theme.objects._post_save(None, self, None, None, using=self._state.db)
super(Theme, self).delete(*args, **kwargs)
# Required for backwards-compatibility shims for get_static_url.
def static_root(self):
return '%sstatic/%i/' % (settings.UPLOADTEMPLATE_MEDIA_ROOT, self.pk)
# Required for backwards-compatibility shims for get_static_url.
def static_url(self):
return '%sstatic/%i/' % (settings.UPLOADTEMPLATE_MEDIA_URL, self.pk)
# Required for backwards-compatibility shims for template loader.
def template_dir(self):
return '%stemplates/%i/' % (settings.UPLOADTEMPLATE_MEDIA_ROOT,
self.pk)
def finished(sender, **kwargs):
Theme.objects.clear_cache()
request_finished.connect(finished)
| pculture/django-uploadtemplate | uploadtemplate/models.py | Python | bsd-3-clause | 5,865 |
# NOTE: parts of this file were taken from scipy's doc/source/conf.py. See
# scikit-bio/licenses/scipy.txt for scipy's license.
import glob
import sys
import os
import types
import re
if sys.version_info.major != 3:
raise RuntimeError("scikit-bio can only be used with Python 3. You are "
"currently running Python %d." % sys.version_info.major)
# Force matplotlib to not use any Xwindows backend.
import matplotlib
matplotlib.use('Agg')
import sphinx
import sphinx.ext.autosummary as autosummary
class NewAuto(autosummary.Autosummary):
def get_items(self, names):
# Camel to snake case from http://stackoverflow.com/a/1176023/579416
first_cap_re = re.compile('(.)([A-Z][a-z]+)')
all_cap_re = re.compile('([a-z0-9])([A-Z])')
def fix_item(display_name, sig, summary, real_name):
class_names = {
'TreeNode': 'tree',
'TabularMSA': 'msa'
}
class_name = real_name.split('.')[-2]
if class_name in class_names:
nice_name = class_names[class_name]
else:
s1 = first_cap_re.sub(r'\1_\2', class_name)
nice_name = all_cap_re.sub(r'\1_\2', s1).lower()
if len(nice_name) > 10:
nice_name = ''.join([e[0] for e in nice_name.split('_')])
def fmt(string):
count = string.count('%s')
return string % tuple([nice_name] * count)
specials = {
'__eq__': fmt('%s1 == %s2'),
'__ne__': fmt('%s1 != %s2'),
'__gt__': fmt('%s1 > %s2'),
'__lt__': fmt('%s1 < %s2'),
'__ge__': fmt('%s1 >= %s2'),
'__le__': fmt('%s1 <= %s2'),
'__getitem__': fmt('%s[x]'),
'__iter__': fmt('iter(%s)'),
'__contains__': fmt('x in %s'),
'__bool__': fmt('bool(%s)'),
'__str__': fmt('str(%s)'),
'__reversed__': fmt('reversed(%s)'),
'__len__': fmt('len(%s)'),
'__copy__': fmt('copy.copy(%s)'),
'__deepcopy__': fmt('copy.deepcopy(%s)'),
}
if display_name in specials:
prefixes = autosummary.get_import_prefixes_from_env(self.env)
obj = autosummary.import_by_name(display_name,
prefixes=prefixes)
# Filter out any slot_wrappers that work their way in (more below)
if type(obj[1]).__name__ == 'wrapper_descriptor':
return None
return specials[display_name], '', summary, real_name
return display_name, sig, summary, real_name
skip = ['__init_subclass__']
items = []
for item in super(NewAuto, self).get_items(names):
if item[0] not in skip:
temp_item = fix_item(*item)
# Drop slot_wrappers (see above)
if temp_item is not None:
items.append(temp_item)
return items
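# Illustrative sketch of the renaming performed above (hypothetical entries):
# a summary entry for 'TabularMSA.__eq__' is displayed as 'msa1 == msa2', and
# 'DistanceMatrix.__getitem__' would be shown as 'dm[x]' because
# 'DistanceMatrix' camel-cases to 'distance_matrix', which is longer than 10
# characters and is therefore abbreviated to its initials.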
autosummary.Autosummary = NewAuto
import sphinx_bootstrap_theme
# The extra_public_methods depends on what class we are looking at.
import skbio
from skbio.util._decorator import classproperty
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here:
#
# sys.path.insert(0, os.path.abspath('../sphinxext/foo'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# Using `sphinx_version` doesn't work, likely because Sphinx is expecting a
# version string of the form X.Y, not X.Y.Z.
needs_sphinx = '1.6'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
'sphinx.ext.linkcode',
'sphinx.ext.coverage',
'sphinx.ext.doctest',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx'
]
# Determine if matplotlib has a recent enough version of the
# plot_directive.
try:
from matplotlib.sphinxext import plot_directive
except ImportError:
use_matplotlib_plot_directive = False
else:
try:
use_matplotlib_plot_directive = (plot_directive.__version__ >= 2)
except AttributeError:
use_matplotlib_plot_directive = False
if use_matplotlib_plot_directive:
extensions.append('matplotlib.sphinxext.plot_directive')
else:
raise RuntimeError("You need a recent enough version of matplotlib")
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'scikit-bio'
copyright = u'2014--, scikit-bio development team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = skbio.__version__
# The full version, including alpha/beta/rc tags.
release = skbio.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# Exclude this file since it is only used by autosummary to generate other RST
# files during the build process, and it will generate sphinx errors and
# warnings otherwise.
exclude_patterns = ['_templates/autosummary/*.rst']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bootstrap'
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
# Navigation bar title. (Default: ``project`` value)
'navbar_title': 'scikit-bio docs',
# Render the next and previous page links in navbar. (Default: true)
'navbar_sidebarrel': False,
# Bootswatch (http://bootswatch.com/) theme.
#
# Options are nothing with "" (default) or the name of a valid theme
# such as "amelia" or "cosmo".
'bootswatch_theme': 'united',
# Location of link to source.
# Options are "nav" (default), "footer" or anything else to exclude.
'source_link_position': False
}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static/']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-biodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'scikit-bio.tex', u'scikit-bio Documentation',
u'scikit-bio development team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'scikit-bio', u'scikit-bio Documentation',
[u'scikit-bio development team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'scikit-bio', u'scikit-bio Documentation',
u'scikit-bio development team', 'scikit-bio',
'Data structures, algorithms, and educational resources for working with '
'biological data in Python.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for autosummary ----------------------------------------------
autosummary_generate = glob.glob('*.rst')
#------------------------------------------------------------------------------
# Plot
#------------------------------------------------------------------------------
plot_pre_code = """
import numpy as np
import scipy as sp
np.random.seed(123)
"""
plot_include_source = True
plot_formats = [('png', 96), ]
#plot_html_show_formats = False
font_size = 13*72/96.0 # 13 px
plot_rcparams = {
'font.size': font_size,
'axes.titlesize': font_size,
'axes.labelsize': font_size,
'xtick.labelsize': font_size,
'ytick.labelsize': font_size,
'legend.fontsize': font_size,
'figure.subplot.bottom': 0.2,
'figure.subplot.left': 0.2,
'figure.subplot.right': 0.9,
'figure.subplot.top': 0.9,
'figure.subplot.wspace': 0.2,
'text.usetex': False,
# Some of our figures have legends outside the axes area. When they're
# rendered in an interactive context, nothing gets cut off, but when
# rendered in a static context (e.g., with savefig, which the plot
# directive uses), the legend can get cut off. Specifying 'tight' instead
# of 'standard' fixes the issue. See http://stackoverflow.com/a/10154763
'savefig.bbox': 'tight'
}
matplotlib.rcParams.update(plot_rcparams)
# -----------------------------------------------------------------------------
# Intersphinx configuration
# -----------------------------------------------------------------------------
intersphinx_mapping = {
'http://docs.python.org/dev': None,
'http://docs.scipy.org/doc/numpy': None,
'http://docs.scipy.org/doc/scipy/reference': None,
'http://matplotlib.org': None,
'http://pandas.pydata.org/pandas-docs/stable': None,
'http://www.biom-format.org':None
}
# -----------------------------------------------------------------------------
# Source code links
# -----------------------------------------------------------------------------
import inspect
from os.path import relpath, dirname
def linkcode_resolve(domain, info):
"""
Determine the URL corresponding to Python object
"""
if domain != 'py':
return None
modname = info['module']
fullname = info['fullname']
submod = sys.modules.get(modname)
if submod is None:
return None
obj = submod
for part in fullname.split('.'):
try:
obj = getattr(obj, part)
except:
return None
try:
fn = inspect.getsourcefile(obj)
except:
fn = None
if not fn:
try:
fn = inspect.getsourcefile(sys.modules[obj.__module__])
except:
fn = None
if not fn:
return None
try:
source, lineno = inspect.findsource(obj)
except:
lineno = None
if lineno:
linespec = "#L%d" % (lineno + 1)
else:
linespec = ""
fn = relpath(fn, start=dirname(skbio.__file__))
if 'dev' in skbio.__version__:
return "http://github.com/biocore/scikit-bio/blob/master/skbio/%s%s" % (
fn, linespec)
else:
return "http://github.com/biocore/scikit-bio/blob/%s/skbio/%s%s" % (
skbio.__version__, fn, linespec)
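# Illustrative sketch (hypothetical input): for
# info = {'module': 'skbio.tree._tree', 'fullname': 'TreeNode.find'} on a
# released version, the function resolves the defining source file relative to
# the skbio package and returns a URL of the form
# "http://github.com/biocore/scikit-bio/blob/<version>/skbio/tree/_tree.py#L<lineno>".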
#------------------------------------------------------------------------------
# linkcheck
#------------------------------------------------------------------------------
# Link-checking on Travis sometimes times out.
linkcheck_timeout = 30
# You might see the following exception when building the documentation:
# TypeError: 'abstractproperty' object is not iterable
def _closure():
def __get__(self, cls, owner):
return self
classproperty.__get__ = __get__
_closure()
# Add the 'copybutton' javascript, to hide/show the prompt in code
# examples, originally taken from scikit-learn's doc/conf.py
def setup(app):
app.add_javascript('copybutton.js')
app.add_stylesheet('style.css')
|
gregcaporaso/scikit-bio
|
doc/source/conf.py
|
Python
|
bsd-3-clause
| 16,829
|
#!/usr/bin/env python
import commands
import os
import popen2
import quopri
import base64
import re
import shutil
import string
import sys
import time
# Fix for older versions of Python
try:
True
except NameError:
True,False = 1,0
# Singleton-like design pattern
# See: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66531
# class Constants:
# __shared_state = {}
# def __init__(self):
# self.__dict__ = self.__shared_state
# Import platform_specific data
import platform_specific
def printable(s):
"Convert a string to only printable characters"
# Apparently, the last two characters in string.printable are not XML-friendly
# This code could be problematic if string.printable's order varies by machine
return ''.join([c for c in s if c in string.printable[:98]])
def run(prog,args):
"""Run a program and return true if succeeds, false if fails
prog: Name of the program
args: List of command-line arguments"""
status = os.spawnv(os.P_WAIT,prog,[prog] + args)
success = os.WIFEXITED(status) and os.WEXITSTATUS(status)==0
return success
def getlogin():
"""Return the name of the login
We try several things until something works"""
try:
return os.environ['USER']
except:
try:
return os.environ['LOGNAME']
except:
try:
import pwd
return pwd.getpwuid(os.getuid())[0]
except:
return os.getlogin()
def hasextension(fname):
"""Check if a filename has an extension"""
return fname.find('.')!=-1
def issourcefile(fname):
"""Check if the file name corresponds to a source file.
For now, all files that have an extension that isn't .exe or .a or .o"""
return hasextension(fname) and True not in [fname.endswith(x) for x in ['.o','.exe','.a'] ]
def getlogfilepath(logfiledir):
"""Return the full path of the logfile"""
return os.path.join(logfiledir,getlogin() + '.log')
def getcommand(argv):
"""Retrieve a string version of the command that was invoked at the shell
We can't get it exactly because the shell does substitutions on the
command-line arguments."""
return ' '.join(argv)
# Parse through source code and retrieve headers
headerpat = re.compile(r'#include[ \t]+"(.*)"')
def parseheaders(source):
"""Extract the names of local header files from source code. Not smart enough to deal
with comments"""
return headerpat.findall(source)
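# For instance (hypothetical input, shown only to illustrate the regex above):
# parseheaders('#include "util.h"\n#include <stdio.h>') would return ['util.h'],
# since only double-quoted (local) includes are matched.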
def unique(alist):
"""Return unique elements from a list.
Taken from comments in http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560"""
myset = {}
return [myset.setdefault(e,e) for e in alist if e not in myset]
def flatten(alist):
"""Flatten a list. Each element in the list must itself be a list"""
return [x for y in alist for x in y]
def encode(string, encoding):
if encoding == 'quopri':
return quopri.encodestring(string)
elif encoding == 'base64':
return base64.encodestring(string)
elif encoding == 'raw':
return string
else:
return string
class CVSUtils:
"""Interacts with CVS."""
def __init__(self,sandbox,commitmsg):
"""sandbox - CVS sandbox directory which will be used for commits"""
self.sandbox = sandbox
self.commitmsg = commitmsg
def commit_files_to_cvs(self,files):
""" Commit the sourcefiles and headerfiles to CVS"""
for f in files:
self._copy_to_sandbox(f)
self._commit_sandbox_files()
def _copy_to_sandbox(self,fname):
""" Copy a file to the sandbox, creating directories and adding to CVS when necessary.
Does not do a commit"""
dest = self._change_base_directory(os.path.abspath(fname),self.sandbox)
self._check_and_create_dir(os.path.dirname(dest))
shutil.copy(fname,dest)
# We don't always need to add the file, but it's easier to try and add it every time
(status,output) = commands.getstatusoutput("cd %s ; cvs add %s" % (self.sandbox,dest))
if status!=0:
# Only complain if it's not an "already exists" problem
if output.find('already exists')==-1:
raise ValueError, "Could not add file %s: %s" % (dest,output)
def _check_and_create_dir(self,dirname):
"""Check if a directory exists, and if not, create it in the sandbox and and commit it.
The directory must be within the sandbox"""
if not os.path.exists(dirname):
# If it's not there, check the parent directory
self._check_and_create_dir(os.path.dirname(dirname))
os.mkdir(dirname)
rel_dirname = self._relative_path(dirname,self.sandbox)
            (status,output) = commands.getstatusoutput("cd %s ; cvs add %s" % (self.sandbox,rel_dirname))
if status!=0:
raise ValueError, "Could not add directory %s: %s" % (dirname,output)
def _commit_sandbox_files(self):
"""Commits all of the files currently in the sandbox.
Returns the output of the CVS commit command"""
#return commands.getoutput("cd %s ; cvs commit -f -R -m ' ' ." % self.sandbox)
return commands.getoutput("cd %s ; cvs commit -m '%s' ." % (self.sandbox, self.commitmsg))
def gethomepaths(self):
""" Get the list of home directory paths. Some environments have
        a number of different absolute paths mapped to the user's home directory,
which becomes an issue when capturing the code to cvs.
It typically happens when the value of $HOME is different from
the standard naming convention on the filesystem.
        The method used here is a little hackish.
"""
cwd = os.getcwd()
home_dir = os.path.expanduser('~')
os.chdir(home_dir)
fs_dir = os.path.abspath('.')
os.chdir(cwd) # I hope this will always get you back to the original place...
if home_dir!= fs_dir:
return [home_dir, fs_dir]
else:
return [home_dir]
def _change_base_directory(self,fname,basename):
""" Change the base directory of fname from oldbase to newbase
Absolute path of files must be used!"""
# Compensate the possible problem with incompatible HOME paths
bases = self.gethomepaths()
for base in bases:
if os.path.commonprefix([fname,base]) == base:
fname = fname.replace(base, '/_HOME', 1)
# Drop leading delimiter
# FIXME: the following line is not portable...
return os.path.join(basename, fname[1:])
def _relative_path(self,fname,base):
"""Create a relative path from an absolute and a base"""
if os.path.commonprefix([fname,base])!=base:
raise ValueError, "Unexpected base in file" + fname
# Make sure base ends in a slash, or the following will fail
if base[-1] != '/':
base = base + '/'
return fname.replace(base,'')
class CompileData:
"""Holds data associated with a compile"""
def __init__(self,starttime,endtime,sourcefiles,subject,command,success,path,debugging=None,cvs=None):
self.timestamp = starttime
self.timestr = time.ctime(self.timestamp)
self.sourcefiles = sourcefiles
self.subject = subject
self.command = command
self.time_interval = endtime-starttime
self.success = success
self.path = path
self.debugging = debugging
self.cvs = cvs
self.encoding = platform_specific.encoding # 'base64', 'quopri' or 'raw'
self.headerfiles = []
# Determine the headerfiles
for sourcefile in self.sourcefiles:
srcdir = os.path.dirname(sourcefile)
candidates = parseheaders(open(sourcefile).read())
for h in candidates:
headerpath = os.path.join(srcdir,h)
if os.path.exists(headerpath):
self.headerfiles.append(headerpath)
def getcvscomment(self):
cxml_cvs = '''<compile success="%(success)d" %(debug)s>
<timestr>%(timestr)s</timestr>
<time_interval>%(time_interval).2f</time_interval>
</compile>
'''
if self.debugging is not None:
debug = 'debugging="%d"' % self.debugging
else:
debug = ''
return cxml_cvs % {
'timestr' : self.timestr,
'command' : self.command,
'success' : self.success,
'debug' : debug,
'time_interval' : self.time_interval
}
def addtocvs(self,sandbox):
"""Add the files to the CVS repository.
sandbox - location of CVS sandbox where files need to be copied and committed."""
commitmsg = self.getcvscomment()
cvs = CVSUtils(sandbox, commitmsg)
cvs.commit_files_to_cvs(self.sourcefiles+self.headerfiles)
def toxml(self):
sfxml = '''<sourcefile success="%(success)d">
<name>%(name)s</name>
<time>%(timestamp)d</time>
<timestr>%(timestr)s</timestr>
<subject>%(subject)s</subject>
<command><![CDATA[%(command)s]]></command>
<time_interval>%(time_interval).2f</time_interval>
<path>%(path)s</path>
<source encode="%(encoding)s"><![CDATA[%(source)s]]></source>
</sourcefile>
'''
hfxml = '''<headerfile>
<name>%(name)s</name>
<time>%(timestamp)d</time>
<timestr>%(timestr)s</timestr>
<subject>%(subject)s</subject>
<command><![CDATA[%(command)s]]></command>
<time_interval>%(time_interval).2f</time_interval>
<path>%(path)s</path>
<source encode="%(encoding)s"><![CDATA[%(source)s]]></source>
</headerfile>
'''
cxml = '''<compile success="%(success)d" %(debug)s>
<time>%(timestamp)d</time>
<timestr>%(timestr)s</timestr>
<subject>%(subject)s</subject>
<command><![CDATA[%(command)s]]></command>
<time_interval>%(time_interval).2f</time_interval>
</compile>
'''
if self.debugging is not None:
debug = 'debugging="%d"' % self.debugging
else:
debug = ''
return '\n'.join([cxml % {
'timestamp' : self.timestamp,
'timestr' : self.timestr,
'subject' : self.subject,
'command' : self.command,
'success' : self.success,
'debug' : debug,
'time_interval' : self.time_interval}] +
[sfxml % {'name' : name,
'success' : self.success,
'timestamp' : self.timestamp,
'timestr' : self.timestr,
'subject' : self.subject,
'command' : self.command,
'time_interval' : self.time_interval,
'path' : self.path,
'encoding' : self.encoding,
'source' : encode(open(name).read(), self.encoding)}
for name in self.sourcefiles] +
[hfxml % {'name' : name,
'timestamp' : self.timestamp,
'timestr' : self.timestr,
'subject' : self.subject,
'command' : self.command,
'time_interval' : self.time_interval,
'path' : self.path,
'encoding' : self.encoding,
'source' : encode(open(name).read(), self.encoding)}
for name in self.headerfiles])
def set_compiler_invoked():
os.environ['UMDINST_COMPILER_INVOKED']="1"
def compiler_already_invoked():
try:
val = os.environ['UMDINST_COMPILER_INVOKED']
return True
except KeyError:
return False
def ask_if_debugging():
c = ''
while c not in ['y','n']:
c = raw_input("Are you debugging? [y/n]: ").lower()
return c=='y'
def is_in_whitelist(subject,whitelistfile):
try:
approved = [x.rstrip() for x in open(whitelistfile).readlines()]
return subject in approved
except TypeError: # whitelistfile==None
return False
def identify_sourcefiles(args):
"""Identify source files from a list of command-line arguments.
args - Command-line arguments (does not include name of program)"""
# If there's a -o and a filename, remove it
try:
ind = args.index('-o')
del args[ind+1]
except:
pass
# Return all arguments that don't start with -, that are sourcefiles, and that are accessible
return [fname for fname in args if fname[0]!='-' and issourcefile(fname) and os.access(fname,os.R_OK)]
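# A quick illustrative call (the arguments below are hypothetical): given
# identify_sourcefiles(['-O2', '-o', 'prog', 'main.c', 'util.c']),
# the '-o prog' pair is dropped first and, provided main.c and util.c exist
# and are readable, ['main.c', 'util.c'] is returned.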
def capture_compile(compiler,argv=sys.argv,logex=None):
"""Capture information associated with a compile.
Return true if compile succeeded, else false"""
sandbox=os.path.expanduser(platform_specific.sandbox)
whitelistfile=platform_specific.whitelistfile #os.path.expanduser(platform_specific.whitelistfile)
starttime = time.time()
args = argv[1:]
success = run(compiler,args)
# If compile succeeded, ask if debugging
subject = getlogin()
if success and is_in_whitelist(subject,whitelistfile):
is_debugging = ask_if_debugging()
else:
is_debugging = None
endtime = time.time()
c = CompileData(starttime=starttime,
endtime=endtime,
sourcefiles=identify_sourcefiles(args),
subject=subject,
command=' '.join(argv),
success=success,
path = os.getcwd(),
debugging=is_debugging)
if platform_specific.with_privatecvs:
#print "Writing to CVS..."
if sandbox is not None:
c.addtocvs(sandbox) # Add the files to CVS
if platform_specific.with_privatelog:
#print "Writing to Private logfile..."
logfile = getlogfilepath(os.path.expanduser(platform_specific.privatelogfiledir))
f = open(logfile,'a')
f.write(c.toxml())
f.close()
if platform_specific.with_pooledlog:
#print "Writing to Pooled logfile..."
logfile = getlogfilepath(platform_specific.logfiledir)
f = open(logfile,'a')
f.write(c.toxml())
f.close()
os.chmod(logfile,0644)
if logex is not None:
logfile = logex
f = open(logfile,'a')
f.write(c.toxml())
f.close()
os.chmod(logfile,0644)
if platform_specific.with_workflow:
print "Invoking the online workflow tool..."
return success
def capture_interactive_run(runprog,argv=sys.argv,logex=None):
"""Capture information associated with an interactive run
Return true if run succeeded, else false"""
starttime = time.time()
args = argv[1:]
success = run(runprog,args)
endtime = time.time()
ir = InteractiveRunData(starttime=starttime,
endtime=endtime,
subject=getlogin(),
command=getcommand(argv),
success=success,
path=os.getcwd())
if platform_specific.with_privatelog:
logfile = getlogfilepath(os.path.expanduser(platform_specific.privatelogfiledir))
f = open(logfile,'a')
f.write(ir.toxml())
f.close()
if platform_specific.with_pooledlog:
logfile = getlogfilepath(platform_specific.logfiledir)
f = open(logfile,'a')
f.write(ir.toxml())
f.close()
os.chmod(logfile,0755)
if logex is not None:
logfile = logex
f = open(logfile,'a')
f.write(ir.toxml())
f.close()
os.chmod(logfile,0755)
return success
def capture_batch_run(runprog,argv=sys.argv,logex=None):
"""Capture information associated with a bactch run
Return true if run succeeded, else false"""
starttime = time.time()
args = argv[1:]
success = run(runprog,args)
endtime = time.time()
# Identify which file is the script file
fnames = [fname for fname in args if os.access(fname,os.R_OK)]
    # There should be either 0 or 1 matching args;
    # if there is more than one, just take the first
if len(fnames)>0:
fname = fnames[0]
script = open(fname).read()
else:
script = ''
br = BatchRunData(starttime=starttime,
endtime=endtime,
script=script,
subject=getlogin(),
command=getcommand(argv),
success=success,
path=os.getcwd())
if platform_specific.with_privatelog:
logfile = getlogfilepath(os.path.expanduser(platform_specific.privatelogfiledir))
f = open(logfile,'a')
f.write(br.toxml())
f.close()
if platform_specific.with_pooledlog:
logfile = getlogfilepath(platform_specific.logfiledir)
f = open(logfile,'a')
f.write(br.toxml())
f.close()
os.chmod(logfile,0755)
if logex is not None:
logfile = logex
f = open(logfile,'a')
f.write(br.toxml())
f.close()
os.chmod(logfile,0755)
return success
def capture_profiled_run(runprog,outfile,argv=sys.argv,logex=None):
"""Capture information associated with an interactive run
Return true if run succeeded, else false"""
starttime = time.time()
args = argv[1:]
success = run(runprog,args)
endtime = time.time()
# If the file can't be read, just keep the field blank
try:
profiledata=open(outfile).read()
except:
profiledata = ' '
pr = ProfileRunData(starttime=starttime,
endtime=endtime,
subject=getlogin(),
command=getcommand(argv),
success=success,
path=os.getcwd(),
profiledata=profiledata)
if platform_specific.with_privatelog:
logfile = getlogfilepath(os.path.expanduser(platform_specific.privatelogfiledir))
f = open(logfile,'a')
f.write(pr.toxml())
f.close()
if platform_specific.with_pooledlog:
logfile = getlogfilepath(platform_specific.logfiledir)
f = open(logfile,'a')
f.write(pr.toxml())
f.close()
os.chmod(logfile,0755)
if logex is not None:
logfile = logex
f = open(logfile,'a')
f.write(pr.toxml())
f.close()
os.chmod(logfile,0755)
return success
def capture_profile_report(runprog,argv=sys.argv,logex=None):
"""Capture information associated with a profile report generation program
Return true if run succeeded, else false"""
starttime = time.time()
args = argv[1:]
(status, output) = commands.getstatusoutput(' '.join([runprog]+args))
endtime = time.time()
# Send the output to standard out
print output
if status==0:
success = True
else:
success = False
subject = getlogin()
rep = ProfilerReporterData(starttime=starttime,
endtime=endtime,
subject=subject,
command=' '.join(argv),
success=success,
path= os.getcwd(),
reportdata = output)
if platform_specific.with_privatelog:
logfile = getlogfilepath(os.path.expanduser(platform_specific.privatelogfiledir))
f = open(logfile,'a')
f.write(rep.toxml())
f.close()
if platform_specific.with_pooledlog:
logfile = getlogfilepath(platform_specific.logfiledir)
f = open(logfile,'a')
f.write(rep.toxml())
f.close()
os.chmod(logfile,0755)
if logex is not None:
logfile = logex
f = open(logfile,'a')
f.write(rep.toxml())
f.close()
os.chmod(logfile,0755)
return success
def capture_debugger(debuggerprog,argv=sys.argv,logex=None):
"""Capture information associated with a debugger
Return true if debugger succeeded, else false"""
starttime = time.time()
args = argv[1:]
success = run(debuggerprog,args)
endtime = time.time()
subject = getlogin()
deb = DebuggerData(starttime=starttime,
endtime=endtime,
subject=subject,
command=' '.join(argv),
success=success,
path=os.getcwd())
if platform_specific.with_privatelog:
logfile = getlogfilepath(os.path.expanduser(platform_specific.privatelogfiledir))
f = open(logfile,'a')
f.write(deb.toxml())
f.close()
if platform_specific.with_pooledlog:
logfile = getlogfilepath(platform_specific.logfiledir)
f = open(logfile,'a')
f.write(deb.toxml())
f.close()
os.chmod(logfile,0755)
return success
def capture_make(makeprog,logex=None):
starttime = time.time()
args = sys.argv[1:]
success = run(makeprog,args)
endtime = time.time()
c = MakeData(starttime,
endtime,
get_makefilename(args),
getlogin(),
' '.join(sys.argv),
success)
raise ValueError,"This function has not been implemented properly yet!"
class AbstractRunData:
"""Parent class for RunData children
Children must define a type() method and an extrafields() method"""
def __init__(self,starttime,endtime,subject,command,success,path):
self.timestamp = starttime
self.timestr = time.ctime(self.timestamp)
self.time_interval = endtime-starttime
self.subject = subject
self.command = command
self.success = success
self.path = path
def toxml(self):
xml = '''<job type="%(type)s" success="%(success)d">
<subject>%(subject)s</subject>
<time>%(timestamp)d</time>
<timestr>%(timestr)s</timestr>
<command><![CDATA[%(command)s]]></command>
<path>%(path)s</path>
<time_interval>%(time_interval).2f</time_interval>
%(extra)s
</job>
'''
return xml % {
'type':self.type(),
'success':self.success,
'subject':self.subject,
'timestamp' : self.timestamp,
'timestr' : self.timestr,
'time_interval' : self.time_interval,
'path' : self.path,
'command' : self.command,
'extra': self.extrafields() }
class InteractiveRunData(AbstractRunData):
"""Holds data associated with an interactive run"""
def __init__(self,starttime,endtime,subject,command,success,path):
AbstractRunData.__init__(self,
starttime=starttime,
endtime=endtime,
subject=subject,
command=command,
success=success,
path=path)
def type(self):
return "interactive"
def extrafields(self):
return ""
class BatchRunData(AbstractRunData):
"""Holds data associated with a batch run"""
def __init__(self,starttime,endtime,script,subject,command,success,path):
AbstractRunData.__init__(self,
starttime=starttime,
endtime=endtime,
subject=subject,
command=command,
success=success,
path=path)
self.script = script
self.encoding = platform_specific.encoding # 'base64', 'quopri' or 'raw'
def type(self):
return "batch"
def extrafields(self):
return '<script encode="%s"><![CDATA[%s]]></script>' % (self.encoding, encode(self.script, self.encoding))
class ProfileRunData(AbstractRunData):
"""Holds data associated with a profiled run"""
def __init__(self,starttime,endtime,subject,command,success,path,profiledata):
AbstractRunData.__init__(self,
starttime=starttime,
endtime=endtime,
subject=subject,
command=command,
success=success,
path=path)
self.profiledata = profiledata
def type(self):
return "profiled"
def extrafields(self):
return ''.join(["<profiledata><![CDATA[",self.profiledata,"]]></profiledata>"])
class ProfilerReporterData:
def __init__(self,
starttime,
endtime,
subject,
command,
success,
path,
reportdata):
self.timestamp = starttime
self.timestr = time.ctime(self.timestamp)
self.subject = subject
self.command = command
self.path = path
self.time_interval = endtime-starttime
self.reportdata = reportdata
def toxml(self):
return """<profile_report>
<subject>%(subject)s</subject>
<time>%(timestamp)d</time>
<timestr>%(timestr)s</timestr>
<command><![CDATA[%(command)s]]></command>
<path>%(path)s</path>
<time_interval>%(time_interval).2f</time_interval>
<contents><![CDATA[%(reportdata)s]]></contents>
</profile_report>""" % {'subject':self.subject,
'timestamp': self.timestamp,
'timestr' : self.timestr,
'command' : self.command,
'path' : self.path,
'time_interval' : self.time_interval,
'reportdata':self.reportdata}
class DebuggerData:
"""Data associated with the invocation of a debugger"""
def __init__(self,starttime,endtime,subject,command,success,path):
self.timestamp = starttime
self.timestr = time.ctime(self.timestamp)
self.subject = subject
self.command = command
self.success = success
self.path = path
self.time_interval = endtime-starttime
def toxml(self):
return """<debug success='%(success)d'>
<subject>%(subject)s</subject>
<time>%(timestamp)d</time>
<timestr>%(timestr)s</timestr>
<command><![CDATA[%(command)s]]></command>
<path>%(path)s</path>
<time_interval>%(time_interval).2f</time_interval>
</debug>""" % {
'success':self.success,
'subject':self.subject,
'timestamp': self.timestamp,
'timestr' : self.timestr,
'command' : self.command,
'time_interval' : self.time_interval,
'path' : self.path}
class MakeData:
def __init__(self,starttime,endtime,subject,command,success,path):
pass
def toxml(self):
return "<make></make>"
if __name__=='__main__':
compiler = '/usr/bin/gcc'
#capture_compile(compiler,platform_specific.logfiledir)
capture_compile(compiler)
|
lorin/umdinst
|
umdinst/wrap.py
|
Python
|
bsd-3-clause
| 25,968
|
# -*- coding: utf-8 -*-
#
# BSD licence
#
# Copyright (c) <2008-2011> Pierre Quentel (pierre.quentel@gmail.com)
# Copyright (c) <2014-2015> Bendik Rønning Opstad <bro.devel@gmail.com>.
#
"""
Main differences from :mod:`pydblite.pydblite`:
- pass the connection to the :class:`SQLite db <pydblite.sqlite.Database>` as argument to
:class:`Table <pydblite.sqlite.Table>`
- in :func:`create() <pydblite.sqlite.Table.create>` field definitions must specify a type.
- no `drop_field` (not supported by SQLite)
- the :class:`Table <pydblite.sqlite.Table>` instance has a
:attr:`cursor <pydblite.sqlite.Table.cursor>` attribute, so that raw SQL requests can
be executed.
"""
try:
import cStringIO as io
def to_str(val, encoding="utf-8"): # encode a Unicode string to a Python 2 str
return val.encode(encoding)
except ImportError:
import io
unicode = str # used in tests
def to_str(val): # leaves a Unicode unchanged
return val
import datetime
import re
import traceback
from .common import ExpressionGroup, Filter
# test if sqlite is installed or raise exception
try:
from sqlite3 import dbapi2 as sqlite
from sqlite3 import OperationalError
except ImportError:
try:
from pysqlite2 import dbapi2 as sqlite
from pysqlite2._sqlite import OperationalError
except ImportError:
print("SQLite is not installed")
raise
# compatibility with Python 2.3
try:
set([])
except NameError:
from sets import Set as set # NOQA
# classes for CURRENT_DATE, CURRENT_TIME, CURRENT_TIMESTAMP
class CurrentDate:
def __call__(self):
        return datetime.date.today().strftime('%Y-%m-%d')
class CurrentTime:
def __call__(self):
        return datetime.datetime.now().strftime('%H:%M:%S')
class CurrentTimestamp:
def __call__(self):
        return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
DEFAULT_CLASSES = [CurrentDate, CurrentTime, CurrentTimestamp]
# functions to convert a value returned by a SQLite SELECT
# CURRENT_TIME format is HH:MM:SS
# CURRENT_DATE : YYYY-MM-DD
# CURRENT_TIMESTAMP : YYYY-MM-DD HH:MM:SS
c_time_fmt = re.compile(r'^(\d{2}):(\d{2}):(\d{2})$')
c_date_fmt = re.compile(r'^(\d{4})-(\d{2})-(\d{2})$')
c_tmsp_fmt = re.compile(r'^(\d{4})-(\d{2})-(\d{2}) (\d{2}):(\d{2}):(\d{2})')
# DATE : convert YYYY-MM-DD to datetime.date instance
def to_date(date):
if date is None:
return None
mo = c_date_fmt.match(date)
if not mo:
raise ValueError("Bad value %s for DATE format" % date)
year, month, day = [int(x) for x in mo.groups()]
return datetime.date(year, month, day)
# TIME : convert HH:MM:SS to datetime.time instance
def to_time(_time):
if _time is None:
return None
mo = c_time_fmt.match(_time)
if not mo:
raise ValueError("Bad value %s for TIME format" % _time)
hour, minute, second = [int(x) for x in mo.groups()]
return datetime.time(hour, minute, second)
# DATETIME or TIMESTAMP : convert %YYYY-MM-DD HH:MM:SS
# to datetime.datetime instance
def to_datetime(timestamp):
if timestamp is None:
return None
if not isinstance(timestamp, unicode):
raise ValueError("Bad value %s for TIMESTAMP format" % timestamp)
mo = c_tmsp_fmt.match(timestamp)
if not mo:
raise ValueError("Bad value %s for TIMESTAMP format" % timestamp)
return datetime.datetime(*[int(x) for x in mo.groups()])
# If the default value is CURRENT_DATE etc., SQLite doesn't
# report it as such: the default is returned as the value of the
# variable, formatted as a string. We have to guess...
#
def guess_default_fmt(value):
mo = c_time_fmt.match(value)
if mo:
h, m, s = [int(x) for x in mo.groups()]
if (0 <= h <= 23) and (0 <= m <= 59) and (0 <= s <= 59):
return CurrentTime
mo = c_date_fmt.match(value)
if mo:
y, m, d = [int(x) for x in mo.groups()]
try:
datetime.date(y, m, d)
return CurrentDate
except:
pass
mo = c_tmsp_fmt.match(value)
if mo:
y, mth, d, h, mn, s = [int(x) for x in mo.groups()]
try:
datetime.datetime(y, mth, d, h, mn, s)
return CurrentTimestamp
except:
pass
return value
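# For example, guess_default_fmt('12:30:00') returns the CurrentTime class,
# guess_default_fmt('2015-06-01') returns CurrentDate, and a value that matches
# none of the patterns (e.g. 'foo') is returned unchanged.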
class SQLiteError(Exception):
"""SQLiteError"""
pass
class Database(dict):
def __init__(self, filename, **kw):
"""
To create an in-memory database provide ':memory:' as filename
Args:
- filename (str): The name of the database file, or ':memory:'
- kw (dict): Arguments forwarded to sqlite3.connect
"""
dict.__init__(self)
self.conn = sqlite.connect(filename, **kw)
"""The SQLite connection"""
self.cursor = self.conn.cursor()
"""The SQLite connections cursor"""
for table_name in self._tables():
self[table_name] = Table(table_name, self)
def __contains__(self, table):
return table in self._tables()
def __delitem__(self, table):
# drop table
if isinstance(table, Table):
table = table.name
self.cursor.execute('DROP TABLE %s' % table)
dict.__delitem__(self, table)
# The instance can be used as a context manager, to make sure that it is
# closed even if an exception is raised during operations
def __enter__(self):
"""Enter 'with' statement"""
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Exit 'with' statement"""
self.conn.close()
return exc_type is None
def _tables(self):
"""Return the list of table names in the database"""
tables = []
self.cursor.execute("SELECT name FROM sqlite_master WHERE type='table'")
for table_info in self.cursor.fetchall():
if table_info[0] != 'sqlite_sequence':
tables.append(table_info[0])
return tables
def close(self):
"""Closes the database"""
self.conn.close()
def commit(self):
"""Save any changes to the database"""
self.conn.commit()
def create(self, table_name, *fields, **kw):
self[table_name] = Table(table_name, self).create(*fields, **kw)
return self[table_name]
class Table(object):
def __init__(self, table_name, db):
"""
Args:
- table_name (str): The name of the SQLite table.
- db (:class:`Database <pydblite.sqlite.Database>`): The database.
"""
self.name = table_name
self.db = db
self.cursor = db.cursor
"""The SQLite connections cursor"""
self.conv_func = {}
self.mode = "open"
self._get_table_info()
def __call__(self, *args, **kw):
"""
Selection by field values.
db(key=value) returns the list of records where r[key] = value
Args:
- args (list): A field to filter on.
- kw (dict): pairs of field and value to filter on.
Returns:
- When args supplied, return a :class:`Filter <pydblite.common.Filter>`
object that filters on the specified field.
- When kw supplied, return all the records where field values matches
the key/values in kw.
"""
if args and kw:
raise SyntaxError("Can't specify positional AND keyword arguments")
use_expression = False
if args:
if len(args) > 1:
raise SyntaxError("Only one field can be specified")
if type(args[0]) is ExpressionGroup or type(args[0]) is Filter:
use_expression = True
elif args[0] not in self.fields:
raise ValueError("%s is not a field" % args[0])
else:
return self.filter(key=args[0])
if use_expression:
sql = "SELECT rowid,* FROM %s WHERE %s" % (self.name, args[0])
self.cursor.execute(sql)
return [self._make_record(row) for row in self.cursor.fetchall()]
else:
if kw:
undef = set(kw) - set(self.fields)
if undef:
raise ValueError("Fields %s not in the database" % undef)
vals = self._make_sql_params(kw)
sql = "SELECT rowid,* FROM %s WHERE %s" % (self.name, " AND ".join(vals))
self.cursor.execute(sql, list(kw.values()))
else:
self.cursor.execute("SELECT rowid,* FROM %s" % self.name)
records = self.cursor.fetchall()
return [self._make_record(row) for row in records]
def __delitem__(self, record_id):
"""Delete by record id"""
self.delete(self[record_id])
def __getitem__(self, record_id):
"""Direct access by record id."""
sql = "SELECT rowid,* FROM %s WHERE rowid=%s" % (self.name, record_id)
self.cursor.execute(sql)
res = self.cursor.fetchone()
if res is None:
raise IndexError("No record at index %s" % record_id)
else:
return self._make_record(res)
def __iter__(self):
"""Iteration on the records"""
self.cursor.execute("SELECT rowid,* FROM %s" % self.name)
results = [self._make_record(r) for r in self.cursor.fetchall()]
return iter(results)
def __len__(self):
return self._len()
def _err_msg(self, sql, args=None):
msg = "Exception for table %s.%s\n" % (self.db, self.name)
msg += 'SQL request %s\n' % sql
if args:
import pprint
msg += 'Arguments : %s\n' % pprint.saferepr(args)
out = io.StringIO()
traceback.print_exc(file=out)
msg += out.getvalue()
return msg
def _get_table_info(self):
"""Inspect the base to get field names."""
self.fields = []
self.field_info = {}
self.cursor.execute('PRAGMA table_info (%s)' % self.name)
for field_info in self.cursor.fetchall():
fname = to_str(field_info[1])
self.fields.append(fname)
ftype = to_str(field_info[2])
info = {'type': ftype}
# can be null ?
info['NOT NULL'] = field_info[3] != 0
# default value
default = field_info[4]
if isinstance(default, unicode):
default = guess_default_fmt(default)
info['DEFAULT'] = default
self.field_info[fname] = info
self.fields_with_id = ['__id__'] + self.fields
def _len(self, db_filter=None):
"""Return number of matching entries"""
if db_filter is not None and db_filter.is_filtered():
sql = "SELECT COUNT(*) AS count FROM %s WHERE %s" % (self.name, db_filter)
else:
sql = "SELECT COUNT(*) AS count FROM %s;" % self.name
self.cursor.execute(sql)
res = self.cursor.fetchone()
return res[0]
def _make_record(self, row, fields=None):
"""Make a record dictionary from the result of a fetch"""
if fields is None:
fields = self.fields_with_id
res = dict(zip(fields, row))
for field_name in self.conv_func:
res[field_name] = self.conv_func[field_name](res[field_name])
return res
def _make_sql_params(self, kw):
"""Make a list of strings to pass to an SQL statement
from the dictionary kw with Python types."""
return ['%s=?' % k for k in kw.keys()]
def _table_exists(self):
return self.name in self.db
def _validate_field(self, field):
if len(field) != 2 and len(field) != 3:
msg = "Error in field definition %s" % field
msg += ": should be a tuple with field_name, field_info, and optionally a default value"
raise SQLiteError(msg)
field_sql = '%s %s' % (field[0], field[1])
if len(field) == 3 and field[2] is not None:
field_sql += " DEFAULT {0}".format(field[2])
return field_sql
def add_field(self, name, column_type="TEXT", default=None):
"""Add a new column to the table.
Args:
- name (string): The name of the field
- column_type (string): The data type of the column (Defaults to TEXT)
- default (datatype): The default value for this field (if any)
"""
sql = "ALTER TABLE %s ADD " % self.name
sql += self._validate_field((name, column_type, default))
self.cursor.execute(sql)
self.db.commit()
self._get_table_info()
def commit(self):
"""Save any changes to the database"""
self.db.commit()
def conv(self, field_name, conv_func):
"""When a record is returned by a SELECT, ask conversion of
specified field value with the specified function."""
if field_name not in self.fields:
raise NameError("Unknown field %s" % field_name)
self.conv_func[field_name] = conv_func
def create(self, *fields, **kw):
"""
Create a new table.
Args:
- fields (list of tuples): The fields names/types to create.
For each field, a 2-element tuple must be provided:
- the field name
- a string with additional information like field type +
other information using the SQLite syntax
eg ('name', 'TEXT NOT NULL'), ('date', 'BLOB DEFAULT CURRENT_DATE')
- mode (str): The mode used when creating the database.
        mode is only used if the table already exists.
- if mode = 'open' : open the existing base, ignore the fields
- if mode = 'override' : erase the existing base and create a
new one with the specified fields
Returns:
- the database (self).
"""
self.mode = mode = kw.get("mode", None)
if self._table_exists():
if mode == "override":
self.cursor.execute("DROP TABLE %s" % self.name)
elif mode == "open":
return self.open()
else:
self.db.close()
raise IOError("Base '%s' already exists" % self.name)
sql = "CREATE TABLE %s (" % self.name
for field in fields:
sql += self._validate_field(field) + ','
sql = sql[:-1] + ')'
self.cursor.execute(sql)
self._get_table_info()
return self
def create_index(self, *index_columns):
for ic in index_columns:
sql = "CREATE INDEX index_%s on %s (%s);" % (ic, self.name, ic)
self.cursor.execute(sql)
self.db.commit()
def delete(self, removed):
"""Remove a single record, or the records in an iterable.
Before starting deletion, test if all records are in the base
        and that the same __id__ does not occur twice.
Returns:
- int: the number of deleted items
"""
sql = "DELETE FROM %s " % self.name
if isinstance(removed, dict):
# remove a single record
_id = removed['__id__']
sql += "WHERE rowid = ?"
args = (_id,)
removed = [removed]
self.cursor.execute(sql, args)
else:
# convert iterable into a list
removed = [r for r in removed]
if not removed:
return 0
# max number of arguments for SQLITE is 999
for _removed in (removed[500*i:500*(i+1)]
for i in range((len(removed)//500)+1)):
args = [r['__id__'] for r in _removed]
sql = "DELETE FROM %s " % self.name
sql += "WHERE rowid IN (%s)" % (','.join(['?'] * len(args)))
self.cursor.execute(sql, args)
self.db.commit()
return len(removed)
def delete_index(self, *index_columns):
for ic in index_columns:
sql = "DROP INDEX index_%s;" % (ic)
self.cursor.execute(sql)
self.db.commit()
def drop_field(self, field):
raise SQLiteError("Dropping fields is not supported by SQLite")
def filter(self, key=None):
return Filter(self, key)
def get_group_count(self, group_by, db_filter=None):
"""Return the grouped by count of the values of a column"""
if db_filter is not None and db_filter.is_filtered():
sql = "SELECT %s, COUNT(*) FROM %s WHERE %s GROUP BY %s " % (group_by, self.name,
db_filter, group_by)
else:
sql = "SELECT %s, COUNT(*) FROM %s GROUP BY %s;" % (group_by, self.name, group_by)
self.cursor.execute(sql)
return self.cursor.fetchall()
def get_indices(self):
indices = []
sql = "SELECT * FROM sqlite_master WHERE type = 'index';"
try:
self.cursor.execute(sql)
except OperationalError:
return indices
records = self.cursor.fetchall()
for r in records:
indices.append(r[1][len("index_"):])
return indices
def get_unique_ids(self, unique_id, db_filter=None):
"""Return all the unique values of a column"""
sql = "SELECT rowid,%s FROM %s" % (unique_id, self.name)
if db_filter is not None and db_filter.is_filtered():
sql += " WHERE %s" % db_filter
self.cursor.execute(sql)
records = self.cursor.fetchall()
return set([row[1] for row in records])
def info(self):
# returns information about the table
return [(field, self.field_info[field]) for field in self.fields]
def insert(self, *args, **kw):
"""Insert a record in the database.
Parameters can be positional or keyword arguments. If positional
they must be in the same order as in the :func:`create` method.
Returns:
- The record identifier
"""
if args:
if isinstance(args[0], (list, tuple)):
return self.insert_many(args[0])
kw = dict([(f, arg) for f, arg in zip(self.fields, args)])
ks = kw.keys()
s1 = ",".join(ks)
qm = ','.join(['?'] * len(ks))
sql = "INSERT INTO %s (%s) VALUES (%s)" % (self.name, s1, qm)
self.cursor.execute(sql, list(kw.values()))
return self.cursor.lastrowid
def insert_many(self, args):
"""Insert a list or tuple of records
Returns:
- The last row id
"""
sql = "INSERT INTO %s" % self.name
sql += "(%s) VALUES (%s)"
if isinstance(args[0], dict):
ks = args[0].keys()
sql = sql % (', '.join(ks), ','.join(['?' for k in ks]))
args = [[arg[k] for k in ks] for arg in args]
else:
sql = sql % (', '.join(self.fields),
','.join(['?' for f in self.fields]))
try:
self.cursor.executemany(sql, args)
except:
raise Exception(self._err_msg(sql, args))
# return last row id
return self.cursor.lastrowid
def is_date(self, field_name):
"""Ask conversion of field to an instance of datetime.date"""
self.conv(field_name, to_date)
def is_datetime(self, field_name):
"""Ask conversion of field to an instance of datetime.date"""
self.conv(field_name, to_datetime)
def is_time(self, field_name):
"""Ask conversion of field to an instance of datetime.date"""
self.conv(field_name, to_time)
def open(self):
"""Open an existing database."""
return self
def update(self, record, **kw):
"""Update the record with new keys and values."""
vals = self._make_sql_params(kw)
sql = "UPDATE %s SET %s WHERE rowid=?" % (self.name,
",".join(vals))
self.cursor.execute(sql, list(kw.values()) + [record['__id__']])
self.db.commit()
Base = Table # compatibility with previous versions
|
PierreQuentel/PyDbLite
|
pydblite/sqlite.py
|
Python
|
bsd-3-clause
| 20,162
|
#!/usr/bin/env python
from io import open
import os
import subprocess
import sys
from setuptools import setup, Command, find_packages
import pkgdist
class PyTest(pkgdist.PyTest):
default_test_dir = os.path.join(pkgdist.TOPDIR, 'test')
class PyLint(Command):
user_options = [('errorsonly', 'E', 'Check only errors with pylint'),
('format=', 'f', 'Change the output format')]
def initialize_options(self):
self.errorsonly = 0
self.format = 'colorized'
def finalize_options(self):
pass
def run(self):
rcfile = os.path.abspath('.pylintrc')
standaloneModules = [m for m in []]
cli_options = ['-E'] if self.errorsonly else []
cli_options.append('--output-format={0}'.format(self.format))
        errno = subprocess.call([sys.executable, '-m', 'pylint', '--rcfile={}'.format(rcfile)] +
                                cli_options + ['pychroot'] + standaloneModules)
raise SystemExit(errno)
test_requirements = ['pytest']
if sys.hexversion < 0x03030000:
test_requirements.append('mock')
with open('README.rst', 'r', encoding='utf-8') as f:
readme = f.read()
with open('NEWS.rst', 'r', encoding='utf-8') as f:
news = f.read()
setup(
name='pychroot',
version=pkgdist.version(),
description='a python library and cli tool that simplify chroot handling',
long_description=readme + '\n\n' + news,
author='Tim Harder',
author_email='radhermit@gmail.com',
url='https://github.com/pkgcore/pychroot',
license='BSD',
packages=find_packages(),
scripts=os.listdir('bin'),
install_requires=['snakeoil>=0.7.1'],
tests_require=test_requirements,
platforms='Posix',
cmdclass={
'build_py': pkgdist.build_py2to3,
'build_scripts': pkgdist.build_scripts,
'build_man': pkgdist.build_man,
'build_docs': pkgdist.build_docs,
'install_man': pkgdist.install_man,
'install_docs': pkgdist.install_docs,
'sdist': pkgdist.sdist,
'test': PyTest,
'lint': PyLint,
},
classifiers=(
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
),
)
|
radhermit/pychroot
|
setup.py
|
Python
|
bsd-3-clause
| 2,384
|
# Copyright (c) 2016-2020, The Bifrost Authors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Bifrost Authors nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Python2 compatibility
from __future__ import absolute_import, print_function
import sys
if sys.version_info < (3,):
range = xrange
from bifrost.pipeline import SinkBlock, SourceBlock
import os
try:
import simplejson as json
except ImportError:
print("WARNING: Install simplejson for better performance")
import json
import glob
from functools import reduce
from bifrost import telemetry
telemetry.track_module()
def _parse_bifrost_filename(fname):
inds = fname[fname.find('.bf.') + 4:].split('.')[:-1]
inds = [int(i) for i in inds]
frame0, ringlet_inds = inds[0], inds[1:]
return frame0, ringlet_inds
class BifrostReader(object):
def __init__(self, basename):
assert(basename.endswith('.bf'))
hdr_filename = basename + '.json'
with open(hdr_filename, 'r') as hdr_file:
self.header = json.load(hdr_file)
data_filenames = glob.glob(basename + '.*.dat')
inds = [_parse_bifrost_filename(fname) for fname in data_filenames]
frame0s, ringlet_inds = zip(*inds)
nringlets = [max(r) + 1 for r in zip(*ringlet_inds)]
# TODO: Support multiple ringlet axes (needed in SerializeBlock too)
assert(len(nringlets) <= 1)
self.nringlet = nringlets[0] if len(nringlets) else 0
if self.nringlet > 0:
ringlet_inds = [inds[0] for inds in ringlet_inds]
self.ringlet_files = []
for ringlet in range(self.nringlet):
ringlet_filenames = [f for f, r in zip(data_filenames, ringlet_inds)
if r == ringlet]
ringlet_filenames.sort()
ringlet_files = [open(f, 'rb') for f in ringlet_filenames]
self.ringlet_files.append(ringlet_files)
self.nfile = len(self.ringlet_files[0])
if not all([len(files) == self.nfile for files in self.ringlet_files]):
raise IOError("Number of files in each ringlet does not match")
else:
data_filenames.sort()
self.files = [open(f, 'rb') for f in data_filenames]
self.nfile = len(self.files)
self.cur_file = 0
def __enter__(self):
return self
def __exit__(self, type, value, tb):
if self.nringlet > 0:
for ringlet in self.ringlet_files:
for f in ringlet:
f.close()
else:
for f in self.files:
f.close()
def readinto(self, buf, frame_nbyte):
if self.cur_file == self.nfile:
return 0
nframe_read = 0
if self.nringlet > 0:
# First dimension of buf is ringlets
bufs = buf
nbyte_reads = [ringlet_file[self.cur_file].readinto(buf)
for ringlet_file, buf in zip(self.ringlet_files, bufs)]
nbyte_read = min(nbyte_reads)
else:
nbyte_read = self.files[self.cur_file].readinto(buf)
if nbyte_read % frame_nbyte != 0:
raise IOError("Unexpected end of file")
nframe_read += nbyte_read // frame_nbyte
while nbyte_read < buf.nbytes:
self.cur_file += 1
if self.cur_file == self.nfile:
break
if self.nringlet > 0:
nbyte_reads = [ringlet_file[self.cur_file].readinto(buf)
for ringlet_file, buf in zip(self.ringlet_files, bufs)]
nbyte_read = min(nbyte_reads)
else:
nbyte_read = self.files[self.cur_file].readinto(buf)
if nbyte_read % frame_nbyte != 0:
raise IOError("Unexpected end of file")
nframe_read += nbyte_read // frame_nbyte
return nframe_read
class DeserializeBlock(SourceBlock):
def __init__(self, filenames, gulp_nframe, *args, **kwargs):
super(DeserializeBlock, self).__init__(filenames, gulp_nframe, *args, **kwargs)
def create_reader(self, sourcename):
return BifrostReader(sourcename)
def on_sequence(self, ireader, sourcename):
hdr = ireader.header
return [ireader.header]
def on_data(self, reader, ospans):
ospan = ospans[0]
return [reader.readinto(ospan.data, ospan.frame_nbyte)]
def deserialize(filenames, gulp_nframe, *args, **kwargs):
"""Deserializes a data stream from a set of files using a simple data format
Sequence headers are read as JSON files, and sequence data are read
directly as binary from separate files.
The actual header and data files must have the following general form::
# Header
<filename>.json
# Single-ringlet data
<filename>.<frame_offset>.dat
# Multi-ringlet data
<filename>.<frame_offset>.<ringlet>.dat
See also: ``serialize``
Args:
filenames (list): List of input filenames (each ending with '.bf')
gulp_nframe (int): No. frames to read at a time.
*args: Arguments to ``bifrost.pipeline.SourceBlock``.
**kwargs: Keyword Arguments to ``bifrost.pipeline.SourceBlock``.
**Tensor semantics**::
Input: One data file per sequence
Output: [frame, ...], dtype = any, space = SYSTEM
Input: One data file per ringlet
Output: [ringlet, frame, ...], dtype = any, space = SYSTEM
Returns:
DeserializeBlock: A new block instance.
"""
return DeserializeBlock(filenames, gulp_nframe, *args, **kwargs)
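# A hedged usage sketch (the filename below is hypothetical, and the pipeline
# wiring is abbreviated; see bifrost.pipeline for how blocks are composed):
#
#     raw = deserialize(['dump.bf'], gulp_nframe=4096)
#     # downstream blocks then consume `raw` like any other SourceBlock output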
# **TODO: Write a DeserializeBlock that does the inverse of this
class SerializeBlock(SinkBlock):
def __init__(self, iring, path, max_file_size=None, *args, **kwargs):
super(SerializeBlock, self).__init__(iring, *args, **kwargs)
if path is None:
path = ''
self.path = path
if max_file_size is None:
max_file_size = 1024**3
self.max_file_size = max_file_size
def _close_data_files(self):
if hasattr(self, 'ofiles'):
for ofile in self.ofiles:
ofile.close()
def _open_new_data_files(self, frame_offset):
self._close_data_files()
self.bytes_written = 0
if self.frame_axis == 0:
# No ringlets, we can write all data to one file
filenames = [self.basename + '.bf.%012i.dat' % frame_offset]
elif self.frame_axis == 1:
# Ringlets, we must write each to a separate file
ndigit = len(str(self.nringlet-1))
filenames = [self.basename + ('.bf.%012i.%0'+str(ndigit)+'i.dat') %
(frame_offset, i)
for i in range(self.nringlet)]
else:
# TODO: Need to deal with separating multiple ringlet axes
# E.g., separate each ringlet dim with a dot
# Will have to lift/project the indices
raise NotImplementedError("Multiple ringlet axes not supported")
# Open data files
self.ofiles = [open(fname, 'wb') for fname in filenames]
def on_sequence(self, iseq):
hdr = iseq.header
tensor = hdr['_tensor']
if hdr['name'] != '':
self.basename = hdr['name']
else:
self.basename = '%020i' % hdr['time_tag']
if self.path != '':
# TODO: May need more flexibility in path handling
# E.g., may want to keep subdirs from original name
self.basename = os.path.basename(self.basename)
self.basename = os.path.join(self.path, self.basename)
# Write sequence header file
with open(self.basename + '.bf.json', 'w') as hdr_file:
hdr_file.write(json.dumps(hdr, indent=4, sort_keys=True))
shape = tensor['shape']
self.frame_axis = shape.index(-1)
self.nringlet = reduce(lambda a, b: a * b, shape[:self.frame_axis], 1)
self._open_new_data_files(frame_offset=0)
def on_sequence_end(self, iseq):
self._close_data_files()
def on_data(self, ispan):
if self.nringlet == 1:
bytes_to_write = ispan.data.nbytes
else:
bytes_to_write = ispan.data[0].nbytes
# Check if file size limit has been reached
if self.bytes_written + bytes_to_write > self.max_file_size:
self._open_new_data_files(ispan.frame_offset)
self.bytes_written += bytes_to_write
# Write data to file(s)
if self.nringlet == 1:
ispan.data.tofile(self.ofiles[0])
else:
for r in range(self.nringlet):
ispan.data[r].tofile(self.ofiles[r])
def serialize(iring, path=None, max_file_size=None, *args, **kwargs):
"""Serializes a data stream to a set of files using a simple data format
Sequence headers are written as JSON files, and sequence data are written
directly as binary to separate files.
Filenames begin with the sequence name if present, or the time tag if not.
The general form is::
# Header
<name_or_time_tag>.bf.json
# Single-ringlet data
<name_or_time_tag>.bf.<frame_offset>.dat
# Multi-ringlet data
<name_or_time_tag>.bf.<frame_offset>.<ringlet>.dat
Args:
        iring (Ring or Block): Input data source.
path (str): Path specifying where to write output files.
max_file_size (int): Max no. bytes to write to a single file. If set to
-1, no limit is applied.
*args: Arguments to ``bifrost.pipeline.SinkBlock``.
**kwargs: Keyword Arguments to ``bifrost.pipeline.SinkBlock``.
**Tensor semantics**::
Input: [frame, ...], dtype = any, space = SYSTEM
Output: One data file per sequence
Input: [ringlet, frame, ...], dtype = any, space = SYSTEM
Output: One data file per ringlet
Returns:
SerializeBlock: A new block instance.
"""
return SerializeBlock(iring, path, max_file_size, *args, **kwargs)
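# A hedged usage sketch (the upstream block `data` is a placeholder for any
# block producing [frame, ...] data in SYSTEM space; the path is made up):
#
#     serialize(data, path='/tmp/dumps', max_file_size=2**30)
#
# which writes <name_or_time_tag>.bf.json plus the corresponding .dat files
# under /tmp/dumps, as described in the docstring above.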
|
ledatelescope/bifrost
|
python/bifrost/blocks/serialize.py
|
Python
|
bsd-3-clause
| 11,503
|
from django.test import TestCase
from django.db.models import F
from django.core.exceptions import FieldError
from models import Employee, Company
class ExpressionsTestCase(TestCase):
fixtures = ['f_expression_testdata.json']
def test_basic_f_expression(self):
company_query = Company.objects.values('name','num_employees',
'num_chairs'
).order_by('name',
'num_employees',
'num_chairs')
# We can filter for companies where the number of employees is
# greater than the number of chairs.
self.assertItemsEqual(company_query.filter(
num_employees__gt=F('num_chairs')),
[{'num_chairs': 5, 'name': u'Example Inc.',
'num_employees': 2300},
{'num_chairs': 1, 'name': u'Test GmbH',
'num_employees': 32}])
        # We can set one field to have the value of another field. Make
        # sure we have enough chairs.
company_query.update(num_chairs=F('num_employees'))
self.assertItemsEqual(company_query,
[{'num_chairs': 2300, 'name': u'Example Inc.',
'num_employees': 2300},
{'num_chairs': 3, 'name': u'Foobar Ltd.',
'num_employees': 3},
{'num_chairs': 32, 'name': u'Test GmbH',
'num_employees': 32}])
# We can perform arithmetic operations in expressions. Make
# sure we have 2 spare chairs
company_query.update(num_chairs=F('num_employees')+2)
self.assertItemsEqual(company_query,
[{'num_chairs': 2302, 'name': u'Example Inc.',
'num_employees': 2300},
{'num_chairs': 5, 'name': u'Foobar Ltd.',
'num_employees': 3},
{'num_chairs': 34, 'name': u'Test GmbH',
'num_employees': 32}])
        # The usual order of operations is followed
company_query.update(num_chairs=F('num_employees') +
2 * F('num_employees'))
self.assertItemsEqual(company_query,
[{'num_chairs': 6900, 'name': u'Example Inc.',
'num_employees': 2300},
{'num_chairs': 9, 'name': u'Foobar Ltd.',
'num_employees': 3},
{'num_chairs': 96, 'name': u'Test GmbH',
'num_employees': 32}])
        # The order of operations can be overridden with parentheses
company_query.update(num_chairs=((F('num_employees') + 2) *
F('num_employees')))
self.assertItemsEqual(company_query,
[{'num_chairs': 5294600, 'name': u'Example Inc.',
'num_employees': 2300},
{'num_chairs': 15, 'name': u'Foobar Ltd.',
'num_employees': 3},
{'num_chairs': 1088, 'name': u'Test GmbH',
'num_employees': 32}])
        # The value of one foreign key can be copied over to
        # another foreign key.
self.assertEqual(Company.objects.update(point_of_contact=F('ceo')), 3)
self.assertEqual(repr([c.point_of_contact for
c in Company.objects.all()]),
'[<Employee: Joe Smith>, <Employee: Frank Meyer>, <Employee: Max Mustermann>]')
def test_f_expression_spanning_join(self):
# F Expressions can also span joins
self.assertQuerysetEqual(
Company.objects.filter(
ceo__firstname=F('point_of_contact__firstname')
).distinct().order_by('name'),
['<Company: Foobar Ltd.>', '<Company: Test GmbH>'])
Company.objects.exclude(
ceo__firstname=F('point_of_contact__firstname')
).update(name='foo')
self.assertEqual(Company.objects.exclude(
ceo__firstname=F('point_of_contact__firstname')
).get().name,
u'foo')
self.assertRaises(FieldError,
Company.objects.exclude(ceo__firstname=F('point_of_contact__firstname')).update,
name=F('point_of_contact__lastname'))
def test_f_expression_update_attribute(self):
# F expressions can be used to update attributes on single objects
test_gmbh = Company.objects.get(name='Test GmbH')
self.assertEqual(test_gmbh.num_employees, 32)
test_gmbh.num_employees = F('num_employees') + 4
test_gmbh.save()
test_gmbh = Company.objects.get(pk=test_gmbh.pk)
self.assertEqual(test_gmbh.num_employees, 36)
# F expressions cannot be used to update attributes which are
# foreign keys, or attributes which involve joins.
test_gmbh.point_of_contact = None
test_gmbh.save()
self.assertEqual(test_gmbh.point_of_contact, None)
self.assertRaises(ValueError,
setattr,
test_gmbh, 'point_of_contact', F('ceo'))
test_gmbh.point_of_contact = test_gmbh.ceo
test_gmbh.save()
test_gmbh.name = F('ceo__last_name')
self.assertRaises(FieldError,
test_gmbh.save)
# F expressions cannot be used to update attributes on objects
# which do not yet exist in the database
acme = Company(name='The Acme Widget Co.', num_employees=12,
num_chairs=5, ceo=test_gmbh.ceo)
acme.num_employees = F('num_employees') + 16
self.assertRaises(TypeError,
acme.save)
|
sam-tsai/django-old
|
tests/modeltests/expressions/tests.py
|
Python
|
bsd-3-clause
| 6,079
|
import sys
import cProfile
from cStringIO import StringIO
import pstats
from django.conf import settings
class ProfilerMiddleware(object):
def process_view(self, request, callback, callback_args, callback_kwargs):
if settings.DEBUG and 'prof' in request.GET:
self.profiler = cProfile.Profile()
args = (request,) + callback_args
return self.profiler.runcall(callback, *args, **callback_kwargs)
def process_response(self, request, response):
if settings.DEBUG and 'prof' in request.GET:
self.profiler.create_stats()
outfile = "profile/stats.dump"
self.profiler.dump_stats(outfile)
out = StringIO()
p = pstats.Stats(outfile, stream=out)
#p.strip_dirs()
p.sort_stats('cumulative')
p.print_stats(-1)
response.content = '<pre>%s</pre>' % out.getvalue()
return response
|
vegarang/devilry-django
|
devilry/utils/profile.py
|
Python
|
bsd-3-clause
| 945
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
# I do not use orm['...']
        # because that way it is not possible to call the model's methods,
# such as `post.tags`
from feedzilla.models import Post
#for post in orm['feedzilla.Post'].objects.all():
for count, post in enumerate(Post.objects.all()):
if count and not count % 10:
print count
#if count and not count % 100:
#db.commit_transaction()
#db.start_transaction()
tags = [x.strip() for x in post.rawtags.split(',') if x.strip()]
post.tags.add(*tags)
def backwards(self, orm):
"Write your backwards methods here."
models = {
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'feedzilla.feed': {
'Meta': {'object_name': 'Feed'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'active_post_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'etag': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'feed_url': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_checked': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'post_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'site_url': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'skip_filters': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'feedzilla.feedzillatag': {
'Meta': {'object_name': 'FeedzillaTag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'feedzilla.feedzillatagitem': {
'Meta': {'object_name': 'FeedzillaTagItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'feedzilla_feedzillatagitem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': u"orm['feedzilla.FeedzillaTag']"})
},
u'feedzilla.filtertag': {
'Meta': {'object_name': 'FilterTag'},
'exact': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'feedzilla.filterword': {
'Meta': {'object_name': 'FilterWord'},
'exact': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'feedzilla.post': {
'Meta': {'ordering': "['-created']", 'object_name': 'Post'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'posts'", 'to': u"orm['feedzilla.Feed']"}),
'guid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.TextField', [], {}),
'rawtags': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'summary': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'feedzilla.request': {
'Meta': {'ordering': "['-created']", 'object_name': 'Request'},
'author': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feed_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'url': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'taggit.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_items'", 'to': u"orm['taggit.Tag']"})
}
}
complete_apps = ['taggit', 'feedzilla']
symmetrical = True
|
feedzilla/feedzilla
|
feedzilla/migrations/0011_setup_taggit_data.py
|
Python
|
bsd-3-clause
| 7,349
|
import pytest
import socket
import types
from collections import defaultdict
from itertools import count
from queue import Empty, Queue as _Queue
from unittest.mock import ANY, Mock, call, patch
from case import ContextMock, mock
from kombu import Connection, Exchange, Queue, Consumer, Producer
from kombu.exceptions import InconsistencyError, VersionMismatch
from kombu.transport import virtual
from kombu.utils import eventio # patch poll
from kombu.utils.json import dumps
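# The tests below replace kombu's select-based poller with a trivial fake that
# reports a file descriptor as readable whenever its mock socket has queued
# data, so no real sockets are ever polled.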
class _poll(eventio._select):
def register(self, fd, flags):
if flags & eventio.READ:
self._rfd.add(fd)
def poll(self, timeout):
events = []
for fd in self._rfd:
if fd.data:
events.append((fd.fileno(), eventio.READ))
return events
eventio.poll = _poll
pytest.importorskip('redis')
# must be imported after the poller patch above; the noqa silences pep8's
# complaint about a non-top-level import
from kombu.transport import redis # noqa
class ResponseError(Exception):
pass
class Client:
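    """Minimal in-memory stand-in for the redis-py client, implementing only
    the commands these transport tests exercise (queues backed by ``Queue``
    objects, sets and hashes by plain dict-based containers)."""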
queues = {}
sets = defaultdict(set)
hashes = defaultdict(dict)
shard_hint = None
def __init__(self, db=None, port=None, connection_pool=None, **kwargs):
self._called = []
self._connection = None
self.bgsave_raises_ResponseError = False
self.connection = self._sconnection(self)
def bgsave(self):
self._called.append('BGSAVE')
if self.bgsave_raises_ResponseError:
raise ResponseError()
def delete(self, key):
self.queues.pop(key, None)
def exists(self, key):
return key in self.queues or key in self.sets
def hset(self, key, k, v):
self.hashes[key][k] = v
def hget(self, key, k):
return self.hashes[key].get(k)
def hdel(self, key, k):
self.hashes[key].pop(k, None)
def sadd(self, key, member, *args):
self.sets[key].add(member)
def zadd(self, key, *args):
if redis.redis.VERSION[0] >= 3:
(mapping,) = args
for item in mapping:
self.sets[key].add(item)
else:
# TODO: remove me when we drop support for Redis-py v2
(score1, member1) = args
self.sets[key].add(member1)
def smembers(self, key):
return self.sets.get(key, set())
def ping(self, *args, **kwargs):
return True
def srem(self, key, *args):
self.sets.pop(key, None)
zrem = srem
def llen(self, key):
try:
return self.queues[key].qsize()
except KeyError:
return 0
def lpush(self, key, value):
self.queues[key].put_nowait(value)
def parse_response(self, connection, type, **options):
cmd, queues = self.connection._sock.data.pop()
queues = list(queues)
assert cmd == type
self.connection._sock.data = []
if type == 'BRPOP':
timeout = queues.pop()
item = self.brpop(queues, timeout)
if item:
return item
raise Empty()
def brpop(self, keys, timeout=None):
for key in keys:
try:
item = self.queues[key].get_nowait()
except Empty:
pass
else:
return key, item
def rpop(self, key):
try:
return self.queues[key].get_nowait()
except (KeyError, Empty):
pass
def __contains__(self, k):
return k in self._called
def pipeline(self):
return Pipeline(self)
def encode(self, value):
return str(value)
def _new_queue(self, key):
self.queues[key] = _Queue()
class _sconnection:
disconnected = False
class _socket:
blocking = True
filenos = count(30)
def __init__(self, *args):
self._fileno = next(self.filenos)
self.data = []
def fileno(self):
return self._fileno
def setblocking(self, blocking):
self.blocking = blocking
def __init__(self, client):
self.client = client
self._sock = self._socket()
def disconnect(self):
self.disconnected = True
def send_command(self, cmd, *args):
self._sock.data.append((cmd, args))
def info(self):
return {'foo': 1}
def pubsub(self, *args, **kwargs):
connection = self.connection
class ConnectionPool:
def get_connection(self, *args, **kwargs):
return connection
self.connection_pool = ConnectionPool()
return self
class Pipeline:
def __init__(self, client):
self.client = client
self.stack = []
def __enter__(self):
return self
def __exit__(self, *exc_info):
pass
def __getattr__(self, key):
if key not in self.__dict__:
def _add(*args, **kwargs):
self.stack.append((getattr(self.client, key), args, kwargs))
return self
return _add
return self.__dict__[key]
def execute(self):
stack = list(self.stack)
self.stack[:] = []
return [fun(*args, **kwargs) for fun, args, kwargs in stack]
class Channel(redis.Channel):
def _get_client(self):
return Client
def _get_pool(self, asynchronous=False):
return Mock()
def _get_response_error(self):
return ResponseError
def _new_queue(self, queue, **kwargs):
for pri in self.priority_steps:
self.client._new_queue(self._q_for_pri(queue, pri))
def pipeline(self):
return Pipeline(Client())
class Transport(redis.Transport):
Channel = Channel
def _get_errors(self):
return ((KeyError,), (IndexError,))
class test_Channel:
def setup(self):
self.connection = self.create_connection()
self.channel = self.connection.default_channel
def create_connection(self, **kwargs):
kwargs.setdefault('transport_options', {'fanout_patterns': True})
return Connection(transport=Transport, **kwargs)
def _get_one_delivery_tag(self, n='test_uniq_tag'):
with self.create_connection() as conn1:
chan = conn1.default_channel
chan.exchange_declare(n)
chan.queue_declare(n)
chan.queue_bind(n, n, n)
msg = chan.prepare_message('quick brown fox')
chan.basic_publish(msg, n, n)
payload = chan._get(n)
assert payload
pymsg = chan.message_to_python(payload)
return pymsg.delivery_tag
def test_delivery_tag_is_uuid(self):
seen = set()
for i in range(100):
tag = self._get_one_delivery_tag()
assert tag not in seen
seen.add(tag)
with pytest.raises(ValueError):
int(tag)
assert len(tag) == 36
def test_disable_ack_emulation(self):
conn = Connection(transport=Transport, transport_options={
'ack_emulation': False,
})
chan = conn.channel()
assert not chan.ack_emulation
assert chan.QoS == virtual.QoS
def test_redis_ping_raises(self):
pool = Mock(name='pool')
pool_at_init = [pool]
client = Mock(name='client')
class XChannel(Channel):
def __init__(self, *args, **kwargs):
self._pool = pool_at_init[0]
super().__init__(*args, **kwargs)
def _get_client(self):
return lambda *_, **__: client
class XTransport(Transport):
Channel = XChannel
conn = Connection(transport=XTransport)
client.ping.side_effect = RuntimeError()
with pytest.raises(RuntimeError):
conn.channel()
pool.disconnect.assert_called_with()
pool.disconnect.reset_mock()
pool_at_init = [None]
with pytest.raises(RuntimeError):
conn.channel()
pool.disconnect.assert_not_called()
def test_get_redis_ConnectionError(self):
from redis.exceptions import ConnectionError
from kombu.transport.redis import get_redis_ConnectionError
connection_error = get_redis_ConnectionError()
assert connection_error == ConnectionError
def test_after_fork_cleanup_channel(self):
from kombu.transport.redis import _after_fork_cleanup_channel
channel = Mock()
_after_fork_cleanup_channel(channel)
channel._after_fork.assert_called_once()
def test_after_fork(self):
self.channel._pool = None
self.channel._after_fork()
pool = self.channel._pool = Mock(name='pool')
self.channel._after_fork()
pool.disconnect.assert_called_with()
def test_next_delivery_tag(self):
assert (self.channel._next_delivery_tag() !=
self.channel._next_delivery_tag())
def test_do_restore_message(self):
client = Mock(name='client')
pl1 = {'body': 'BODY'}
spl1 = dumps(pl1)
lookup = self.channel._lookup = Mock(name='_lookup')
lookup.return_value = {'george', 'elaine'}
self.channel._do_restore_message(
pl1, 'ex', 'rkey', client,
)
client.rpush.assert_has_calls([
call('george', spl1), call('elaine', spl1),
], any_order=True)
client = Mock(name='client')
pl2 = {'body': 'BODY2', 'headers': {'x-funny': 1}}
headers_after = dict(pl2['headers'], redelivered=True)
spl2 = dumps(dict(pl2, headers=headers_after))
self.channel._do_restore_message(
pl2, 'ex', 'rkey', client,
)
client.rpush.assert_any_call('george', spl2)
client.rpush.assert_any_call('elaine', spl2)
client.rpush.side_effect = KeyError()
with patch('kombu.transport.redis.crit') as crit:
self.channel._do_restore_message(
pl2, 'ex', 'rkey', client,
)
crit.assert_called()
def test_restore(self):
message = Mock(name='message')
with patch('kombu.transport.redis.loads') as loads:
loads.return_value = 'M', 'EX', 'RK'
client = self.channel._create_client = Mock(name='client')
client = client()
client.pipeline = ContextMock()
restore = self.channel._do_restore_message = Mock(
name='_do_restore_message',
)
pipe = client.pipeline.return_value
pipe_hget = Mock(name='pipe.hget')
pipe.hget.return_value = pipe_hget
pipe_hget_hdel = Mock(name='pipe.hget.hdel')
pipe_hget.hdel.return_value = pipe_hget_hdel
result = Mock(name='result')
pipe_hget_hdel.execute.return_value = None, None
self.channel._restore(message)
client.pipeline.assert_called_with()
unacked_key = self.channel.unacked_key
loads.assert_not_called()
tag = message.delivery_tag
pipe.hget.assert_called_with(unacked_key, tag)
pipe_hget.hdel.assert_called_with(unacked_key, tag)
pipe_hget_hdel.execute.assert_called_with()
pipe_hget_hdel.execute.return_value = result, None
self.channel._restore(message)
loads.assert_called_with(result)
restore.assert_called_with('M', 'EX', 'RK', client, False)
def test_qos_restore_visible(self):
client = self.channel._create_client = Mock(name='client')
client = client()
def pipe(*args, **kwargs):
return Pipeline(client)
client.pipeline = pipe
client.zrevrangebyscore.return_value = [
(1, 10),
(2, 20),
(3, 30),
]
qos = redis.QoS(self.channel)
restore = qos.restore_by_tag = Mock(name='restore_by_tag')
qos._vrestore_count = 1
qos.restore_visible()
client.zrevrangebyscore.assert_not_called()
assert qos._vrestore_count == 2
qos._vrestore_count = 0
qos.restore_visible()
restore.assert_has_calls([
call(1, client), call(2, client), call(3, client),
])
assert qos._vrestore_count == 1
qos._vrestore_count = 0
restore.reset_mock()
client.zrevrangebyscore.return_value = []
qos.restore_visible()
restore.assert_not_called()
assert qos._vrestore_count == 1
qos._vrestore_count = 0
client.setnx.side_effect = redis.MutexHeld()
qos.restore_visible()
def test_basic_consume_when_fanout_queue(self):
self.channel.exchange_declare(exchange='txconfan', type='fanout')
self.channel.queue_declare(queue='txconfanq')
self.channel.queue_bind(queue='txconfanq', exchange='txconfan')
assert 'txconfanq' in self.channel._fanout_queues
self.channel.basic_consume('txconfanq', False, None, 1)
assert 'txconfanq' in self.channel.active_fanout_queues
assert self.channel._fanout_to_queue.get('txconfan') == 'txconfanq'
def test_basic_cancel_unknown_delivery_tag(self):
assert self.channel.basic_cancel('txaseqwewq') is None
def test_subscribe_no_queues(self):
self.channel.subclient = Mock()
self.channel.active_fanout_queues.clear()
self.channel._subscribe()
self.channel.subclient.subscribe.assert_not_called()
def test_subscribe(self):
self.channel.subclient = Mock()
self.channel.active_fanout_queues.add('a')
self.channel.active_fanout_queues.add('b')
self.channel._fanout_queues.update(a=('a', ''), b=('b', ''))
self.channel._subscribe()
self.channel.subclient.psubscribe.assert_called()
s_args, _ = self.channel.subclient.psubscribe.call_args
assert sorted(s_args[0]) == ['/{db}.a', '/{db}.b']
self.channel.subclient.connection._sock = None
self.channel._subscribe()
self.channel.subclient.connection.connect.assert_called_with()
def test_handle_unsubscribe_message(self):
s = self.channel.subclient
s.subscribed = True
self.channel._handle_message(s, ['unsubscribe', 'a', 0])
assert not s.subscribed
def test_handle_pmessage_message(self):
res = self.channel._handle_message(
self.channel.subclient,
['pmessage', 'pattern', 'channel', 'data'],
)
assert res == {
'type': 'pmessage',
'pattern': 'pattern',
'channel': 'channel',
'data': 'data',
}
def test_handle_message(self):
res = self.channel._handle_message(
self.channel.subclient,
['type', 'channel', 'data'],
)
assert res == {
'type': 'type',
'pattern': None,
'channel': 'channel',
'data': 'data',
}
def test_brpop_start_but_no_queues(self):
assert self.channel._brpop_start() is None
def test_receive(self):
s = self.channel.subclient = Mock()
self.channel._fanout_to_queue['a'] = 'b'
self.channel.connection._deliver = Mock(name='_deliver')
message = {
'body': 'hello',
'properties': {
'delivery_tag': 1,
'delivery_info': {'exchange': 'E', 'routing_key': 'R'},
},
}
s.parse_response.return_value = ['message', 'a', dumps(message)]
self.channel._receive_one(self.channel.subclient)
self.channel.connection._deliver.assert_called_once_with(
message, 'b',
)
def test_receive_raises_for_connection_error(self):
self.channel._in_listen = True
s = self.channel.subclient = Mock()
s.parse_response.side_effect = KeyError('foo')
with pytest.raises(KeyError):
self.channel._receive_one(self.channel.subclient)
assert not self.channel._in_listen
def test_receive_empty(self):
s = self.channel.subclient = Mock()
s.parse_response.return_value = None
assert self.channel._receive_one(self.channel.subclient) is None
    def test_receive_different_message_type(self):
s = self.channel.subclient = Mock()
s.parse_response.return_value = ['message', '/foo/', 0, 'data']
assert self.channel._receive_one(self.channel.subclient) is None
def test_receive_connection_has_gone(self):
def _receive_one(c):
c.connection = None
_receive_one.called = True
return True
_receive_one.called = False
self.channel._receive_one = _receive_one
assert self.channel._receive()
assert _receive_one.called
def test_brpop_read_raises(self):
c = self.channel.client = Mock()
c.parse_response.side_effect = KeyError('foo')
with pytest.raises(KeyError):
self.channel._brpop_read()
c.connection.disconnect.assert_called_with()
def test_brpop_read_gives_None(self):
c = self.channel.client = Mock()
c.parse_response.return_value = None
with pytest.raises(redis.Empty):
self.channel._brpop_read()
def test_poll_error(self):
c = self.channel.client = Mock()
c.parse_response = Mock()
self.channel._poll_error('BRPOP')
c.parse_response.assert_called_with(c.connection, 'BRPOP')
c.parse_response.side_effect = KeyError('foo')
with pytest.raises(KeyError):
self.channel._poll_error('BRPOP')
def test_poll_error_on_type_LISTEN(self):
c = self.channel.subclient = Mock()
c.parse_response = Mock()
self.channel._poll_error('LISTEN')
c.parse_response.assert_called_with()
c.parse_response.side_effect = KeyError('foo')
with pytest.raises(KeyError):
self.channel._poll_error('LISTEN')
def test_put_fanout(self):
self.channel._in_poll = False
c = self.channel._create_client = Mock()
body = {'hello': 'world'}
self.channel._put_fanout('exchange', body, '')
c().publish.assert_called_with('/{db}.exchange', dumps(body))
def test_put_priority(self):
client = self.channel._create_client = Mock(name='client')
msg1 = {'properties': {'priority': 3}}
self.channel._put('george', msg1)
client().lpush.assert_called_with(
self.channel._q_for_pri('george', 3), dumps(msg1),
)
msg2 = {'properties': {'priority': 313}}
self.channel._put('george', msg2)
client().lpush.assert_called_with(
self.channel._q_for_pri('george', 9), dumps(msg2),
)
msg3 = {'properties': {}}
self.channel._put('george', msg3)
client().lpush.assert_called_with(
self.channel._q_for_pri('george', 0), dumps(msg3),
)
def test_delete(self):
x = self.channel
x._create_client = Mock()
x._create_client.return_value = x.client
delete = x.client.delete = Mock()
srem = x.client.srem = Mock()
x._delete('queue', 'exchange', 'routing_key', None)
delete.assert_has_calls([
call(x._q_for_pri('queue', pri)) for pri in redis.PRIORITY_STEPS
])
srem.assert_called_with(x.keyprefix_queue % ('exchange',),
x.sep.join(['routing_key', '', 'queue']))
def test_has_queue(self):
self.channel._create_client = Mock()
self.channel._create_client.return_value = self.channel.client
exists = self.channel.client.exists = Mock()
exists.return_value = True
assert self.channel._has_queue('foo')
exists.assert_has_calls([
call(self.channel._q_for_pri('foo', pri))
for pri in redis.PRIORITY_STEPS
])
exists.return_value = False
assert not self.channel._has_queue('foo')
def test_close_when_closed(self):
self.channel.closed = True
self.channel.close()
def test_close_deletes_autodelete_fanout_queues(self):
self.channel._fanout_queues = {'foo': ('foo', ''), 'bar': ('bar', '')}
self.channel.auto_delete_queues = ['foo']
self.channel.queue_delete = Mock(name='queue_delete')
client = self.channel.client
self.channel.close()
self.channel.queue_delete.assert_has_calls([
call('foo', client=client),
])
def test_close_client_close_raises(self):
c = self.channel.client = Mock()
connection = c.connection
connection.disconnect.side_effect = self.channel.ResponseError()
self.channel.close()
connection.disconnect.assert_called_with()
def test_invalid_database_raises_ValueError(self):
with pytest.raises(ValueError):
self.channel.connection.client.virtual_host = 'dwqeq'
self.channel._connparams()
def test_connparams_allows_slash_in_db(self):
self.channel.connection.client.virtual_host = '/123'
assert self.channel._connparams()['db'] == 123
def test_connparams_db_can_be_int(self):
self.channel.connection.client.virtual_host = 124
assert self.channel._connparams()['db'] == 124
def test_new_queue_with_auto_delete(self):
redis.Channel._new_queue(self.channel, 'george', auto_delete=False)
assert 'george' not in self.channel.auto_delete_queues
redis.Channel._new_queue(self.channel, 'elaine', auto_delete=True)
assert 'elaine' in self.channel.auto_delete_queues
def test_connparams_regular_hostname(self):
self.channel.connection.client.hostname = 'george.vandelay.com'
assert self.channel._connparams()['host'] == 'george.vandelay.com'
def test_connparams_password_for_unix_socket(self):
self.channel.connection.client.hostname = \
'socket://:foo@/var/run/redis.sock'
connection_parameters = self.channel._connparams()
password = connection_parameters['password']
path = connection_parameters['path']
assert (password, path) == ('foo', '/var/run/redis.sock')
self.channel.connection.client.hostname = \
'socket://@/var/run/redis.sock'
connection_parameters = self.channel._connparams()
password = connection_parameters['password']
path = connection_parameters['path']
assert (password, path) == (None, '/var/run/redis.sock')
def test_connparams_health_check_interval_not_supported(self):
with patch('kombu.transport.redis.Channel._create_client'):
with Connection('redis+socket:///tmp/redis.sock') as conn:
conn.default_channel.connection_class = \
Mock(name='connection_class')
connparams = conn.default_channel._connparams()
assert 'health_check_interval' not in connparams
def test_connparams_health_check_interval_supported(self):
with patch('kombu.transport.redis.Channel._create_client'):
with Connection('redis+socket:///tmp/redis.sock') as conn:
connparams = conn.default_channel._connparams()
assert connparams['health_check_interval'] == 25
def test_rotate_cycle_ValueError(self):
cycle = self.channel._queue_cycle
cycle.update(['kramer', 'jerry'])
cycle.rotate('kramer')
        assert cycle.items == ['jerry', 'kramer']
cycle.rotate('elaine')
def test_get_client(self):
import redis as R
KombuRedis = redis.Channel._get_client(self.channel)
assert KombuRedis
Rv = getattr(R, 'VERSION', None)
try:
R.VERSION = (2, 4, 0)
with pytest.raises(VersionMismatch):
redis.Channel._get_client(self.channel)
finally:
if Rv is not None:
R.VERSION = Rv
def test_get_response_error(self):
from redis.exceptions import ResponseError
assert redis.Channel._get_response_error(self.channel) is ResponseError
def test_avail_client(self):
self.channel._pool = Mock()
cc = self.channel._create_client = Mock()
with self.channel.conn_or_acquire():
pass
cc.assert_called_with()
def test_register_with_event_loop(self):
transport = self.connection.transport
transport.cycle = Mock(name='cycle')
transport.cycle.fds = {12: 'LISTEN', 13: 'BRPOP'}
conn = Mock(name='conn')
conn.client = Mock(name='client', transport_options={})
loop = Mock(name='loop')
redis.Transport.register_with_event_loop(transport, conn, loop)
transport.cycle.on_poll_init.assert_called_with(loop.poller)
loop.call_repeatedly.assert_has_calls([
call(10, transport.cycle.maybe_restore_messages),
call(25, transport.cycle.maybe_check_subclient_health),
])
loop.on_tick.add.assert_called()
on_poll_start = loop.on_tick.add.call_args[0][0]
on_poll_start()
transport.cycle.on_poll_start.assert_called_with()
loop.add_reader.assert_has_calls([
call(12, transport.on_readable, 12),
call(13, transport.on_readable, 13),
])
def test_configurable_health_check(self):
transport = self.connection.transport
transport.cycle = Mock(name='cycle')
transport.cycle.fds = {12: 'LISTEN', 13: 'BRPOP'}
conn = Mock(name='conn')
conn.client = Mock(name='client', transport_options={
'health_check_interval': 15,
})
loop = Mock(name='loop')
redis.Transport.register_with_event_loop(transport, conn, loop)
transport.cycle.on_poll_init.assert_called_with(loop.poller)
loop.call_repeatedly.assert_has_calls([
call(10, transport.cycle.maybe_restore_messages),
call(15, transport.cycle.maybe_check_subclient_health),
])
loop.on_tick.add.assert_called()
on_poll_start = loop.on_tick.add.call_args[0][0]
on_poll_start()
transport.cycle.on_poll_start.assert_called_with()
loop.add_reader.assert_has_calls([
call(12, transport.on_readable, 12),
call(13, transport.on_readable, 13),
])
def test_transport_on_readable(self):
transport = self.connection.transport
        cycle = transport.cycle = Mock(name='cycle')
cycle.on_readable.return_value = None
redis.Transport.on_readable(transport, 13)
cycle.on_readable.assert_called_with(13)
def test_transport_get_errors(self):
assert redis.Transport._get_errors(self.connection.transport)
def test_transport_driver_version(self):
assert redis.Transport.driver_version(self.connection.transport)
def test_transport_get_errors_when_InvalidData_used(self):
from redis import exceptions
class ID(Exception):
pass
DataError = getattr(exceptions, 'DataError', None)
InvalidData = getattr(exceptions, 'InvalidData', None)
exceptions.InvalidData = ID
exceptions.DataError = None
try:
errors = redis.Transport._get_errors(self.connection.transport)
assert errors
assert ID in errors[1]
finally:
if DataError is not None:
exceptions.DataError = DataError
if InvalidData is not None:
exceptions.InvalidData = InvalidData
def test_empty_queues_key(self):
channel = self.channel
channel._in_poll = False
key = channel.keyprefix_queue % 'celery'
# Everything is fine, there is a list of queues.
channel.client.sadd(key, 'celery\x06\x16\x06\x16celery')
assert channel.get_table('celery') == [
('celery', '', 'celery'),
]
# ... then for some reason, the _kombu.binding.celery key gets lost
channel.client.srem(key)
# which raises a channel error so that the consumer/publisher
# can recover by redeclaring the required entities.
with pytest.raises(InconsistencyError):
self.channel.get_table('celery')
def test_socket_connection(self):
with patch('kombu.transport.redis.Channel._create_client'):
with Connection('redis+socket:///tmp/redis.sock') as conn:
connparams = conn.default_channel._connparams()
assert issubclass(
connparams['connection_class'],
redis.redis.UnixDomainSocketConnection,
)
assert connparams['path'] == '/tmp/redis.sock'
def test_ssl_argument__dict(self):
with patch('kombu.transport.redis.Channel._create_client'):
# Expected format for redis-py's SSLConnection class
ssl_params = {
'ssl_cert_reqs': 2,
'ssl_ca_certs': '/foo/ca.pem',
'ssl_certfile': '/foo/cert.crt',
'ssl_keyfile': '/foo/pkey.key'
}
with Connection('redis://', ssl=ssl_params) as conn:
params = conn.default_channel._connparams()
assert params['ssl_cert_reqs'] == ssl_params['ssl_cert_reqs']
assert params['ssl_ca_certs'] == ssl_params['ssl_ca_certs']
assert params['ssl_certfile'] == ssl_params['ssl_certfile']
assert params['ssl_keyfile'] == ssl_params['ssl_keyfile']
assert params.get('ssl') is None
def test_ssl_connection(self):
with patch('kombu.transport.redis.Channel._create_client'):
with Connection('redis://', ssl={'ssl_cert_reqs': 2}) as conn:
connparams = conn.default_channel._connparams()
assert issubclass(
connparams['connection_class'],
redis.redis.SSLConnection,
)
def test_rediss_connection(self):
with patch('kombu.transport.redis.Channel._create_client'):
with Connection('rediss://') as conn:
connparams = conn.default_channel._connparams()
assert issubclass(
connparams['connection_class'],
redis.redis.SSLConnection,
)
def test_sep_transport_option(self):
with Connection(transport=Transport, transport_options={
'sep': ':',
}) as conn:
key = conn.default_channel.keyprefix_queue % 'celery'
conn.default_channel.client.sadd(key, 'celery::celery')
assert conn.default_channel.sep == ':'
assert conn.default_channel.get_table('celery') == [
('celery', '', 'celery'),
]
class test_Redis:
def setup(self):
self.connection = Connection(transport=Transport)
self.exchange = Exchange('test_Redis', type='direct')
self.queue = Queue('test_Redis', self.exchange, 'test_Redis')
def teardown(self):
self.connection.close()
@mock.replace_module_value(redis.redis, 'VERSION', [3, 0, 0])
def test_publish__get_redispyv3(self):
channel = self.connection.channel()
producer = Producer(channel, self.exchange, routing_key='test_Redis')
self.queue(channel).declare()
producer.publish({'hello': 'world'})
assert self.queue(channel).get().payload == {'hello': 'world'}
assert self.queue(channel).get() is None
assert self.queue(channel).get() is None
assert self.queue(channel).get() is None
@mock.replace_module_value(redis.redis, 'VERSION', [2, 5, 10])
def test_publish__get_redispyv2(self):
channel = self.connection.channel()
producer = Producer(channel, self.exchange, routing_key='test_Redis')
self.queue(channel).declare()
producer.publish({'hello': 'world'})
assert self.queue(channel).get().payload == {'hello': 'world'}
assert self.queue(channel).get() is None
assert self.queue(channel).get() is None
assert self.queue(channel).get() is None
def test_publish__consume(self):
connection = Connection(transport=Transport)
channel = connection.channel()
producer = Producer(channel, self.exchange, routing_key='test_Redis')
consumer = Consumer(channel, queues=[self.queue])
producer.publish({'hello2': 'world2'})
_received = []
def callback(message_data, message):
_received.append(message_data)
message.ack()
consumer.register_callback(callback)
consumer.consume()
assert channel in channel.connection.cycle._channels
try:
connection.drain_events(timeout=1)
assert _received
with pytest.raises(socket.timeout):
connection.drain_events(timeout=0.01)
finally:
channel.close()
def test_purge(self):
channel = self.connection.channel()
producer = Producer(channel, self.exchange, routing_key='test_Redis')
self.queue(channel).declare()
for i in range(10):
producer.publish({'hello': f'world-{i}'})
assert channel._size('test_Redis') == 10
assert self.queue(channel).purge() == 10
channel.close()
def test_db_values(self):
Connection(virtual_host=1,
transport=Transport).channel()
Connection(virtual_host='1',
transport=Transport).channel()
Connection(virtual_host='/1',
transport=Transport).channel()
with pytest.raises(Exception):
Connection('redis:///foo').channel()
def test_db_port(self):
c1 = Connection(port=None, transport=Transport).channel()
c1.close()
c2 = Connection(port=9999, transport=Transport).channel()
c2.close()
def test_close_poller_not_active(self):
c = Connection(transport=Transport).channel()
cycle = c.connection.cycle
c.client.connection
c.close()
assert c not in cycle._channels
def test_close_ResponseError(self):
c = Connection(transport=Transport).channel()
c.client.bgsave_raises_ResponseError = True
c.close()
def test_close_disconnects(self):
c = Connection(transport=Transport).channel()
conn1 = c.client.connection
conn2 = c.subclient.connection
c.close()
assert conn1.disconnected
assert conn2.disconnected
def test_get__Empty(self):
channel = self.connection.channel()
with pytest.raises(Empty):
channel._get('does-not-exist')
channel.close()
def test_get_client(self):
with mock.module_exists(*_redis_modules()):
conn = Connection(transport=Transport)
chan = conn.channel()
assert chan.Client
assert chan.ResponseError
assert conn.transport.connection_errors
assert conn.transport.channel_errors
def test_check_at_least_we_try_to_connect_and_fail(self):
import redis
connection = Connection('redis://localhost:65534/')
with pytest.raises(redis.exceptions.ConnectionError):
chan = connection.channel()
chan._size('some_queue')
def _redis_modules():
class ConnectionError(Exception):
pass
class AuthenticationError(Exception):
pass
class InvalidData(Exception):
pass
class InvalidResponse(Exception):
pass
class ResponseError(Exception):
pass
exceptions = types.ModuleType('redis.exceptions')
exceptions.ConnectionError = ConnectionError
exceptions.AuthenticationError = AuthenticationError
exceptions.InvalidData = InvalidData
exceptions.InvalidResponse = InvalidResponse
exceptions.ResponseError = ResponseError
class Redis:
pass
myredis = types.ModuleType('redis')
myredis.exceptions = exceptions
myredis.Redis = Redis
return myredis, exceptions
class test_MultiChannelPoller:
def setup(self):
self.Poller = redis.MultiChannelPoller
def test_on_poll_start(self):
p = self.Poller()
p._channels = []
p.on_poll_start()
p._register_BRPOP = Mock(name='_register_BRPOP')
p._register_LISTEN = Mock(name='_register_LISTEN')
chan1 = Mock(name='chan1')
p._channels = [chan1]
chan1.active_queues = []
chan1.active_fanout_queues = []
p.on_poll_start()
chan1.active_queues = ['q1']
chan1.active_fanout_queues = ['q2']
chan1.qos.can_consume.return_value = False
p.on_poll_start()
p._register_LISTEN.assert_called_with(chan1)
p._register_BRPOP.assert_not_called()
chan1.qos.can_consume.return_value = True
p._register_LISTEN.reset_mock()
p.on_poll_start()
p._register_BRPOP.assert_called_with(chan1)
p._register_LISTEN.assert_called_with(chan1)
def test_on_poll_init(self):
p = self.Poller()
chan1 = Mock(name='chan1')
p._channels = []
poller = Mock(name='poller')
p.on_poll_init(poller)
assert p.poller is poller
p._channels = [chan1]
p.on_poll_init(poller)
chan1.qos.restore_visible.assert_called_with(
num=chan1.unacked_restore_limit,
)
def test_handle_event(self):
p = self.Poller()
chan = Mock(name='chan')
p._fd_to_chan[13] = chan, 'BRPOP'
chan.handlers = {'BRPOP': Mock(name='BRPOP')}
chan.qos.can_consume.return_value = False
p.handle_event(13, redis.READ)
chan.handlers['BRPOP'].assert_not_called()
chan.qos.can_consume.return_value = True
p.handle_event(13, redis.READ)
chan.handlers['BRPOP'].assert_called_with()
p.handle_event(13, redis.ERR)
chan._poll_error.assert_called_with('BRPOP')
p.handle_event(13, ~(redis.READ | redis.ERR))
def test_fds(self):
p = self.Poller()
p._fd_to_chan = {1: 2}
assert p.fds == p._fd_to_chan
def test_close_unregisters_fds(self):
p = self.Poller()
poller = p.poller = Mock()
p._chan_to_sock.update({1: 1, 2: 2, 3: 3})
p.close()
assert poller.unregister.call_count == 3
u_args = poller.unregister.call_args_list
assert sorted(u_args) == [
((1,), {}),
((2,), {}),
((3,), {}),
]
def test_close_when_unregister_raises_KeyError(self):
p = self.Poller()
p.poller = Mock()
p._chan_to_sock.update({1: 1})
p.poller.unregister.side_effect = KeyError(1)
p.close()
def test_close_resets_state(self):
p = self.Poller()
p.poller = Mock()
p._channels = Mock()
p._fd_to_chan = Mock()
p._chan_to_sock = Mock()
p._chan_to_sock.itervalues.return_value = []
p._chan_to_sock.values.return_value = [] # py3k
p.close()
p._channels.clear.assert_called_with()
p._fd_to_chan.clear.assert_called_with()
p._chan_to_sock.clear.assert_called_with()
def test_register_when_registered_reregisters(self):
p = self.Poller()
p.poller = Mock()
channel, client, type = Mock(), Mock(), Mock()
sock = client.connection._sock = Mock()
sock.fileno.return_value = 10
p._chan_to_sock = {(channel, client, type): 6}
p._register(channel, client, type)
p.poller.unregister.assert_called_with(6)
assert p._fd_to_chan[10] == (channel, type)
assert p._chan_to_sock[(channel, client, type)] == sock
p.poller.register.assert_called_with(sock, p.eventflags)
# when client not connected yet
client.connection._sock = None
def after_connected():
client.connection._sock = Mock()
client.connection.connect.side_effect = after_connected
p._register(channel, client, type)
client.connection.connect.assert_called_with()
def test_register_BRPOP(self):
p = self.Poller()
channel = Mock()
channel.client.connection._sock = None
p._register = Mock()
channel._in_poll = False
p._register_BRPOP(channel)
assert channel._brpop_start.call_count == 1
assert p._register.call_count == 1
channel.client.connection._sock = Mock()
p._chan_to_sock[(channel, channel.client, 'BRPOP')] = True
channel._in_poll = True
p._register_BRPOP(channel)
assert channel._brpop_start.call_count == 1
assert p._register.call_count == 1
def test_register_LISTEN(self):
p = self.Poller()
channel = Mock()
channel.subclient.connection._sock = None
channel._in_listen = False
p._register = Mock()
p._register_LISTEN(channel)
p._register.assert_called_with(channel, channel.subclient, 'LISTEN')
assert p._register.call_count == 1
assert channel._subscribe.call_count == 1
channel._in_listen = True
p._chan_to_sock[(channel, channel.subclient, 'LISTEN')] = 3
channel.subclient.connection._sock = Mock()
p._register_LISTEN(channel)
assert p._register.call_count == 1
assert channel._subscribe.call_count == 1
def create_get(self, events=None, queues=None, fanouts=None):
_pr = [] if events is None else events
_aq = [] if queues is None else queues
_af = [] if fanouts is None else fanouts
p = self.Poller()
p.poller = Mock()
p.poller.poll.return_value = _pr
p._register_BRPOP = Mock()
p._register_LISTEN = Mock()
channel = Mock()
p._channels = [channel]
channel.active_queues = _aq
channel.active_fanout_queues = _af
return p, channel
def test_get_no_actions(self):
p, channel = self.create_get()
with pytest.raises(redis.Empty):
p.get(Mock())
def test_qos_reject(self):
p, channel = self.create_get()
qos = redis.QoS(channel)
qos.ack = Mock(name='Qos.ack')
qos.reject(1234)
qos.ack.assert_called_with(1234)
def test_get_brpop_qos_allow(self):
p, channel = self.create_get(queues=['a_queue'])
channel.qos.can_consume.return_value = True
with pytest.raises(redis.Empty):
p.get(Mock())
p._register_BRPOP.assert_called_with(channel)
def test_get_brpop_qos_disallow(self):
p, channel = self.create_get(queues=['a_queue'])
channel.qos.can_consume.return_value = False
with pytest.raises(redis.Empty):
p.get(Mock())
p._register_BRPOP.assert_not_called()
def test_get_listen(self):
p, channel = self.create_get(fanouts=['f_queue'])
with pytest.raises(redis.Empty):
p.get(Mock())
p._register_LISTEN.assert_called_with(channel)
def test_get_receives_ERR(self):
p, channel = self.create_get(events=[(1, eventio.ERR)])
p._fd_to_chan[1] = (channel, 'BRPOP')
with pytest.raises(redis.Empty):
p.get(Mock())
channel._poll_error.assert_called_with('BRPOP')
def test_get_receives_multiple(self):
p, channel = self.create_get(events=[(1, eventio.ERR),
(1, eventio.ERR)])
p._fd_to_chan[1] = (channel, 'BRPOP')
with pytest.raises(redis.Empty):
p.get(Mock())
channel._poll_error.assert_called_with('BRPOP')
class test_Mutex:
def test_mutex(self, lock_id='xxx'):
client = Mock(name='client')
lock = client.lock.return_value = Mock(name='lock')
# Won
lock.acquire.return_value = True
held = False
with redis.Mutex(client, 'foo1', 100):
held = True
assert held
lock.acquire.assert_called_with(blocking=False)
client.lock.assert_called_with('foo1', timeout=100)
client.reset_mock()
lock.reset_mock()
# Did not win
lock.acquire.return_value = False
held = False
with pytest.raises(redis.MutexHeld):
with redis.Mutex(client, 'foo1', 100):
held = True
assert not held
lock.acquire.assert_called_with(blocking=False)
client.lock.assert_called_with('foo1', timeout=100)
client.reset_mock()
lock.reset_mock()
# Wins but raises LockNotOwnedError (and that is ignored)
lock.acquire.return_value = True
lock.release.side_effect = redis.redis.exceptions.LockNotOwnedError()
held = False
with redis.Mutex(client, 'foo1', 100):
held = True
assert held
class test_RedisSentinel:
def test_method_called(self):
from kombu.transport.redis import SentinelChannel
with patch.object(SentinelChannel, '_sentinel_managed_pool') as p:
connection = Connection(
'sentinel://localhost:65534/',
transport_options={
'master_name': 'not_important',
},
)
connection.channel()
p.assert_called()
def test_getting_master_from_sentinel(self):
with patch('redis.sentinel.Sentinel') as patched:
connection = Connection(
'sentinel://localhost/;'
'sentinel://localhost:65532/;'
'sentinel://user@localhost:65533/;'
'sentinel://:password@localhost:65534/;'
'sentinel://user:password@localhost:65535/;',
transport_options={
'master_name': 'not_important',
},
)
connection.channel()
patched.assert_called_once_with(
[
('localhost', 26379),
('localhost', 65532),
('localhost', 65533),
('localhost', 65534),
('localhost', 65535),
],
connection_class=mock.ANY, db=0, max_connections=10,
min_other_sentinels=0, password=None, sentinel_kwargs=None,
socket_connect_timeout=None, socket_keepalive=None,
socket_keepalive_options=None, socket_timeout=None,
retry_on_timeout=None)
master_for = patched.return_value.master_for
master_for.assert_called()
master_for.assert_called_with('not_important', ANY)
master_for().connection_pool.get_connection.assert_called()
def test_getting_master_from_sentinel_single_node(self):
with patch('redis.sentinel.Sentinel') as patched:
connection = Connection(
'sentinel://localhost:65532/',
transport_options={
'master_name': 'not_important',
},
)
connection.channel()
patched.assert_called_once_with(
[('localhost', 65532)],
connection_class=mock.ANY, db=0, max_connections=10,
min_other_sentinels=0, password=None, sentinel_kwargs=None,
socket_connect_timeout=None, socket_keepalive=None,
socket_keepalive_options=None, socket_timeout=None,
retry_on_timeout=None)
master_for = patched.return_value.master_for
master_for.assert_called()
master_for.assert_called_with('not_important', ANY)
master_for().connection_pool.get_connection.assert_called()
def test_can_create_connection(self):
from redis.exceptions import ConnectionError
connection = Connection(
'sentinel://localhost:65534/',
transport_options={
'master_name': 'not_important',
},
)
with pytest.raises(ConnectionError):
connection.channel()
def test_missing_master_name_transport_option(self):
connection = Connection(
'sentinel://localhost:65534/',
)
with patch('redis.sentinel.Sentinel'), \
pytest.raises(ValueError) as excinfo:
connection.connect()
expected = "'master_name' transport option must be specified."
assert expected == excinfo.value.args[0]
def test_sentinel_with_ssl(self):
ssl_params = {
'ssl_cert_reqs': 2,
'ssl_ca_certs': '/foo/ca.pem',
'ssl_certfile': '/foo/cert.crt',
'ssl_keyfile': '/foo/pkey.key'
}
with patch('redis.sentinel.Sentinel') as patched:
with Connection(
'sentinel://',
transport_options={'master_name': 'not_important'},
ssl=ssl_params) as conn:
params = conn.default_channel._connparams()
assert params['ssl_cert_reqs'] == ssl_params['ssl_cert_reqs']
assert params['ssl_ca_certs'] == ssl_params['ssl_ca_certs']
assert params['ssl_certfile'] == ssl_params['ssl_certfile']
assert params['ssl_keyfile'] == ssl_params['ssl_keyfile']
assert params.get('ssl') is None
from kombu.transport.redis import SentinelManagedSSLConnection
assert (params['connection_class'] is
SentinelManagedSSLConnection)
|
ZoranPavlovic/kombu
|
t/unit/transport/test_redis.py
|
Python
|
bsd-3-clause
| 49,876
|
#!/usr/bin/env python
# Written by Oliver Beckstein, 2014
# Placed into the Public Domain
from __future__ import print_function
import sys
import subprocess
import socket
DEFAULTS = {'queuename': ["workstations.q"],
'machine': socket.getfqdn(),
'deltatime': 4,
}
class GEqueue(object):
def __init__(self, name):
self.name = name
def issuspended(self):
"""Return ``True`` if the queue is in the s(uspended) state."""
cmd = subprocess.Popen(["qselect", "-qs", "s", "-q", self.name], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = cmd.communicate()
# return True if qselect found this queue amongst the suspended ones
return cmd.returncode == 0
def suspend(self):
rc = subprocess.call(["qmod", "-s", self.name])
return rc == 0
def unsuspend(self):
rc = subprocess.call(["qmod", "-us", self.name])
return rc == 0
def schedule_unsuspend(self, time="21:00"):
"""Run the 'at' command at *time* to unsuspend the queue.
*time* should be a time string understood by at, e.g., 'now +1 h'
or 'today 9pm'.
"""
cmd = subprocess.Popen(["at", str(time)], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = cmd.communicate("qmod -us {0}".format(self.name))
return cmd.returncode
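# Usage sketch (illustrative, not part of the original script): suspend a queue
# instance and schedule it to come back after 30 minutes; the queue name below
# is made up.
#
#     q = GEqueue("workstations.q@node1.example.com")
#     if not q.issuspended():
#         q.suspend()
#         q.schedule_unsuspend(time="now + 30 min")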
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser(description="Suspend the queue QUEUENAME on MACHINE "
                                     "until TIME hours have passed or until you run "
"qsuspend again. "
"Note that the executing user has to be a Gridengine "
"admin or the script must be run through 'sudo'. "
"If you cannot run it, talk to a sysadmin.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("machine", metavar="MACHINE", nargs="?",
default=DEFAULTS['machine'],
help="Fully qualified hostname where the queue QUEUENAME should be suspended")
parser.add_argument("-q", "--queue-name", metavar="QUEUENAME", nargs='*', dest="queuename",
default=DEFAULTS['queuename'],
help="Name of the Gridengine queue instance.")
parser.add_argument("-t", "--time", metavar="TIME", type=float, dest="time",
default=DEFAULTS['deltatime'],
help="Suspended queues are automatically unsuspended after that many hours. "
"The maximum allowed value is 8 (hours).")
args = parser.parse_args()
# check unsuspend time is reasonable
if args.time > 8:
print("Maximum suspend time exceeded: set to 8h")
args.time = 8.
elif args.time < 0:
print("ERROR: Suspend time must be >= 0")
sys.exit(1)
for queue in args.queuename:
queuename = queue+"@"+args.machine
q = GEqueue(queuename)
if not q.issuspended():
# suspend
success = q.suspend()
if success:
print("Suspended queue {0}".format(queuename))
minutes = int(args.time * 60)
q.schedule_unsuspend(time="now + {0} min".format(minutes))
print("Will automatically unsuspend the queue after {0} hours".format(args.time))
else:
success = q.unsuspend()
if success:
print("Unsuspended queue {0}".format(queuename))
|
Becksteinlab/queuetools
|
bin/qsuspend.py
|
Python
|
bsd-3-clause
| 3,709
|
def extractFujiboytlWordpressCom(item):
'''
Parser for 'fujiboytl.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
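# Usage sketch (illustrative): the surrounding feed pipeline is expected to call
# this parser with an item dict carrying at least 'title' and 'tags'; the
# helpers extractVolChapterFragmentPostfix and buildReleaseMessageWithType come
# from that pipeline's shared namespace. The values below are invented.
#
#     item = {'title': 'Some Series Vol. 2 Chapter 15', 'tags': ['PRC']}
#     release = extractFujiboytlWordpressCom(item)  # release message, or False/None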
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractFujiboytlWordpressCom.py
|
Python
|
bsd-3-clause
| 558
|
"""
----------------------------------------------------------------------
Authors: Jan-Justin van Tonder
----------------------------------------------------------------------
Unit tests for the Verify Text module.
----------------------------------------------------------------------
"""
import pytest
from hutts_verification.verification.text_verify import TextVerify
def test_verify_blank():
"""
Tests the verify function with blank args.
"""
verifier = TextVerify()
assert verifier.verify({}, {}) == (False, 0.0)
def test_verify_default():
"""
Tests the return value of the verify function with default args.
"""
verifier = TextVerify()
extracted_info = {
'identity_number': '7101135111011',
'surname': 'Doe',
'names': 'John-Michael Robert',
'sex': '####################################',
'date_of_birth': '71-01-13',
'country_of_birth': 'RSA',
'status': 'Citizen',
'nationality': 'RSA'
}
verifier_info = {
'identity_number': '7101135111011',
'surname': 'Doe',
'names': 'John-Michael Robert',
'sex': 'M',
'date_of_birth': '71-01-13',
'country_of_birth': 'RSA',
'status': 'Citizen',
'nationality': 'RSA'
}
assert verifier.verify(extracted_info, verifier_info) == (True, 87.5)
def test_verify_default_no_match():
"""
Tests the return value of the verify function with default args.
In this case we expect a 0% match.
"""
verifier = TextVerify()
extracted_info = {
'identity_number': '####################################',
'surname': '####################################',
'names': '####################################',
'sex': '####################################',
'date_of_birth': '####################################',
'country_of_birth': '####################################',
'status': '####################################',
'nationality': '####################################'
}
verifier_info = {
'identity_number': '7101135111011',
'surname': 'Doe',
'names': 'John-Michael Robert',
'sex': 'M',
'date_of_birth': '71-01-13',
'country_of_birth': 'RSA',
'status': 'Citizen',
'nationality': 'RSA'
}
assert verifier.verify(extracted_info, verifier_info) == (False, 0.0)
def test_verify_default_half_match():
"""
Tests the return value of the verify function with default args.
In this case we expect a 50% match.
"""
verifier = TextVerify()
extracted_info = {
'identity_number': '7101135111011',
'surname': 'Doe',
'names': 'John-Michael Robert',
'sex': 'M',
'date_of_birth': '####################################',
'country_of_birth': '####################################',
'status': '####################################',
'nationality': '####################################'
}
verifier_info = {
'identity_number': '7101135111011',
'surname': 'Doe',
'names': 'John-Michael Robert',
'sex': 'M',
'date_of_birth': '71-01-13',
'country_of_birth': 'RSA',
'status': 'Citizen',
'nationality': 'RSA'
}
assert verifier.verify(extracted_info, verifier_info) == (False, 50.0)
def test_verify_default_full_match():
"""
Tests the return value of the verify function with default args.
In this case we expect a 100% match.
"""
verifier = TextVerify()
extracted_info = {
'identity_number': '7101135111011',
'surname': 'Doe',
'names': 'John-Michael Robert',
'sex': 'M',
'date_of_birth': '71-01-13',
'country_of_birth': 'RSA',
'status': 'Citizen',
'nationality': 'RSA'
}
verifier_info = {
'identity_number': '7101135111011',
'surname': 'Doe',
'names': 'John-Michael Robert',
'sex': 'M',
'date_of_birth': '71-01-13',
'country_of_birth': 'RSA',
'status': 'Citizen',
'nationality': 'RSA'
}
assert verifier.verify(extracted_info, verifier_info) == (True, 100.0)
def test_verify_threshold():
"""
Tests the return value of the verify function with a specified threshold arg.
"""
verifier = TextVerify()
extracted_info = {
'identity_number': '7101135111011',
'surname': 'Doe',
'names': 'John-Michael Robert',
'sex': '####################################',
'date_of_birth': '71-01-13',
'country_of_birth': 'RSA',
'status': 'Citizen',
'nationality': 'RSA'
}
verifier_info = {
'identity_number': '7101135111011',
'surname': 'Doe',
'names': 'John-Michael Robert',
'sex': 'M',
'date_of_birth': '71-01-13',
'country_of_birth': 'RSA',
'status': 'Citizen',
'nationality': 'RSA'
}
assert verifier.verify(extracted_info, verifier_info, threshold=90.0) == (False, 87.5)
def test_verify_min_matches():
"""
Tests the return value of the verify function with a specified minimum number of matches arg.
"""
verifier = TextVerify()
extracted_info = {
'identity_number': '7101135111011',
'surname': 'Doe',
'names': 'John-Michael Robert',
'sex': 'M',
'not valid': '####################################',
'not legit': '####################################',
'not gonna work': '####################################',
'try again': '####################################'
}
verifier_info = {
'identity_number': '7101135111011',
'surname': 'Doe',
'names': 'John-Michael Robert',
'sex': 'M',
'date_of_birth': '71-01-13',
'country_of_birth': 'RSA',
'status': 'Citizen',
'nationality': 'RSA'
}
assert verifier.verify(extracted_info, verifier_info, min_matches=6) == (False, 0.0)
def test_verify_verbose_1():
"""
Tests the return value of the verify function with a specified verbose arg.
"""
verifier = TextVerify()
extracted_info = {
'identity_number': '7101135111011',
'surname': 'Door',
'names': 'John-Michael Robert',
'sex': 'M',
'date_of_birth': '71-01-13',
'country_of_birth': 'GRSAGT',
'status': 'Cytyziny',
'nationality': 'RSA'
}
verifier_info = {
'identity_number': '7101135111011',
'surname': 'Doe',
'names': 'John-Michael Robert',
'sex': 'M',
'date_of_birth': '71-01-13',
'country_of_birth': 'RSA',
'status': 'Citizen',
'nationality': 'RSA'
}
assert verifier.verify(extracted_info, verifier_info, verbose=True) == (True, {
'identity_number': {
'match_percentage': 100.0,
'verifier_field_value': '7101135111011',
'extracted_field_value': '7101135111011'
},
'surname': {
'match_percentage': 57.14,
'verifier_field_value': 'Doe',
'extracted_field_value': 'Door'
},
'names': {
'match_percentage': 100.0,
'verifier_field_value': 'John-Michael Robert',
'extracted_field_value': 'John-Michael Robert'
},
'sex': {
'match_percentage': 100.0,
'verifier_field_value': 'M',
'extracted_field_value': 'M'
},
'date_of_birth': {
'match_percentage': 100.0,
'verifier_field_value': '71-01-13',
'extracted_field_value': '71-01-13'
},
'country_of_birth': {
'match_percentage': 66.67,
'verifier_field_value': 'RSA',
'extracted_field_value': 'GRSAGT'
},
'status': {
'match_percentage': 53.33,
'verifier_field_value': 'Citizen',
'extracted_field_value': 'Cytyziny'
},
'nationality': {
'match_percentage': 100.0,
'verifier_field_value': 'RSA',
'extracted_field_value': 'RSA'
},
'total': 84.64
})
def test_verify_verbose_2():
"""
Tests the return value of the verify function with a specified verbose arg.
"""
verifier = TextVerify()
extracted_info = {
'identity_number': '7101135111011',
'surname': 'Door',
'names': 'John-Michael Robert',
'sex': 'M',
'date_of_birth': '71-01-13',
'country_of_birth': 'GRSAGT',
'nationality': 'RSA'
}
verifier_info = {
'identity_number': '7101135111011',
'surname': 'Doe',
'names': 'John-Michael Robert',
'sex': 'M',
'date_of_birth': '71-01-13',
'country_of_birth': 'RSA',
'status': 'Citizen',
}
assert verifier.verify(extracted_info, verifier_info, verbose=True) == (True, {
'identity_number': {
'match_percentage': 100.0,
'verifier_field_value': '7101135111011',
'extracted_field_value': '7101135111011'
},
'surname': {
'match_percentage': 57.14,
'verifier_field_value': 'Doe',
'extracted_field_value': 'Door'
},
'names': {
'match_percentage': 100.0,
'verifier_field_value': 'John-Michael Robert',
'extracted_field_value': 'John-Michael Robert'
},
'sex': {
'match_percentage': 100.0,
'verifier_field_value': 'M',
'extracted_field_value': 'M'
},
'date_of_birth': {
'match_percentage': 100.0,
'verifier_field_value': '71-01-13',
'extracted_field_value': '71-01-13'
},
'country_of_birth': {
'match_percentage': 66.67,
'verifier_field_value': 'RSA',
'extracted_field_value': 'GRSAGT'
},
'status': {
'match_percentage': None,
'verifier_field_value': 'Citizen',
'extracted_field_value': None
},
'nationality': {
'match_percentage': None,
'verifier_field_value': None,
'extracted_field_value': 'RSA'
},
'total': 87.3
})
def test_verify_invalid_arg_extracted_1():
"""
Tests to see if the verify function raises the correct exception for an invalid extracted arg.
"""
verifier = TextVerify()
with pytest.raises(TypeError):
verifier.verify('not quite', {})
def test_verify_invalid_arg_extracted_2():
"""
Tests to see if the verify function raises the correct exception for an invalid extracted arg.
"""
verifier = TextVerify()
with pytest.raises(TypeError):
verifier.verify({'identity_number': 1234}, {'identity_number': '7101135111011'})
def test_verify_invalid_arg_verifier_1():
"""
Tests to see if the verify function raises the correct exception for an invalid verifier arg.
"""
verifier = TextVerify()
with pytest.raises(TypeError):
verifier.verify({}, 'does not seem legit')
def test_verify_invalid_arg_verifier_2():
"""
Tests to see if the verify function raises the correct exception for an invalid verifier arg.
"""
verifier = TextVerify()
with pytest.raises(TypeError):
verifier.verify({'names': 'John Legit'}, {'names': ['not', 'John', 'legit']})
def test_verify_invalid_arg_threshold():
"""
Tests to see if the verify function raises the correct exception for an invalid threshold arg.
"""
verifier = TextVerify()
with pytest.raises(TypeError):
verifier.verify({}, {}, threshold=['nope'])
def test_verify_invalid_arg_min_matches():
"""
Tests to see if the verify function raises the correct exception for an invalid min_matches arg.
"""
verifier = TextVerify()
with pytest.raises(TypeError):
verifier.verify({}, {}, min_matches=['nope again'])
def test_verify_invalid_arg_verbose():
"""
Tests to see if the verify function raises the correct exception for an invalid verbose arg.
"""
verifier = TextVerify()
with pytest.raises(TypeError):
verifier.verify({}, {}, verbose='nah fam')
|
javaTheHutts/Java-the-Hutts
|
src/unittest/python/test_text_verify.py
|
Python
|
bsd-3-clause
| 12,383
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Compare orientation matrices
Uses the mat3 and vec3 classes from Python Computer Graphics Kit v1.2.0
module by Matthias Baas (see http://cgkit.sourceforge.net).
License: http://www.opensource.org/licenses/bsd-license.php
"""
__author__ = "Pierre Legrand (pierre.legrand \at synchrotron-soleil.fr)"
__date__ = "22-03-2011"
__copyright__ = "Copyright (c) 2011 Pierre Legrand"
__license__ = "New BSD License"
__version__ = "0.0.1"
import sys
import os
import math
from XOconv import *
from AxisAndAngle import axis_and_angle, R2D
_progname = os.path.split(sys.argv[0])[1]
_usage = """
compare crystal orientation matrices.
A program to convert the orientation matrix, extract information
from XDS output files and write a mosflm input file:
USAGE: %s [OPTION]... FILE
FILE can be one of these XDS output files:
XPARM.XDS, GXPARM.XDS, IDXREF.LP, CORRECT.LP
OPTIONS:
-a
--angles
Writes out the crystal orientation in xds2mos.umat as
      setting angles (default is as a U matrix)
-h
--help
Print this help message.
""" % _progname
def get_par(_str,match,limit=70,func=float):
start = _str.index(match)+len(match)
tmp = _str[start:start+limit].splitlines()[0].split()
return map(func,tmp)
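# Illustrative sanity check of get_par on a fabricated CORRECT.LP-style fragment (the
# real files are much larger): the value is read as everything up to `limit` characters
# after `match`, truncated at the first newline, split on whitespace and converted.
def _example_get_par_usage():
    sample = "CRYSTAL MOSAICITY (DEGREES)    0.250\n DETECTOR DISTANCE (mm)   190.18\n"
    assert get_par(sample, "CRYSTAL MOSAICITY (DEGREES)") == [0.25]
    assert get_par(sample, "DETECTOR DISTANCE (mm)") == [190.18]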
def parse_correct(infname="CORRECT.LP", infilelocation="."):
"Extract information from XDS output CORRECT.LP and INIT.LP"
# Extract things from CORRECT.LP
corr = openReadClose(os.path.join(infilelocation, infname))
ip = corr.index("PARAMETERS USING ALL IMAGES")+100
corrp = corr[ip:ip+1400]
corri = corr[:1500]
corr_dict = {}
corr_dict["rot"] = get_par(corrp,"ROTATION AXIS")
corr_dict["beam"] = get_par(corrp,"COORDINATES (REC. ANGSTROEM)")
corr_dict["distance"] = get_par(corrp,"DETECTOR DISTANCE (mm)")[0]
corr_dict["origin"] = get_par(corrp,"(PIXELS) OF DIRECT BEAM")
corr_dict["originXDS"] = get_par(corrp,"ORIGIN (PIXELS) AT ")
corr_dict["A"] = get_par(corrp,"CELL A-AXIS")
corr_dict["B"] = get_par(corrp,"CELL B-AXIS")
corr_dict["C"] = get_par(corrp,"CELL C-AXIS")
corr_dict["cell"] = get_par(corrp,"UNIT CELL PARAMETERS")
corr_dict["mosaicity"] = get_par(corrp,"CRYSTAL MOSAICITY (DEGREES)")[0]
iqx, iqy = corri.index("QX=")+3, corri.index("QY=")+3
inx, iny = corri.index("NX=")+3, corri.index("NY=")+3
corr_dict["pixel_size"] = float(corri[iqx:iqx+9]),float(corri[iqy:iqy+9])
corr_dict["pixel_numb"] = int(corri[inx:inx+7]),int(corri[iny:iny+7])
corr_dict["template"] = get_par(corri, "_DATA_FRAMES=",60,str)[0].replace("?","#")
corr_dict["symmetry"] = int(get_par(corrp,"SPACE GROUP NUMBER")[0])
corr_dict["detector_type"] = get_par(corri,"DETECTOR=",40,str)[0]
corr_dict["detector_X"] = get_par(corri,"DETECTOR_X-AXIS=")
corr_dict["detector_Y"] = get_par(corri,"DETECTOR_Y-AXIS=")
corr_dict["phi_init"] = get_par(corri,"STARTING_ANGLE=",15)[0]
corr_dict["num_init"] = get_par(corri,"STARTING_FRAME=",15,int)[0]
corr_dict["delta_phi"] = get_par(corri,"OSCILLATION_RANGE=",15)[0]
corr_dict["divergence_esd"] = get_par(corri,"BEAM_DIVERGENCE_E.S.D.=",15)[0]
corr_dict["resolution_range"] = get_par(corri,"INCLUDE_RESOLUTION_RANGE=",20)
corr_dict["friedel"] = get_par(corri,"FRIEDEL'S_LAW=",7,str)[0]
corr_dict["polarization"] = get_par(corri,"FRACTION_OF_POLARIZATION=",8)[0]
return corr_dict
def parse_integrate(infname="INTEGRATE.LP", infilelocation="."):
"Extract information from XDS output CORRECT.LP and INIT.LP"
# Extract things from CORRECT.LP
integ = openReadClose(os.path.join(infilelocation, infname))
integs = integ.split(" PROCESSING OF IMAGES ")[1:]
integi = integ[:1500]
all_par_dicts = []
for integp in integs:
par_dict = {}
tag_num = integp[:25].split()
#image_number = int(tag_num[0]), int(tag_num[2])
par_dict["image_integ_start"] = int(tag_num[0])
par_dict["rot"] = get_par(integp,"ROTATION AXIS")
par_dict["beam"] = get_par(integp,"COORDINATES (REC. ANGSTROEM)")
par_dict["distance"] = get_par(integp,"DETECTOR DISTANCE (mm)")[0]
par_dict["origin"] = get_par(integp,"(PIXELS) OF DIRECT BEAM")
par_dict["originXDS"] = get_par(integp,"ORIGIN (PIXELS) AT ")
par_dict["A"] = get_par(integp,"CELL A-AXIS")
par_dict["B"] = get_par(integp,"CELL B-AXIS")
par_dict["C"] = get_par(integp,"CELL C-AXIS")
par_dict["cell"] = get_par(integp,"UNIT CELL PARAMETERS")
par_dict["mosaicity"] = get_par(integp,"CRYSTAL MOSAICITY (DEGREES)")[0]
iqx, iqy = integi.index("QX=")+3, integi.index("QY=")+3
inx, iny = integi.index("NX=")+3, integi.index("NY=")+3
par_dict["pixel_size"] = float(integi[iqx:iqx+9]),float(integi[iqy:iqy+9])
par_dict["pixel_numb"] = int(integi[inx:inx+7]),int(integi[iny:iny+7])
par_dict["template"] = get_par(integi, "_DATA_FRAMES=",60,str)[0].replace("?","#")
par_dict["symmetry"] = int(get_par(integp,"SPACE GROUP NUMBER")[0])
#par_dict["detector_type"] = get_par(integi,"DETECTOR=",40,str)[0]
#par_dict["detector_X"] = get_par(integi,"DETECTOR_X-AXIS=")
#par_dict["detector_Y"] = get_par(integi,"DETECTOR_Y-AXIS=")
par_dict["phi_init"] = get_par(integi,"STARTING_ANGLE=",9)[0]
par_dict["num_init"] = get_par(integi,"STARTING_FRAME=",9,int)[0]
par_dict["delta_phi"] = get_par(integi,"OSCILLATION_RANGE=",9)[0]
#par_dict["divergence_esd"] = get_par(integi,"BEAM_DIVERGENCE_E.S.D.=",9)[0]
all_par_dicts.append(par_dict)
return all_par_dicts
def PARS_xds2mos(xdsPar):
"Convert XDS output parameters to Mosflm input parameters."
mosPar = {}
mosPar["title"] = "xds2mos version: %s" % (__version__)
mosPar["distance"] = abs(xdsPar["distance"])
mosPar["wavelength"] = 1/vec3(xdsPar["beam"]).length()
mosPar["symmetry"] = spg_num2symb[xdsPar["symmetry"]]
mosPar["omega"] = xdsPar["omega"]*r2d
mosPar["twotheta"] = xdsPar["twotheta"]*r2d
xc = xdsPar["origin"][0]
yc = xdsPar["origin"][1]
cosOmega = math.cos(xdsPar["omega"])
sinOmega = math.sin(xdsPar["omega"])
mosPar["beam_x"] = xc*cosOmega + yc*sinOmega
mosPar["beam_y"] = xc*sinOmega + yc*cosOmega
if "detector_type" in xdsPar.keys():
mosPar["detector"] = detector2scanner[xdsPar["detector_type"]]
mosPar["pixel_x"] = xdsPar["pixel_size"][1]
mosPar["pixel_y"] = xdsPar["pixel_size"][0]
mosPar["template"] = xdsPar["template"]
mosPar["extention"] = xdsPar["template"].split(".")[-1]
mosPar["image_numb"] = xdsPar["num_init"]
mosPar["phi_i"] = xdsPar["phi_init"]
mosPar["phi_f"] = xdsPar["phi_init"] + xdsPar["delta_phi"]
if "mosaicity" in xdsPar:
mosPar["mosaicity"] = mosaicity_conversion_factor*xdsPar["mosaicity"]
return mosPar
def openReadClose(filename):
f = open(filename)
r = f.read()
f.close()
return r
def printmat(mat, name="", format="%12.8f"):
if name: print "%s" % (name)
if isinstance(mat, mat3):
for i in 0,1,2: print 3*format % tuple(mat.getRow(i))
else:
for l in mat: print 3*format % tuple(l)
if __name__=='__main__':
import getopt
_debug = False
_write_out_angles = False
DO_PG_PERMUTATIONS = True
_start_mosflm = False
_verbose = False
short_opt = "ahpsv"
long_opt = ["angles", "help", "pg-permutations", "start-mosflm", "verbose"]
try:
opts, inputf = getopt.getopt(sys.argv[1:], short_opt, long_opt)
except getopt.GetoptError:
# print help information and exit:
print _usage
sys.exit(2)
for o, a in opts:
if o in ("-v", "--verbose"):
_verbose = True
if o in ("-h", "--help"):
print _usage
sys.exit()
print "\n XOintegrate_drift version: %s\n" % (__version__)
print " Extracting orientations from:\t\t%s" % inputf[0:]
if "CORRECT.LP" not in inputf:
print "ERROR: Can't read CORRECT.LP."
sys.exit()
else:
XOparser = XDSParser("CORRECT.LP")
Bmos = BusingLevy(XOparser.cell_r)
#corr_par_dict = parse_correct()
corr_par_dict = XOparser.dict
#UBmos = XOparser.UBxds_to_mos()/XOparser.dict["wavelength"]
#Umos = (UBmos) * Bmos.inverse()
#is_orthogonal(Umos)
A = vec3(corr_par_dict["A"])
B = vec3(corr_par_dict["B"])
C = vec3(corr_par_dict["C"])
UBR = mat3(A, B, C).transpose()
printmat(UBR, '\n Reference UBR', "%12.6f")
    # apply symmetry operator permutation for easier comparison
# of INTEGRATE and CORRECT orientation matrices.
Up = mat3(vec3(-1, 0, 0), vec3(0, 1, 0), vec3(0, 0, -1))
UBR = Up * UBR
printmat(UBR, '\n Reference UBR after point-group permutation', "%12.6f")
if "INTEGRATE.LP" not in inputf:
print "ERROR: Can't read INTEGRATE.LP."
sys.exit()
else:
integ_par_dicts = parse_integrate()
integ_par_dict = integ_par_dicts[0]
A = vec3(integ_par_dict["A"])
B = vec3(integ_par_dict["B"])
C = vec3(integ_par_dict["C"])
UBR = mat3(A, B, C).transpose()
printmat(UBR, '\n UBR', "%12.6f")
for integ_par_dict in integ_par_dicts[1:]:
A = vec3(integ_par_dict["A"])
B = vec3(integ_par_dict["B"])
C = vec3(integ_par_dict["C"])
UBRi = mat3(A, B, C).transpose()
#printmat(UBRi, '\n UBR', "%12.6f")
Udiff = UBR * UBRi.inverse()
#printmat(Udiff, '\n U*U-1', "%12.6f")
axis, angle = axis_and_angle(Udiff)
#print "\n>>> DIFFERENCE_1:\n"
print "%4d " % integ_par_dict["image_integ_start"],
print " Axis_i: %9.5f%9.5f%9.5f" % tuple(axis),
print "Angle_i: %10.5f degree" % (angle*R2D)
|
jsburg/xdsme
|
XOconv/XOintegrate_drift.py
|
Python
|
bsd-3-clause
| 10,074
|
from django.test import TestCase
from django.urls import reverse
from django.contrib.auth import get_user_model
from petition.models import Organization, Petition, PytitionUser, Permission
from .utils import add_default_data
class EditPetitionViewTest(TestCase):
"""Test index view"""
@classmethod
def setUpTestData(cls):
add_default_data()
def login(self, name):
self.client.login(username=name, password=name)
self.pu = PytitionUser.objects.get(user__username=name)
return self.pu
def logout(self):
self.client.logout()
def tearDown(self):
# Clean up run after every test method.
pass
def test_edit_404(self):
""" Non-existent petition id : should return 404 """
self.login("julia")
response = self.client.get(reverse("edit_petition", args=[1000]))
self.assertEqual(response.status_code, 404)
def test_edit_200(self):
""" edit your own petition while being logged-in """
self.login('julia')
petition = self.pu.petition_set.first()
response = self.client.get(reverse("edit_petition", args=[petition.id]))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['petition'], petition)
self.assertTemplateUsed(response, "petition/edit_petition.html")
def test_edit_loggedout(self):
""" edit your own petition while being logged out """
self.login('julia')
petition = self.pu.petition_set.first()
self.logout()
response = self.client.get(reverse("edit_petition", args=[petition.id]), follow=True)
self.assertRedirects(response, reverse("login")+"?next="+reverse("edit_petition", args=[petition.id]))
def test_edit_notYourOwnPetition(self):
""" editing somebody else's petition """
self.login('julia')
max = PytitionUser.objects.get(user__username="max")
petition = max.petition_set.first()
response = self.client.get(reverse("edit_petition", args=[petition.id]), follow=True)
self.assertRedirects(response, reverse("user_dashboard"))
self.assertTemplateUsed(response, "petition/user_dashboard.html")
def test_edit_notInOrg(self):
""" editing a petition owned by an Organization the logged-in user is *NOT* part of """
self.login('sarah')
        at = Organization.objects.get(name='Les Amis de la Terre')
        petition = at.petition_set.first()
response = self.client.get(reverse("edit_petition", args=[petition.id]), follow=True)
self.assertRedirects(response, reverse("user_dashboard"))
self.assertTemplateUsed(response, "petition/user_dashboard.html")
def test_edit_InOrgButNoEditPermission(self):
"""
editing a petition owned by an Organization the logged-in user is part of
but without the can_modify_petitions permission
"""
max = self.login('max')
at = Organization.objects.get(name='Les Amis de la Terre')
perm = Permission.objects.get(organization=at, user=max)
perm.can_modify_petitions = False
perm.save()
petition = at.petition_set.first()
response = self.client.get(reverse("edit_petition", args=[petition.id]), follow=True)
self.assertRedirects(response, reverse("user_dashboard"))
def test_edit_InOrgWithEditPerm(self):
"""
editing a petition owned by an Organization the logged-in user is part of
*AND* with the can_modify_petitions permission
"""
self.login('julia')
at = Organization.objects.get(name='Les Amis de la Terre')
petition = at.petition_set.first()
response = self.client.get(reverse("edit_petition", args=[petition.id]))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['petition'], petition)
self.assertTemplateUsed(response, "petition/edit_petition.html")
def test_edit_post_content_form(self):
julia = self.login('julia')
org = Organization.objects.get(name='RAP')
content_form_data = {
'content_form_submitted': 'yes',
'title': 'toto',
'text': 'tata',
'side_text': 'titi',
'footer_text': 'tutu',
'footer_links': 'tyty',
'sign_form_footer': 'lorem',
'target': 4242,
}
# For an org template
p = Petition.objects.create(title="My Petition", org=org)
response = self.client.post(reverse("edit_petition", args=[p.id]), content_form_data)
self.assertEqual(response.status_code, 200)
p.refresh_from_db()
self.assertTemplateUsed(response, "petition/edit_petition.html")
self.assertEquals(response.context['content_form'].is_valid(), True)
self.assertEquals(response.context['content_form'].is_bound, True)
self.assertEquals(response.context['content_form_submitted'], True)
self.assertEquals(response.context['email_form_submitted'], False)
self.assertEquals(response.context['social_network_form_submitted'], False)
self.assertEquals(response.context['newsletter_form_submitted'], False)
        # For a user template
p2 = Petition.objects.create(title="My Petition 2", user=julia)
response = self.client.post(reverse("edit_petition", args=[p2.id]), content_form_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "petition/edit_petition.html")
p2.refresh_from_db()
for key, value in content_form_data.items():
if key == "content_form_submitted":
continue
self.assertEquals(getattr(p2, key), value)
self.assertEquals(getattr(p, key), value)
self.assertEquals(response.context['content_form'].is_valid(), True)
self.assertEquals(response.context['content_form'].is_bound, True)
self.assertEquals(response.context['content_form_submitted'], True)
self.assertEquals(response.context['email_form_submitted'], False)
self.assertEquals(response.context['social_network_form_submitted'], False)
self.assertEquals(response.context['newsletter_form_submitted'], False)
for key, value in content_form_data.items():
if key == "content_form_submitted":
continue
self.assertEquals(getattr(p, key), value)
self.assertEquals(getattr(p2, key), value)
def test_edit_petition_POST_email_form(self):
julia = self.login('julia')
org = Organization.objects.get(name='RAP')
email_form_data = {
'email_form_submitted': 'yes',
'confirmation_email_reply': 'toto@tata.com',
}
# For an org template
p = Petition.objects.create(title="My petition", org=org)
response = self.client.post(reverse("edit_petition", args=[p.id]), email_form_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "petition/edit_petition.html")
self.assertEquals(response.context['email_form'].is_valid(), True)
self.assertEquals(response.context['email_form'].is_bound, True)
self.assertEquals(response.context['content_form_submitted'], False)
self.assertEquals(response.context['email_form_submitted'], True)
self.assertEquals(response.context['social_network_form_submitted'], False)
self.assertEquals(response.context['newsletter_form_submitted'], False)
p.refresh_from_db()
        # For a user template
p2 = Petition.objects.create(title="My petition 2", user=julia)
response = self.client.post(reverse("edit_petition", args=[p2.id]), email_form_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "petition/edit_petition.html")
self.assertEquals(response.context['email_form'].is_valid(), True)
self.assertEquals(response.context['email_form'].is_bound, True)
self.assertEquals(response.context['content_form_submitted'], False)
self.assertEquals(response.context['email_form_submitted'], True)
self.assertEquals(response.context['social_network_form_submitted'], False)
self.assertEquals(response.context['newsletter_form_submitted'], False)
p2.refresh_from_db()
for key, value in email_form_data.items():
if key == "email_form_submitted":
continue
self.assertEquals(getattr(p2, key), value)
self.assertEquals(getattr(p, key), value)
def test_edit_petition_POST_social_network_form(self):
julia = self.login('julia')
org = Organization.objects.get(name='RAP')
social_network_form_data = {
'social_network_form_submitted': 'yes',
'twitter_description': 'This is my twitter desc!',
'twitter_image': 'My Twitter img!',
'org_twitter_handle': '@Rap_Asso',
}
# For an org template
p = Petition.objects.create(title="My petition", org=org)
response = self.client.post(reverse("edit_petition", args=[p.id]), social_network_form_data)
self.assertEqual(response.status_code, 200)
p.refresh_from_db()
self.assertTemplateUsed(response, "petition/edit_petition.html")
self.assertEquals(response.context['social_network_form'].is_valid(), True)
self.assertEquals(response.context['social_network_form'].is_bound, True)
self.assertEquals(response.context['content_form_submitted'], False)
self.assertEquals(response.context['email_form_submitted'], False)
self.assertEquals(response.context['social_network_form_submitted'], True)
self.assertEquals(response.context['newsletter_form_submitted'], False)
        # For a user template
p2 = Petition.objects.create(title="My petition 2", user=julia)
response2 = self.client.post(reverse("edit_petition", args=[p2.id]), social_network_form_data)
        self.assertEqual(response2.status_code, 200)
        self.assertTemplateUsed(response2, "petition/edit_petition.html")
p2.refresh_from_db()
for key, value in social_network_form_data.items():
if key == "social_network_form_submitted":
continue
self.assertEquals(getattr(p2, key), value)
self.assertEquals(getattr(p, key), value)
self.assertEquals(response2.context['social_network_form'].is_valid(), True)
self.assertEquals(response2.context['social_network_form'].is_bound, True)
self.assertEquals(response2.context['content_form_submitted'], False)
self.assertEquals(response2.context['email_form_submitted'], False)
self.assertEquals(response2.context['social_network_form_submitted'], True)
self.assertEquals(response2.context['newsletter_form_submitted'], False)
def test_edit_template_POST_newsletter_form(self):
julia = self.login('julia')
org = Organization.objects.get(name='RAP')
newsletter_form_data = {
'newsletter_form_submitted': 'yes',
'has_newsletter': 'on',
'newsletter_subscribe_http_data': 'blah',
        'newsletter_subscribe_http_mailfield': 'bluh',
'newsletter_subscribe_mail_subject': 'bloh',
'newsletter_subscribe_mail_from': 'toto@titi.com',
'newsletter_subscribe_mail_to': 'titi@toto.com',
'newsletter_subscribe_method': 'POST',
'newsletter_subscribe_mail_smtp_host': 'localhost',
'newsletter_subscribe_mail_smtp_port': 1234,
'newsletter_subscribe_mail_smtp_user': 'root',
'newsletter_subscribe_mail_smtp_password': 'rootpassword',
'newsletter_subscribe_mail_smtp_tls': 'on',
'newsletter_subscribe_mail_smtp_starttls': '',
}
# For an org template
p = Petition.objects.create(title="My petition", org=org)
response = self.client.post(reverse("edit_petition", args=[p.id]), newsletter_form_data)
self.assertEqual(response.status_code, 200)
p.refresh_from_db()
self.assertTemplateUsed(response, "petition/edit_petition.html")
self.assertEquals(response.context['newsletter_form'].is_valid(), True)
self.assertEquals(response.context['newsletter_form'].is_bound, True)
self.assertEquals(response.context['content_form_submitted'], False)
self.assertEquals(response.context['email_form_submitted'], False)
self.assertEquals(response.context['social_network_form_submitted'], False)
self.assertEquals(response.context['newsletter_form_submitted'], True)
        # For a user template
p2 = Petition.objects.create(title="My petition 2", user=julia)
response2 = self.client.post(reverse("edit_petition", args=[p2.id]), newsletter_form_data)
        self.assertEqual(response2.status_code, 200)
        self.assertTemplateUsed(response2, "petition/edit_petition.html")
p2.refresh_from_db()
newsletter_form_data['has_newsletter'] = True
newsletter_form_data['newsletter_subscribe_mail_smtp_tls'] = True
newsletter_form_data['newsletter_subscribe_mail_smtp_starttls'] = False
for key, value in newsletter_form_data.items():
if key == "newsletter_form_submitted":
continue
self.assertEquals(getattr(p2, key), value)
self.assertEquals(getattr(p, key), value)
self.assertEquals(response2.context['newsletter_form'].is_valid(), True)
self.assertEquals(response2.context['newsletter_form'].is_bound, True)
self.assertEquals(response2.context['content_form_submitted'], False)
self.assertEquals(response2.context['email_form_submitted'], False)
self.assertEquals(response2.context['social_network_form_submitted'], False)
self.assertEquals(response2.context['newsletter_form_submitted'], True)
def test_edit_template_POST_style_form(self):
julia = self.login('julia')
org = Organization.objects.get(name='RAP')
style_form_data = {
'style_form_submitted': 'yes',
'bgcolor': '33ccff',
'linear_gradient_direction': 'to right',
'gradient_from': '0000ff',
'gradient_to': 'ff0000',
}
# For an org template
p = Petition.objects.create(title="My petition", org=org)
response = self.client.post(reverse("edit_petition", args=[p.id]), style_form_data)
self.assertEqual(response.status_code, 200)
p.refresh_from_db()
self.assertTemplateUsed(response, "petition/edit_petition.html")
self.assertEquals(response.context['style_form'].is_valid(), True)
self.assertEquals(response.context['style_form'].is_bound, True)
self.assertEquals(response.context['content_form_submitted'], False)
self.assertEquals(response.context['email_form_submitted'], False)
self.assertEquals(response.context['social_network_form_submitted'], False)
self.assertEquals(response.context['newsletter_form_submitted'], False)
self.assertEquals(response.context['style_form_submitted'], True)
        # For a user template
p2 = Petition.objects.create(title="My petition 2", user=julia)
response2 = self.client.post(reverse("edit_petition", args=[p2.id]), style_form_data)
        self.assertEqual(response2.status_code, 200)
        self.assertTemplateUsed(response2, "petition/edit_petition.html")
p2.refresh_from_db()
style_form_data['bgcolor'] = '#' + style_form_data['bgcolor']
style_form_data['gradient_from'] = '#' + style_form_data['gradient_from']
style_form_data['gradient_to'] = '#' + style_form_data['gradient_to']
for key, value in style_form_data.items():
if key == "style_form_submitted":
continue
self.assertEquals(getattr(p2, key), value)
self.assertEquals(getattr(p, key), value)
        self.assertEquals(response2.context['style_form'].is_valid(), True)
        self.assertEquals(response2.context['style_form'].is_bound, True)
self.assertEquals(response2.context['content_form_submitted'], False)
self.assertEquals(response2.context['email_form_submitted'], False)
self.assertEquals(response2.context['social_network_form_submitted'], False)
self.assertEquals(response2.context['newsletter_form_submitted'], False)
        self.assertEquals(response2.context['style_form_submitted'], True)
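# Hypothetical sketch (not Pytition's actual view code) of the dispatch pattern the tests
# above rely on: each POST carries a "<name>_form_submitted" marker, the view binds only
# the matching form and exposes one boolean "<name>_form_submitted" flag per form in the
# template context.
def _example_submitted_form_flags(post_data,
                                  form_names=('content', 'email', 'social_network',
                                              'newsletter', 'style')):
    return dict(('%s_form_submitted' % name, ('%s_form_submitted' % name) in post_data)
                for name in form_names)
# _example_submitted_form_flags({'style_form_submitted': 'yes', 'bgcolor': '33ccff'})
# -> {'content_form_submitted': False, ..., 'style_form_submitted': True}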
|
fallen/Pytition
|
pytition/petition/tests/tests_EditPetitionView.py
|
Python
|
bsd-3-clause
| 16,750
|
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test import TestCase
from registration.compat import User
from registration.forms import RegistrationForm
class SimpleBackendViewTests(TestCase):
urls = 'registration.backends.simple.urls'
def test_allow(self):
"""
The setting ``REGISTRATION_OPEN`` appropriately controls
whether registration is permitted.
"""
old_allowed = getattr(settings, 'REGISTRATION_OPEN', True)
settings.REGISTRATION_OPEN = True
resp = self.client.get(reverse('registration_register'))
self.assertEqual(200, resp.status_code)
settings.REGISTRATION_OPEN = False
# Now all attempts to hit the register view should redirect to
# the 'registration is closed' message.
resp = self.client.get(reverse('registration_register'))
self.assertRedirects(resp, reverse('registration_disallowed'))
resp = self.client.post(reverse('registration_register'),
data={'username': 'bob',
'email': 'bob@example.com',
'password1': 'secret',
'password2': 'secret'})
self.assertRedirects(resp, reverse('registration_disallowed'))
settings.REGISTRATION_OPEN = old_allowed
def test_registration_get(self):
"""
HTTP ``GET`` to the registration view uses the appropriate
template and populates a registration form into the context.
"""
resp = self.client.get(reverse('registration_register'))
self.assertEqual(200, resp.status_code)
self.assertTemplateUsed(resp,
'registration/registration_form.html')
self.failUnless(isinstance(resp.context['form'],
RegistrationForm))
def test_registration(self):
"""
Registration creates a new account and logs the user in.
"""
resp = self.client.post(reverse('registration_register'),
data={'username': 'bob',
'email': 'bob@example.com',
'password1': 'secret',
'password2': 'secret'})
new_user = User.objects.get(username='bob')
self.assertEqual(302, resp.status_code)
self.failUnless(new_user.get_absolute_url() in resp['Location'])
self.failUnless(new_user.check_password('secret'))
self.assertEqual(new_user.email, 'bob@example.com')
# New user must be active.
self.failUnless(new_user.is_active)
# New user must be logged in.
resp = self.client.get(reverse('registration_register'))
self.failUnless(resp.context['user'].is_authenticated())
def test_registration_failure(self):
"""
Registering with invalid data fails.
"""
resp = self.client.post(reverse('registration_register'),
data={'username': 'bob',
'email': 'bob@example.com',
'password1': 'secret',
'password2': 'notsecret'})
self.assertEqual(200, resp.status_code)
self.failIf(resp.context['form'].is_valid())
|
mattdeboard/django-registration
|
registration/tests/simple_backend.py
|
Python
|
bsd-3-clause
| 3,463
|
from django.contrib import admin
from .models import Organization, OrganizationProfile, Category, Person
from django.utils.translation import ugettext_lazy as _
@admin.register(Organization)
class OrganizationAdmin(admin.ModelAdmin):
list_display = ['short_name', 'krs', 'register_at', 'tag_list', 'category_list', 'is_active']
actions = ['make_active', 'make_inactive']
search_fields = ['name', 'address', 'krs']
list_filter = ['is_active', 'tags', 'categories', 'register_at']
preserve_filters = True
def get_queryset(self, request):
return super(OrganizationAdmin, self).get_queryset(request).prefetch_related('tags')
def tag_list(self, obj):
return u", ".join(o.name for o in obj.tags.all())
def category_list(self, obj):
return u", ".join(o.name for o in obj.categories.all())
def make_active(self, request, queryset):
queryset.update(is_active=True)
make_active.short_description = _('Mark selected organizations as active')
def make_inactive(self, request, queryset):
queryset.update(is_active=False)
make_inactive.short_description = _('Mark selected organizations as inactive')
@admin.register(OrganizationProfile)
class OrganizationProfileAdmin(admin.ModelAdmin):
# list_display = ['organization__short_name', 'organization__krs', 'organization__register_at',
# 'organization__tag_list', 'organization__is_active']
# search_fields = ['organization__name', 'organization__address']
pass
class CategoriesInline(admin.TabularInline):
model = Organization.categories.through
@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
inlines = [
CategoriesInline
]
@admin.register(Person)
class PersonAdmin(admin.ModelAdmin):
pass
|
rafal-jaworski/bazaNGObackend
|
src/bazango/contrib/organization/admin.py
|
Python
|
bsd-3-clause
| 1,802
|
from django.utils.translation import ugettext_lazy as _
import itertools
class DefaultRoles(object):
VIEWER = 'viewer'
OBSERVER = 'observer'
PARTICIPANT = 'participant'
PROPOSER = 'proposer'
CONTRIBUTOR = 'contributor'
EDITOR = 'editor'
OPERATOR = 'operator'
DECIDER = 'decider'
MANAGER = 'manager'
permissions = {}
permissions[VIEWER] = [
'communities.access_community',
'issues.viewclosed_issue',
'issues.viewclosed_proposal',
'meetings.view_meeting',
]
permissions[OBSERVER] = permissions[VIEWER] + [
'issues.viewopen_issue',
'issues.viewopen_proposal',
'communities.viewupcoming_community',
'issues.vote',
'issues.proposal_board_vote_self',
'issues.vote_ranking',
]
permissions[PARTICIPANT] = permissions[OBSERVER] + [
'issues.view_proposal_in_discussion',
'communities.viewupcoming_draft',
'issues.view_referendum_results',
'issues.view_update_status',
'issues.view_straw_vote_result',
]
permissions[PROPOSER] = permissions[PARTICIPANT] + [
'issues.add_proposal',
]
permissions[CONTRIBUTOR] = permissions[PROPOSER] + [
'issues.add_issue',
]
permissions[EDITOR] = permissions[CONTRIBUTOR] + [
'issues.editopen_issue',
'issues.editopen_proposal',
'issues.edittask_proposal',
]
permissions[OPERATOR] = permissions[CONTRIBUTOR] + [
'issues.add_issuecomment',
'issues.edittask_proposal',
'community.editupcoming_community',
'community.editparticipants_community',
'community.editsummary_community', # ???
'community.invite_member',
'issues.move_to_referendum',
'issues.proposal_board_vote',
]
permissions[DECIDER] = permissions[OPERATOR] + [
'issues.editopen_issuecomment',
'community.editagenda_community',
'issues.acceptopen_proposal',
'meetings.add_meeting', # == Close Meeting
'issues.edit_referendum',
'issues.chairman_vote',
'users.show_member_profile',
]
permissions[MANAGER] = permissions[DECIDER] + [
'issues.editopen_issue',
'issues.editclosed_issue',
'issues.editclosed_issuecomment',
'issues.editopen_proposal',
'issues.editclosed_proposal',
'issues.acceptclosed_proposal',
]
class DefaultGroups(object):
MEMBER = "member"
BOARD = "board"
SECRETARY = "secretary"
CHAIRMAN = "chairman"
permissions = {}
permissions[MEMBER] = frozenset(DefaultRoles.permissions[DefaultRoles.OBSERVER])
permissions[BOARD] = frozenset(DefaultRoles.permissions[DefaultRoles.PARTICIPANT])
permissions[SECRETARY] = frozenset(DefaultRoles.permissions[DefaultRoles.OPERATOR])
permissions[CHAIRMAN] = frozenset(DefaultRoles.permissions[DefaultRoles.DECIDER] +
DefaultRoles.permissions[DefaultRoles.EDITOR])
CHOICES = (
(MEMBER, _("member")),
(BOARD, _("board")),
(SECRETARY, _("secretary")),
(CHAIRMAN, _("chairman")),
)
ALL_PERMISSIONS = frozenset(itertools.chain(*DefaultGroups.permissions.values()))
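# Illustrative checks of the cumulative hierarchy defined above: every role's permission
# list builds on an earlier role's, and the group sets are frozen snapshots of a role
# (CHAIRMAN additionally merges in the EDITOR permissions). Hypothetical helper, not used
# by the application code.
def _example_permission_checks():
    assert 'issues.add_proposal' not in DefaultGroups.permissions[DefaultGroups.BOARD]
    assert 'issues.add_proposal' in DefaultGroups.permissions[DefaultGroups.CHAIRMAN]
    assert DefaultGroups.permissions[DefaultGroups.MEMBER] <= ALL_PERMISSIONS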
|
hasadna/OpenCommunity
|
src/users/default_roles.py
|
Python
|
bsd-3-clause
| 4,312
|
import contextlib
import errno
import gc
import os
import random
import socket
import sys
import traceback
import unittest
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import greenhouse
port = lambda: 8000 + os.getpid() # because i want to run multiprocess nose
TESTING_TIMEOUT = 0.05
GTL = greenhouse.Lock()
class StateClearingTestCase(unittest.TestCase):
def setUp(self):
GTL.acquire()
state = greenhouse.scheduler.state
state.awoken_from_events.clear()
state.timed_paused.clear()
state.paused[:] = []
state.descriptormap.clear()
state.to_run.clear()
del state.global_exception_handlers[:]
state.local_exception_handlers.clear()
del state.global_hooks[:]
state.local_to_hooks.clear()
state.local_from_hooks.clear()
state.raise_in_main = None
greenhouse.reset_poller()
def tearDown(self):
gc.collect()
GTL.release()
@contextlib.contextmanager
def socketpair(self):
server = greenhouse.Socket()
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
while 1:
try:
port = random.randrange(1025, 65536)
server.bind(("", port))
except socket.error, exc:
if exc.args[0] != errno.EADDRINUSE:
raise
else:
break
server.listen(5)
client = greenhouse.Socket()
client.connect(("", port))
handler, addr = server.accept()
server.close()
yield client, handler
client.close()
handler.close()
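# Stand-alone sketch (plain stdlib sockets, no greenhouse) of the bind-and-retry idea
# used by socketpair() above: keep picking random ports until bind() stops failing with
# EADDRINUSE, then listen. Hypothetical helper, shown only to make the pattern explicit.
def _example_bind_random_port(max_tries=20):
    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    for _ in xrange(max_tries):
        try:
            chosen = random.randrange(1025, 65536)
            listener.bind(("", chosen))
        except socket.error, exc:
            if exc.args[0] != errno.EADDRINUSE:
                raise
        else:
            listener.listen(5)
            return listener, chosen
    raise RuntimeError("no free port found")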
|
teepark/greenhouse
|
tests/test_base.py
|
Python
|
bsd-3-clause
| 1,699
|
#!/usr/bin/env python
import locale
import sys
import six
# Below causes issues in some locales and no one knows why it was included, so it is commented out for now
# locale.setlocale(locale.LC_NUMERIC, "")
class Table:
def format_num(self, num):
"""Format a number according to given places.
Adds commas, etc. Will truncate floats into ints!"""
try:
if "." in num:
inum = float(num)
return locale.format("%.2f", (0, inum), True)
else:
inum = int(num)
return locale.format("%.*f", (0, inum), True)
except (ValueError, TypeError):
return str(num.encode('utf-8')) if isinstance(num, six.string_types) else str(num)
def get_max_width(self, table, index):
"""Get the maximum width of the given column index"""
return max([len(self.format_num(row[index])) for row in table])
def pprint_table(self, table):
"""Prints out a table of data, padded for alignment
@param table: The table to print. A list of lists.
Each row must have the same number of columns. """
col_paddings = []
out = ""
for i in range(len(table[0])):
col_paddings.append(self.get_max_width(table, i))
for row in table:
# left col
out += str(row[0]).ljust(col_paddings[0] + 1)
# rest of the cols
for i in range(1, len(row)):
col = self.format_num(row[i]).rjust(col_paddings[i] + 2)
out += col
out += "\n"
return out
if __name__ == "__main__":
    T = Table()
    # Minimal stand-alone demo with a hypothetical sample table; every row has the
    # same number of columns, as pprint_table expects.
    table = [["column_a", "column_b", "column_c"],
             ["alpha", "10", "3.14"],
             ["beta", "200", "2.72"]]
    sys.stdout.write(T.pprint_table(table))
|
rigdenlab/ample
|
ample/util/printTable.py
|
Python
|
bsd-3-clause
| 1,850
|
# -*- coding: utf-8 -*-
"""
celery.task
~~~~~~~~~~~
This is the old task module, it should not be used anymore,
import from the main 'celery' module instead.
If you're looking for the decorator implementation then that's in
``celery.app.base.Celery.task``.
"""
from __future__ import absolute_import
from celery._state import current_app, current_task as current
from celery.__compat__ import MagicModule, recreate_module
from celery.local import Proxy
__all__ = [
'BaseTask', 'Task', 'PeriodicTask', 'task', 'periodic_task',
'group', 'chord', 'subtask', 'TaskSet',
]
STATICA_HACK = True
globals()['kcah_acitats'[::-1].upper()] = False
if STATICA_HACK:
# This is never executed, but tricks static analyzers (PyDev, PyCharm,
# pylint, etc.) into knowing the types of these symbols, and what
# they contain.
from celery.canvas import group, chord, subtask
from .base import BaseTask, Task, PeriodicTask, task, periodic_task
from .sets import TaskSet
class module(MagicModule):
def __call__(self, *args, **kwargs):
return self.task(*args, **kwargs)
old_module, new_module = recreate_module( # pragma: no cover
__name__,
by_module={
'celery.task.base': ['BaseTask', 'Task', 'PeriodicTask',
'task', 'periodic_task'],
'celery.canvas': ['group', 'chord', 'subtask'],
'celery.task.sets': ['TaskSet'],
},
base=module,
__package__='celery.task',
__file__=__file__,
__path__=__path__,
__doc__=__doc__,
current=current,
discard_all=Proxy(lambda: current_app.control.purge),
backend_cleanup=Proxy(
lambda: current_app.tasks['celery.backend_cleanup']
),
)
|
mozilla/firefox-flicks
|
vendor-local/lib/python/celery/task/__init__.py
|
Python
|
bsd-3-clause
| 1,731
|
# -*- coding: utf-8 -*-
import datetime
import hashlib
import itertools
import json
import os
import re
import time
import urlparse
import uuid
from django.conf import settings
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from django.core.files.storage import default_storage as storage
from django.core.urlresolvers import reverse
from django.db import models, transaction
from django.db.models import signals as dbsignals, Max, Q
from django.dispatch import receiver
from django.utils.translation import trans_real as translation
import caching.base as caching
import commonware.log
import json_field
from cache_nuggets.lib import memoize, memoize_key
from jinja2.filters import do_dictsort
from tower import ugettext as _
from tower import ugettext_lazy as _lazy
import amo
import mkt
from amo.utils import (JSONEncoder, slugify, smart_path, sorted_groupby,
urlparams)
from lib.crypto import packaged
from lib.iarc.client import get_iarc_client
from lib.iarc.utils import get_iarc_app_title, render_xml
from lib.utils import static_url
from mkt.access import acl
from mkt.constants import APP_FEATURES, apps, iarc_mappings
from mkt.constants.applications import DEVICE_TYPES
from mkt.constants.payments import PROVIDER_CHOICES
from mkt.files.models import File, nfd_str
from mkt.files.utils import parse_addon, WebAppParser
from mkt.prices.models import AddonPremium, Price
from mkt.ratings.models import Review
from mkt.regions.utils import parse_region
from mkt.site.decorators import skip_cache, use_master, write
from mkt.site.helpers import absolutify
from mkt.site.mail import send_mail
from mkt.site.models import (DynamicBoolFieldsMixin, ManagerBase, ModelBase,
OnChangeMixin)
from mkt.site.storage_utils import copy_stored_file
from mkt.tags.models import Tag
from mkt.translations.fields import (PurifiedField, save_signal,
TranslatedField, Translation)
from mkt.translations.models import attach_trans_dict
from mkt.translations.utils import find_language, to_language
from mkt.users.models import UserForeignKey, UserProfile
from mkt.versions.models import Version
from mkt.webapps import query, signals
from mkt.webapps.indexers import WebappIndexer
from mkt.webapps.utils import (dehydrate_content_rating, get_locale_properties,
get_supported_locales)
log = commonware.log.getLogger('z.addons')
def clean_slug(instance, slug_field='app_slug'):
"""Cleans a model instance slug.
This strives to be as generic as possible as it's used by Webapps
and maybe less in the future. :-D
"""
slug = getattr(instance, slug_field, None) or instance.name
if not slug:
# Initialize the slug with what we have available: a name translation,
# or the id of the instance, or in last resort the model name.
translations = Translation.objects.filter(id=instance.name_id)
if translations.exists():
slug = translations[0]
elif instance.id:
slug = str(instance.id)
else:
slug = instance.__class__.__name__
max_length = instance._meta.get_field_by_name(slug_field)[0].max_length
slug = slugify(slug)[:max_length]
if BlacklistedSlug.blocked(slug):
slug = slug[:max_length - 1] + '~'
# The following trick makes sure we are using a manager that returns
# all the objects, as otherwise we could have a slug clash on our hands.
# Eg with the "Addon.objects" manager, which doesn't list deleted addons,
# we could have a "clean" slug which is in fact already assigned to an
# already existing (deleted) addon.
# Also, make sure we use the base class (eg Webapp, which inherits from
# Addon, shouldn't clash with addons). This is extra paranoid, as webapps
# have a different slug field, but just in case we need this in the future.
manager = models.Manager()
manager.model = instance._meta.proxy_for_model or instance.__class__
qs = manager.values_list(slug_field, flat=True) # Get list of all slugs.
if instance.id:
qs = qs.exclude(pk=instance.id) # Can't clash with itself.
# We first need to make sure there's a clash, before trying to find a
# suffix that is available. Eg, if there's a "foo-bar" slug, "foo" is still
# available.
clash = qs.filter(**{slug_field: slug})
if clash.exists():
# Leave space for "-" and 99 clashes.
slug = slugify(slug)[:max_length - 3]
# There is a clash, so find a suffix that will make this slug unique.
prefix = '%s-' % slug
lookup = {'%s__startswith' % slug_field: prefix}
clashes = qs.filter(**lookup)
# Try numbers between 1 and the number of clashes + 1 (+ 1 because we
# start the range at 1, not 0):
# if we have two clashes "foo-1" and "foo-2", we need to try "foo-x"
# for x between 1 and 3 to be absolutely sure to find an available one.
for idx in range(1, len(clashes) + 2):
new = ('%s%s' % (prefix, idx))[:max_length]
if new not in clashes:
slug = new
break
else:
# This could happen. The current implementation (using
# ``[:max_length -3]``) only works for the first 100 clashes in the
            # worst case (if the slug is equal to or longer than
# ``max_length - 3`` chars).
# After that, {verylongslug}-100 will be trimmed down to
# {verylongslug}-10, which is already assigned, but it's the last
# solution tested.
raise RuntimeError
setattr(instance, slug_field, slug)
return instance
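# Dependency-free sketch of the suffix search performed above (hypothetical helper, not
# used by clean_slug itself): given a slug and the collection of slugs already taken,
# try "<slug>-1", "<slug>-2", ... until a free candidate is found.
def _example_find_free_suffix(slug, existing):
    prefix = '%s-' % slug
    clashes = [s for s in existing if s.startswith(prefix)]
    for idx in range(1, len(clashes) + 2):
        candidate = '%s%s' % (prefix, idx)
        if candidate not in existing:
            return candidate
    raise RuntimeError('could not find a free suffix')
# _example_find_free_suffix('foo', {'foo', 'foo-1', 'foo-2'}) -> 'foo-3'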
class AddonDeviceType(ModelBase):
addon = models.ForeignKey('Webapp', db_constraint=False)
device_type = models.PositiveIntegerField(
default=amo.DEVICE_DESKTOP, choices=do_dictsort(amo.DEVICE_TYPES),
db_index=True)
class Meta:
db_table = 'addons_devicetypes'
unique_together = ('addon', 'device_type')
def __unicode__(self):
return u'%s: %s' % (self.addon.name, self.device.name)
@property
def device(self):
return amo.DEVICE_TYPES[self.device_type]
@receiver(signals.version_changed, dispatch_uid='version_changed')
def version_changed(sender, **kw):
from . import tasks
tasks.version_changed.delay(sender.id)
def attach_devices(addons):
addon_dict = dict((a.id, a) for a in addons)
devices = (AddonDeviceType.objects.filter(addon__in=addon_dict)
.values_list('addon', 'device_type'))
for addon, device_types in sorted_groupby(devices, lambda x: x[0]):
addon_dict[addon].device_ids = [d[1] for d in device_types]
def attach_prices(addons):
addon_dict = dict((a.id, a) for a in addons)
prices = (AddonPremium.objects
.filter(addon__in=addon_dict,
addon__premium_type__in=amo.ADDON_PREMIUMS)
.values_list('addon', 'price__price'))
for addon, price in prices:
addon_dict[addon].price = price
def attach_translations(addons):
"""Put all translations into a translations dict."""
attach_trans_dict(Webapp, addons)
def attach_tags(addons):
addon_dict = dict((a.id, a) for a in addons)
qs = (Tag.objects.not_blacklisted().filter(addons__in=addon_dict)
.values_list('addons__id', 'tag_text'))
for addon, tags in sorted_groupby(qs, lambda x: x[0]):
addon_dict[addon].tag_list = [t[1] for t in tags]
class AddonUser(caching.CachingMixin, models.Model):
addon = models.ForeignKey('Webapp')
user = UserForeignKey()
role = models.SmallIntegerField(default=amo.AUTHOR_ROLE_OWNER,
choices=amo.AUTHOR_CHOICES)
listed = models.BooleanField(_lazy(u'Listed'), default=True)
position = models.IntegerField(default=0)
objects = caching.CachingManager()
def __init__(self, *args, **kwargs):
super(AddonUser, self).__init__(*args, **kwargs)
self._original_role = self.role
self._original_user_id = self.user_id
class Meta:
db_table = 'addons_users'
unique_together = (('addon', 'user'), )
class Preview(ModelBase):
addon = models.ForeignKey('Webapp', related_name='previews')
filetype = models.CharField(max_length=25)
thumbtype = models.CharField(max_length=25)
caption = TranslatedField()
position = models.IntegerField(default=0)
sizes = json_field.JSONField(max_length=25, default={})
class Meta:
db_table = 'previews'
ordering = ('position', 'created')
def _image_url(self, url_template):
if self.modified is not None:
if isinstance(self.modified, unicode):
self.modified = datetime.datetime.strptime(self.modified,
'%Y-%m-%dT%H:%M:%S')
modified = int(time.mktime(self.modified.timetuple()))
else:
modified = 0
args = [self.id / 1000, self.id, modified]
if '.png' not in url_template:
args.insert(2, self.file_extension)
return url_template % tuple(args)
def _image_path(self, url_template):
args = [self.id / 1000, self.id]
if '.png' not in url_template:
args.append(self.file_extension)
return url_template % tuple(args)
def as_dict(self, src=None):
d = {'full': urlparams(self.image_url, src=src),
'thumbnail': urlparams(self.thumbnail_url, src=src),
'caption': unicode(self.caption)}
return d
@property
def is_landscape(self):
size = self.image_size
if not size:
return False
return size[0] > size[1]
@property
def file_extension(self):
# Assume that blank is an image.
if not self.filetype:
return 'png'
return self.filetype.split('/')[1]
@property
def thumbnail_url(self):
return self._image_url(static_url('PREVIEW_THUMBNAIL_URL'))
@property
def image_url(self):
return self._image_url(static_url('PREVIEW_FULL_URL'))
@property
def thumbnail_path(self):
return self._image_path(settings.PREVIEW_THUMBNAIL_PATH)
@property
def image_path(self):
return self._image_path(settings.PREVIEW_FULL_PATH)
@property
def thumbnail_size(self):
return self.sizes.get('thumbnail', []) if self.sizes else []
@property
def image_size(self):
return self.sizes.get('image', []) if self.sizes else []
dbsignals.pre_save.connect(save_signal, sender=Preview,
dispatch_uid='preview_translations')
class BlacklistedSlug(ModelBase):
name = models.CharField(max_length=255, unique=True, default='')
class Meta:
db_table = 'addons_blacklistedslug'
def __unicode__(self):
return self.name
@classmethod
def blocked(cls, slug):
return slug.isdigit() or cls.objects.filter(name=slug).exists()
def reverse_version(version):
"""
The try/except AttributeError allows this to be used where the input is
ambiguous, and could be either an already-reversed URL or a Version object.
"""
if version:
try:
return reverse('version-detail', kwargs={'pk': version.pk})
except AttributeError:
return version
return
class WebappManager(ManagerBase):
def __init__(self, include_deleted=False):
ManagerBase.__init__(self)
self.include_deleted = include_deleted
def get_query_set(self):
qs = super(WebappManager, self).get_query_set()
qs = qs._clone(klass=query.IndexQuerySet)
if not self.include_deleted:
qs = qs.exclude(status=amo.STATUS_DELETED)
return qs.transform(Webapp.transformer)
def valid(self):
return self.filter(status__in=amo.LISTED_STATUSES,
disabled_by_user=False)
def visible(self):
return self.filter(status__in=amo.LISTED_STATUSES,
disabled_by_user=False)
@skip_cache
def pending_in_region(self, region):
"""
Apps that have been approved by reviewers but unapproved by
reviewers in special regions (e.g., China).
"""
region = parse_region(region)
column_prefix = '_geodata__region_%s' % region.slug
return self.filter(**{
# Only nominated apps should show up.
'%s_nominated__isnull' % column_prefix: False,
'status__in': amo.WEBAPPS_APPROVED_STATUSES,
'disabled_by_user': False,
'escalationqueue__isnull': True,
'%s_status' % column_prefix: amo.STATUS_PENDING,
}).order_by('-%s_nominated' % column_prefix)
def rated(self):
"""IARC."""
return self.exclude(content_ratings__isnull=True)
def by_identifier(self, identifier):
"""
Look up a single app by its `id` or `app_slug`.
        If the identifier is coercible into an integer, we first check for an
ID match, falling back to a slug check (probably not necessary, as
there is validation preventing numeric slugs). Otherwise, we only look
for a slug match.
"""
try:
return self.get(id=identifier)
except (ObjectDoesNotExist, ValueError):
return self.get(app_slug=identifier)
class UUIDModelMixin(object):
"""
A mixin responsible for assigning a uniquely generated
UUID at save time.
"""
def save(self, *args, **kwargs):
self.assign_uuid()
return super(UUIDModelMixin, self).save(*args, **kwargs)
def assign_uuid(self):
"""Generates a UUID if self.guid is not already set."""
if not hasattr(self, 'guid'):
raise AttributeError(
'A UUIDModel must contain a charfield called guid')
if not self.guid:
max_tries = 10
tried = 1
guid = str(uuid.uuid4())
while tried <= max_tries:
if not type(self).objects.filter(guid=guid).exists():
self.guid = guid
break
else:
guid = str(uuid.uuid4())
tried += 1
else:
raise ValueError('Could not auto-generate a unique UUID')
class Webapp(UUIDModelMixin, OnChangeMixin, ModelBase):
STATUS_CHOICES = amo.STATUS_CHOICES.items()
guid = models.CharField(max_length=255, unique=True, null=True)
slug = models.CharField(max_length=30, unique=True, null=True)
# This column is only used for webapps, so they can have a slug namespace
# separate from addons and personas.
app_slug = models.CharField(max_length=30, unique=True, null=True,
blank=True)
name = TranslatedField(default=None)
default_locale = models.CharField(max_length=10,
default=settings.LANGUAGE_CODE,
db_column='defaultlocale')
status = models.PositiveIntegerField(
choices=STATUS_CHOICES, db_index=True, default=0)
highest_status = models.PositiveIntegerField(
choices=STATUS_CHOICES, default=0,
help_text='An upper limit for what an author can change.',
db_column='higheststatus')
icon_type = models.CharField(max_length=25, blank=True,
db_column='icontype')
icon_hash = models.CharField(max_length=8, blank=True, null=True)
homepage = TranslatedField()
support_email = TranslatedField(db_column='supportemail')
support_url = TranslatedField(db_column='supporturl')
description = PurifiedField(short=False)
privacy_policy = PurifiedField(db_column='privacypolicy')
average_rating = models.FloatField(max_length=255, default=0, null=True,
db_column='averagerating')
bayesian_rating = models.FloatField(default=0, db_index=True,
db_column='bayesianrating')
total_reviews = models.PositiveIntegerField(default=0,
db_column='totalreviews')
weekly_downloads = models.PositiveIntegerField(
default=0, db_column='weeklydownloads', db_index=True)
total_downloads = models.PositiveIntegerField(
default=0, db_column='totaldownloads')
last_updated = models.DateTimeField(
db_index=True, null=True,
help_text='Last time this add-on had a file/version update')
disabled_by_user = models.BooleanField(default=False, db_index=True,
db_column='inactive')
public_stats = models.BooleanField(default=False, db_column='publicstats')
authors = models.ManyToManyField('users.UserProfile', through='AddonUser',
related_name='addons')
categories = json_field.JSONField(default=None)
premium_type = models.PositiveIntegerField(
choices=amo.ADDON_PREMIUM_TYPES.items(), default=amo.ADDON_FREE)
manifest_url = models.URLField(max_length=255, blank=True, null=True)
app_domain = models.CharField(max_length=255, blank=True, null=True,
db_index=True)
_current_version = models.ForeignKey(Version, db_column='current_version',
related_name='+', null=True,
on_delete=models.SET_NULL)
_latest_version = models.ForeignKey(Version, db_column='latest_version',
on_delete=models.SET_NULL,
null=True, related_name='+')
publish_type = models.PositiveIntegerField(default=0)
mozilla_contact = models.EmailField(blank=True)
vip_app = models.BooleanField(default=False)
priority_review = models.BooleanField(default=False)
# Whether the app is packaged or not (aka hosted).
is_packaged = models.BooleanField(default=False, db_index=True)
enable_new_regions = models.BooleanField(default=True, db_index=True)
# Annotates disabled apps from the Great IARC purge for auto-reapprove.
# Note: for currently PUBLIC apps only.
iarc_purged = models.BooleanField(default=False)
# This is the public_id to a Generic Solitude Product
solitude_public_id = models.CharField(max_length=255, null=True,
blank=True)
objects = WebappManager()
with_deleted = WebappManager(include_deleted=True)
class PayAccountDoesNotExist(Exception):
"""The app has no payment account for the query."""
class Meta:
db_table = 'addons'
def __unicode__(self):
return u'%s: %s' % (self.id, self.name)
def save(self, **kw):
self.clean_slug(slug_field='app_slug')
creating = not self.id
super(Webapp, self).save(**kw)
if creating:
# Set the slug once we have an id to keep things in order.
# This breaks test_change_called_on_new_instance_save
self.update(slug='app-%s' % self.id)
# Create Geodata object (a 1-to-1 relationship).
if not hasattr(self, '_geodata'):
Geodata.objects.create(addon=self)
@transaction.commit_on_success
def delete(self, msg='', reason=''):
# To avoid a circular import.
from . import tasks
if self.status == amo.STATUS_DELETED:
return # We're already done.
id = self.id
# Tell IARC this app is delisted from the set_iarc_storefront_data.
tasks.set_storefront_data.delay(self.pk, disable=True)
# Fetch previews before deleting the addon instance, so that we can
# pass the list of files to delete to the delete_preview_files task
# after the addon is deleted.
previews = list(Preview.objects.filter(addon__id=id)
.values_list('id', flat=True))
log.debug('Deleting app: %s' % self.id)
to = [settings.FLIGTAR]
user = amo.get_user()
context = {
'atype': 'App',
'authors': [u.email for u in self.authors.all()],
'guid': self.guid,
'id': self.id,
'msg': msg,
'reason': reason,
'name': self.name,
'slug': self.app_slug,
'total_downloads': self.total_downloads,
'url': absolutify(self.get_url_path()),
'user_str': ("%s, %s (%s)" % (user.display_name or
user.username, user.email,
user.id) if user else "Unknown"),
}
email_msg = u"""
The following %(atype)s was deleted.
%(atype)s: %(name)s
URL: %(url)s
DELETED BY: %(user_str)s
ID: %(id)s
GUID: %(guid)s
AUTHORS: %(authors)s
TOTAL DOWNLOADS: %(total_downloads)s
NOTES: %(msg)s
REASON GIVEN BY USER FOR DELETION: %(reason)s
""" % context
log.debug('Sending delete email for %(atype)s %(id)s' % context)
subject = 'Deleting %(atype)s %(slug)s (%(id)d)' % context
# Update or NULL out various fields.
models.signals.pre_delete.send(sender=Webapp, instance=self)
self.update(status=amo.STATUS_DELETED, slug=None, app_slug=None,
app_domain=None, _current_version=None)
models.signals.post_delete.send(sender=Webapp, instance=self)
send_mail(subject, email_msg, recipient_list=to)
for preview in previews:
tasks.delete_preview_files.delay(preview)
return True
@use_master
def clean_slug(self, slug_field='app_slug'):
if self.status == amo.STATUS_DELETED:
return
clean_slug(self, slug_field)
@staticmethod
def attach_related_versions(addons, addon_dict=None):
if addon_dict is None:
addon_dict = dict((a.id, a) for a in addons)
current_ids = filter(None, (a._current_version_id for a in addons))
latest_ids = filter(None, (a._latest_version_id for a in addons))
all_ids = set(current_ids) | set(latest_ids)
versions = list(Version.objects.filter(id__in=all_ids).order_by())
for version in versions:
try:
addon = addon_dict[version.addon_id]
except KeyError:
log.debug('Version %s has an invalid add-on id.' % version.id)
continue
if addon._current_version_id == version.id:
addon._current_version = version
if addon._latest_version_id == version.id:
addon._latest_version = version
version.addon = addon
@classmethod
def get_indexer(cls):
return WebappIndexer
@classmethod
def from_upload(cls, upload, is_packaged=False):
data = parse_addon(upload)
fields = cls._meta.get_all_field_names()
addon = Webapp(**dict((k, v) for k, v in data.items() if k in fields))
addon.status = amo.STATUS_NULL
locale_is_set = (addon.default_locale and
addon.default_locale in (
settings.AMO_LANGUAGES +
settings.HIDDEN_LANGUAGES) and
data.get('default_locale') == addon.default_locale)
if not locale_is_set:
addon.default_locale = to_language(translation.get_language())
addon.is_packaged = is_packaged
if is_packaged:
addon.app_domain = data.get('origin')
else:
addon.manifest_url = upload.name
addon.app_domain = addon.domain_from_url(addon.manifest_url)
addon.save()
Version.from_upload(upload, addon)
amo.log(amo.LOG.CREATE_ADDON, addon)
log.debug('New addon %r from %r' % (addon, upload))
return addon
@staticmethod
def attach_previews(addons, addon_dict=None, no_transforms=False):
if addon_dict is None:
addon_dict = dict((a.id, a) for a in addons)
qs = Preview.objects.filter(addon__in=addons,
position__gte=0).order_by()
if no_transforms:
qs = qs.no_transforms()
qs = sorted(qs, key=lambda x: (x.addon_id, x.position, x.created))
for addon, previews in itertools.groupby(qs, lambda x: x.addon_id):
addon_dict[addon].all_previews = list(previews)
# FIXME: set all_previews to empty list on addons without previews.
@staticmethod
def attach_prices(addons, addon_dict=None):
# FIXME: merge with attach_prices transformer below.
if addon_dict is None:
addon_dict = dict((a.id, a) for a in addons)
# There's a constrained amount of price tiers, may as well load
# them all and let cache machine keep them cached.
prices = dict((p.id, p) for p in Price.objects.all())
# Attach premium addons.
qs = AddonPremium.objects.filter(addon__in=addons)
premium_dict = dict((ap.addon_id, ap) for ap in qs)
# Attach premiums to addons, making sure to attach None to free addons
# or addons where the corresponding AddonPremium is missing.
for addon in addons:
if addon.is_premium():
addon_p = premium_dict.get(addon.id)
if addon_p:
price = prices.get(addon_p.price_id)
if price:
addon_p.price = price
addon_p.addon = addon
addon._premium = addon_p
else:
addon._premium = None
def is_public(self):
"""
True if the app is not disabled and the status is either STATUS_PUBLIC
or STATUS_UNLISTED.
Both statuses are "public" in that they should result in a 200 to the
app detail page.
"""
return (not self.disabled_by_user and
self.status in (amo.STATUS_PUBLIC, amo.STATUS_UNLISTED))
def is_approved(self):
"""
True if the app has status equal to amo.STATUS_APPROVED.
This app has been approved by a reviewer but is currently private and
        only visible to the app authors.
"""
return not self.disabled_by_user and self.status == amo.STATUS_APPROVED
def is_published(self):
"""
True if the app status is amo.STATUS_PUBLIC.
This means we can display the app in listing pages and index it in our
search backend.
"""
return not self.disabled_by_user and self.status == amo.STATUS_PUBLIC
def is_incomplete(self):
return self.status == amo.STATUS_NULL
def is_pending(self):
return self.status == amo.STATUS_PENDING
def is_rejected(self):
return self.status == amo.STATUS_REJECTED
@property
def is_deleted(self):
return self.status == amo.STATUS_DELETED
@property
def is_disabled(self):
"""True if this Addon is disabled.
It could be disabled by an admin or disabled by the developer
"""
return self.status == amo.STATUS_DISABLED or self.disabled_by_user
def can_become_premium(self):
if self.upsell or self.is_premium():
return False
return True
def is_premium(self):
"""
        True if the addon is premium. This includes addons that are premium
and have a price of zero. Primarily of use in the devhub to determine
if an app is intending to be premium.
"""
return self.premium_type in amo.ADDON_PREMIUMS
def is_free(self):
"""
        The opposite of is_premium. Premium apps with a price of zero are
        still considered free here. Primarily of use in the devhub to
        determine if an app is intending to be free.
"""
return not (self.is_premium() and self.premium and
self.premium.price)
def is_free_inapp(self):
return self.premium_type == amo.ADDON_FREE_INAPP
def needs_payment(self):
return (self.premium_type not in
(amo.ADDON_FREE, amo.ADDON_OTHER_INAPP))
def can_be_deleted(self):
return not self.is_deleted
@classmethod
def _last_updated_queries(cls):
"""
Get the queries used to calculate addon.last_updated.
"""
return (Webapp.objects.no_cache()
.filter(status=amo.STATUS_PUBLIC,
versions__files__status=amo.STATUS_PUBLIC)
.values('id')
.annotate(last_updated=Max('versions__created')))
@amo.cached_property(writable=True)
def all_previews(self):
return list(self.get_previews())
def get_previews(self):
"""Exclude promo graphics."""
return self.previews.exclude(position=-1)
def remove_locale(self, locale):
"""NULLify strings in this locale for the add-on and versions."""
for o in itertools.chain([self], self.versions.all()):
Translation.objects.remove_for(o, locale)
def get_mozilla_contacts(self):
return [x.strip() for x in self.mozilla_contact.split(',')]
@amo.cached_property
def upsell(self):
"""Return the upsell or add-on, or None if there isn't one."""
try:
# We set unique_together on the model, so there will only be one.
return self._upsell_from.all()[0]
except IndexError:
pass
def has_author(self, user, roles=None):
"""True if ``user`` is an author with any of the specified ``roles``.
``roles`` should be a list of valid roles (see amo.AUTHOR_ROLE_*). If
not specified, has_author will return true if the user has any role.
"""
if user is None or user.is_anonymous():
return False
if roles is None:
roles = dict(amo.AUTHOR_CHOICES).keys()
return AddonUser.objects.filter(addon=self, user=user,
role__in=roles).exists()
@property
def thumbnail_url(self):
"""
Returns the addon's thumbnail url or a default.
"""
try:
preview = self.all_previews[0]
return preview.thumbnail_url
except IndexError:
return settings.MEDIA_URL + '/img/icons/no-preview.png'
def get_purchase_type(self, user):
if user and isinstance(user, UserProfile):
try:
return self.addonpurchase_set.get(user=user).type
except models.ObjectDoesNotExist:
pass
def has_purchased(self, user):
return self.get_purchase_type(user) == amo.CONTRIB_PURCHASE
def is_refunded(self, user):
return self.get_purchase_type(user) == amo.CONTRIB_REFUND
def is_chargeback(self, user):
return self.get_purchase_type(user) == amo.CONTRIB_CHARGEBACK
def can_review(self, user):
if user and self.has_author(user):
return False
else:
return (not self.is_premium() or self.has_purchased(user) or
self.is_refunded(user))
def get_latest_file(self):
"""Get the latest file from the current version."""
cur = self.current_version
if cur:
res = cur.files.order_by('-created')
if res:
return res[0]
@property
def uses_flash(self):
"""
Convenience property until more sophisticated per-version
checking is done for packaged apps.
"""
f = self.get_latest_file()
if not f:
return False
return f.uses_flash
def in_escalation_queue(self):
return self.escalationqueue_set.exists()
def update_names(self, new_names):
"""
Adds, edits, or removes names to match the passed in new_names dict.
Will not remove the translation of the default_locale.
`new_names` is a dictionary mapping of locales to names.
Returns a message that can be used in logs showing what names were
added or updated.
Note: This method doesn't save the changes made to the addon object.
Don't forget to call save() in your calling method.
"""
updated_locales = {}
locales = dict(Translation.objects.filter(id=self.name_id)
.values_list('locale',
'localized_string'))
msg_c = [] # For names that were created.
msg_d = [] # For deletes.
msg_u = [] # For updates.
# Normalize locales.
names = {}
for locale, name in new_names.iteritems():
loc = find_language(locale)
if loc and loc not in names:
names[loc] = name
        # Null out database names that are no longer present in `names`.
for locale in set(locales) - set(names):
names[locale] = None
for locale, name in names.iteritems():
if locale in locales:
if not name and locale.lower() == self.default_locale.lower():
pass # We never want to delete the default locale.
elif not name: # A deletion.
updated_locales[locale] = None
msg_d.append(u'"%s" (%s).' % (locales.get(locale), locale))
elif name != locales[locale]:
updated_locales[locale] = name
msg_u.append(u'"%s" -> "%s" (%s).' % (
locales[locale], name, locale))
else:
updated_locales[locale] = names.get(locale)
msg_c.append(u'"%s" (%s).' % (name, locale))
if locales != updated_locales:
self.name = updated_locales
return {
'added': ' '.join(msg_c),
'deleted': ' '.join(msg_d),
'updated': ' '.join(msg_u),
}
def update_default_locale(self, locale):
"""
Updates default_locale if it's different and matches one of our
supported locales.
Returns tuple of (old_locale, new_locale) if updated. Otherwise None.
"""
old_locale = self.default_locale
locale = find_language(locale)
if locale and locale != old_locale:
self.update(default_locale=locale)
return old_locale, locale
return None
@property
def premium(self):
"""
        Returns the premium object, normally attached by the transformer.
        If it isn't there, try to fetch it. Returns None if there's nothing
        there.
"""
if not hasattr(self, '_premium'):
try:
self._premium = self.addonpremium
except AddonPremium.DoesNotExist:
self._premium = None
return self._premium
def has_installed(self, user):
if not user or not isinstance(user, UserProfile):
return False
return self.installed.filter(user=user).exists()
@amo.cached_property
def upsold(self):
"""
        Return the upsell relation this app is upsold from,
        or None if there isn't one.
"""
try:
return self._upsell_to.all()[0]
except IndexError:
pass
@property
def icon_url(self):
return self.get_icon_url(32)
@classmethod
def get_fallback(cls):
return cls._meta.get_field('default_locale')
@amo.cached_property(writable=True)
def listed_authors(self):
return UserProfile.objects.filter(
addons=self,
addonuser__listed=True).order_by('addonuser__position')
@property
def reviews(self):
return Review.objects.filter(addon=self, reply_to=None)
def get_icon_dir(self):
return os.path.join(settings.ADDON_ICONS_PATH, str(self.id / 1000))
def get_icon_url(self, size):
"""
Returns either the icon URL or a default icon.
"""
icon_type_split = []
if self.icon_type:
icon_type_split = self.icon_type.split('/')
# Get the closest allowed size without going over.
if (size not in amo.APP_ICON_SIZES
and size >= amo.APP_ICON_SIZES[0]):
size = [s for s in amo.APP_ICON_SIZES if s < size][-1]
elif size < amo.APP_ICON_SIZES[0]:
size = amo.APP_ICON_SIZES[0]
# Figure out what to return for an image URL.
if not self.icon_type:
return '%s/%s-%s.png' % (static_url('ADDON_ICONS_DEFAULT_URL'),
'default', size)
elif icon_type_split[0] == 'icon':
return '%s/%s-%s.png' % (static_url('ADDON_ICONS_DEFAULT_URL'),
icon_type_split[1], size)
else:
# [1] is the whole ID, [2] is the directory.
split_id = re.match(r'((\d*?)\d{1,3})$', str(self.id))
            # If icon_hash isn't set, fall back to the dummy string "never";
            # once the icon is changed, icon_hash is updated and busts the
            # cached URL.
suffix = getattr(self, 'icon_hash', None) or 'never'
return static_url('ADDON_ICON_URL') % (
split_id.group(2) or 0, self.id, size, suffix)
@staticmethod
def transformer(apps):
if not apps:
return
apps_dict = dict((a.id, a) for a in apps)
# Set _latest_version, _current_version
Webapp.attach_related_versions(apps, apps_dict)
        # Attach previews. Don't use transforms: the only one present is for
        # translations, and Previews don't have captions in the Marketplace,
        # so they have no translations.
Webapp.attach_previews(apps, apps_dict, no_transforms=True)
# Attach prices.
Webapp.attach_prices(apps, apps_dict)
# FIXME: re-use attach_devices instead ?
for adt in AddonDeviceType.objects.filter(addon__in=apps_dict):
if not getattr(apps_dict[adt.addon_id], '_device_types', None):
apps_dict[adt.addon_id]._device_types = []
apps_dict[adt.addon_id]._device_types.append(
DEVICE_TYPES[adt.device_type])
# FIXME: attach geodata and content ratings. Maybe in a different
# transformer that would then be called automatically for the API ?
@staticmethod
def version_and_file_transformer(apps):
"""Attach all the versions and files to the apps."""
        # Don't just return an empty list; it would break code that expects
        # a query object.
if not len(apps):
return apps
ids = set(app.id for app in apps)
versions = (Version.objects.no_cache().filter(addon__in=ids)
.select_related('addon'))
vids = [v.id for v in versions]
files = (File.objects.no_cache().filter(version__in=vids)
.select_related('version'))
# Attach the files to the versions.
f_dict = dict((k, list(vs)) for k, vs in
amo.utils.sorted_groupby(files, 'version_id'))
for version in versions:
version.all_files = f_dict.get(version.id, [])
# Attach the versions to the apps.
v_dict = dict((k, list(vs)) for k, vs in
amo.utils.sorted_groupby(versions, 'addon_id'))
for app in apps:
app.all_versions = v_dict.get(app.id, [])
return apps
def get_public_version(self):
"""Retrieves the latest PUBLIC version of an addon."""
if self.status not in amo.WEBAPPS_APPROVED_STATUSES:
# Apps that aren't in an approved status have no current version.
return None
try:
return (self.versions.no_cache()
.filter(files__status=amo.STATUS_PUBLIC)
.extra(where=[
"""
NOT EXISTS (
SELECT 1 FROM versions as v2
INNER JOIN files AS f2 ON (f2.version_id = v2.id)
WHERE v2.id = versions.id
AND f2.status != %s)""" % amo.STATUS_PUBLIC])[0])
except (IndexError, Version.DoesNotExist):
return None
@write
def update_version(self, ignore=None, _signal=True):
"""
Returns true if we updated the field.
The optional ``ignore`` parameter, if present, is a version to not
consider as part of the update, since it may be in the process of being
deleted.
        Pass ``_signal=False`` if you want no signals fired at all.
"""
current = self.get_public_version()
try:
latest_qs = self.versions.all()
if ignore is not None:
latest_qs = latest_qs.exclude(pk=ignore.pk)
latest = latest_qs.latest()
except Version.DoesNotExist:
latest = None
latest_id = latest and latest.id
diff = [self._current_version, current]
# Sometimes the DB is in an inconsistent state when this
# signal is dispatched.
try:
if self._latest_version:
# Make sure stringifying this does not trigger
# Version.DoesNotExist before trying to use it for
# logging.
unicode(self._latest_version)
diff += [self._latest_version, latest]
except Version.DoesNotExist:
diff += [self._latest_version_id, latest_id]
updated = {}
send_signal = False
if self._current_version != current:
updated.update({'_current_version': current})
send_signal = True
# Don't use self.latest_version here. It may throw Version.DoesNotExist
# if we're called from a post_delete signal. We also don't set
# send_signal since we only want this fired if the public version
# changes.
if self._latest_version_id != latest_id:
updated.update({'_latest_version': latest})
# update_version can be called by a post_delete signal (such
# as File's) when deleting a version. If so, we should avoid putting
# that version-being-deleted in any fields.
if ignore is not None:
updated = dict([(k, v)
for (k, v) in updated.iteritems() if v != ignore])
if updated:
# Pass along _signal to the .update() to prevent it from firing
# signals if we don't want them.
updated['_signal'] = _signal
try:
self.update(**updated)
if send_signal and _signal:
signals.version_changed.send(sender=self)
log.info(u'Version changed from current: %s to %s, '
u'latest: %s to %s for addon %s'
% tuple(diff + [self]))
except Exception, e:
log.error(u'Could not save version changes '
u'current: %s to %s, latest: %s to %s '
u'for addon %s (%s)'
% tuple(diff + [self, e]))
return bool(updated)
@property
def current_version(self):
"""Returns the current_version or None if the app is deleted or not
created yet"""
if not self.id or self.status == amo.STATUS_DELETED:
return None
try:
return self._current_version
except ObjectDoesNotExist:
pass
return None
@property
def latest_version(self):
"""Returns the latest_version or None if the app is deleted or not
created yet"""
if not self.id or self.status == amo.STATUS_DELETED:
return None
try:
return self._latest_version
except ObjectDoesNotExist:
pass
return None
@property
def geodata(self):
if hasattr(self, '_geodata'):
return self._geodata
return Geodata.objects.get_or_create(addon=self)[0]
def get_api_url(self, action=None, api=None, resource=None, pk=False):
"""Reverse a URL for the API."""
if pk:
key = self.pk
else:
key = self.app_slug
return reverse('app-detail', kwargs={'pk': key})
def get_url_path(self, src=None):
url_ = reverse('detail', args=[self.app_slug])
if src is not None:
return urlparams(url_, src=src)
return url_
def get_detail_url(self, action=None):
"""Reverse URLs for 'detail', 'details.record', etc."""
return reverse(('detail.%s' % action) if action else 'detail',
args=[self.app_slug])
def get_purchase_url(self, action=None, args=None):
"""Reverse URLs for 'purchase', 'purchase.done', etc."""
return reverse(('purchase.%s' % action) if action else 'purchase',
args=[self.app_slug] + (args or []))
def get_dev_url(self, action='edit', args=None, prefix_only=False):
# Either link to the "new" Marketplace Developer Hub or the old one.
args = args or []
prefix = 'mkt.developers'
view_name = ('%s.%s' if prefix_only else '%s.apps.%s')
return reverse(view_name % (prefix, action),
args=[self.app_slug] + args)
def get_ratings_url(self, action='list', args=None):
"""Reverse URLs for 'ratings.list', 'ratings.add', etc."""
return reverse(('ratings.%s' % action),
args=[self.app_slug] + (args or []))
def get_stats_url(self):
return reverse('commonplace.stats.app_dashboard', args=[self.app_slug])
def get_comm_thread_url(self):
return reverse('commonplace.commbadge.app_dashboard',
args=[self.app_slug])
@staticmethod
def domain_from_url(url, allow_none=False):
if not url:
if allow_none:
return
raise ValueError('URL was empty')
pieces = urlparse.urlparse(url)
return '%s://%s' % (pieces.scheme, pieces.netloc.lower())
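        # e.g. 'HTTPS://Example.COM/manifest.webapp' -> 'https://example.com'
        # (illustrative input; scheme kept, host lower-cased, path dropped).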
@property
def punycode_app_domain(self):
return self.app_domain.encode('idna')
@property
def parsed_app_domain(self):
if self.is_packaged:
raise ValueError('Packaged apps do not have a domain')
return urlparse.urlparse(self.app_domain)
@property
def device_types(self):
# If the transformer attached something, use it.
if hasattr(self, '_device_types'):
return self._device_types
return [DEVICE_TYPES[d.device_type] for d in
self.addondevicetype_set.order_by('device_type')]
@property
def origin(self):
if self.is_packaged:
return self.app_domain
parsed = urlparse.urlparse(self.get_manifest_url())
return '%s://%s' % (parsed.scheme, parsed.netloc)
def language_ascii(self):
lang = translation.to_language(self.default_locale)
return settings.LANGUAGES.get(lang)
def get_manifest_url(self, reviewer=False):
"""
Hosted apps: a URI to an external manifest.
Packaged apps: a URI to a mini manifest on m.m.o. If reviewer, the
mini-manifest behind reviewer auth pointing to the reviewer-signed
package.
"""
if self.is_packaged:
if reviewer and self.latest_version:
# Get latest version and return reviewer manifest URL.
version = self.latest_version
return absolutify(reverse('reviewers.mini_manifest',
args=[self.app_slug, version.id]))
elif self.current_version:
return absolutify(reverse('detail.manifest', args=[self.guid]))
else:
return '' # No valid version.
else:
return self.manifest_url
def has_icon_in_manifest(self):
data = self.get_manifest_json()
return 'icons' in data
def get_manifest_json(self, file_obj=None):
file_ = file_obj or self.get_latest_file()
if not file_:
return {}
try:
return file_.version.manifest
except AppManifest.DoesNotExist:
            # TODO: Remove this when we're satisfied the above is working.
log.info('Falling back to loading manifest from file system. '
'Webapp:%s File:%s' % (self.id, file_.id))
if file_.status == amo.STATUS_DISABLED:
file_path = file_.guarded_file_path
else:
file_path = file_.file_path
return WebAppParser().get_json_data(file_path)
def manifest_updated(self, manifest, upload):
"""The manifest has updated, update the version and file.
This is intended to be used for hosted apps only, which have only a
single version and a single file.
"""
data = parse_addon(upload, self)
manifest = WebAppParser().get_json_data(upload)
version = self.versions.latest()
max_ = Version._meta.get_field_by_name('_developer_name')[0].max_length
version.update(version=data['version'],
_developer_name=data['developer_name'][:max_])
try:
version.manifest_json.update(manifest=json.dumps(manifest))
except AppManifest.DoesNotExist:
AppManifest.objects.create(version=version,
manifest=json.dumps(manifest))
path = smart_path(nfd_str(upload.path))
file = version.files.latest()
file.filename = file.generate_filename(extension='.webapp')
file.size = storage.size(path)
file.hash = file.generate_hash(path)
log.info('Updated file hash to %s' % file.hash)
file.save()
# Move the uploaded file from the temp location.
copy_stored_file(path, os.path.join(version.path_prefix,
nfd_str(file.filename)))
log.info('[Webapp:%s] Copied updated manifest to %s' % (
self, version.path_prefix))
amo.log(amo.LOG.MANIFEST_UPDATED, self)
def has_incomplete_status(self):
return self.is_incomplete()
def details_errors(self):
"""
See if initial app submission is complete (details).
Returns list of reasons app may not be complete.
"""
reasons = []
if not self.support_email:
reasons.append(_('You must provide a support email.'))
if not self.name:
reasons.append(_('You must provide an app name.'))
if not self.device_types:
reasons.append(_('You must provide at least one device type.'))
if not self.categories:
reasons.append(_('You must provide at least one category.'))
if not self.previews.count():
reasons.append(_('You must upload at least one screenshot or '
'video.'))
return reasons
def details_complete(self):
"""
Checks if app detail submission is complete (first step of submit).
"""
return not self.details_errors()
def is_rated(self):
return self.content_ratings.exists()
def all_payment_accounts(self):
# TODO: cache this somehow. Using @cached_property was hard because
# there's no easy way to invalidate something that should be
# recalculated.
return (self.app_payment_accounts.select_related('payment_account')
.all())
def payment_account(self, provider_id):
from mkt.developers.models import AddonPaymentAccount
qs = (self.app_payment_accounts.select_related('payment_account')
.filter(payment_account__provider=provider_id))
try:
return qs.get()
except AddonPaymentAccount.DoesNotExist, exc:
            log.info('non-existent payment account for app {app}: '
'{exc.__class__.__name__}: {exc}'
.format(app=self, exc=exc))
raise self.PayAccountDoesNotExist(
'No payment account for {app} named {pr}. '
'Choices: {all}'
.format(app=self,
pr=PROVIDER_CHOICES[provider_id],
all=[PROVIDER_CHOICES[a.payment_account.provider]
for a in self.all_payment_accounts()]))
def has_payment_account(self):
"""True if app has at least one payment account."""
return bool(self.all_payment_accounts().count())
def has_multiple_payment_accounts(self):
"""True if the app has more than one payment account."""
return self.all_payment_accounts().count() > 1
def payments_complete(self):
"""Also returns True if the app doesn't needs payments."""
return not self.needs_payment() or self.has_payment_account()
def completion_errors(self, ignore_ratings=False):
"""
Compiles all submission steps into a single error report.
ignore_ratings -- doesn't check for content_ratings for cases in which
content ratings were just created.
"""
errors = {}
if not self.details_complete():
errors['details'] = self.details_errors()
if not ignore_ratings and not self.is_rated():
errors['content_ratings'] = _('You must set up content ratings.')
if not self.payments_complete():
errors['payments'] = _('You must set up a payment account.')
return errors
def completion_error_msgs(self):
"""Returns submission error messages as a flat list."""
errors = self.completion_errors()
# details is a list of msgs instead of a string like others.
detail_errors = errors.pop('details', []) or []
return detail_errors + errors.values()
def is_fully_complete(self, ignore_ratings=False):
"""
Wrapper to submission errors for readability and testability (mocking).
"""
return not self.completion_errors(ignore_ratings)
def next_step(self):
"""
Gets the next step to fully complete app submission.
"""
if self.has_incomplete_status() and not self.details_complete():
# Some old public apps may have some missing detail fields.
return {
'name': _('Details'),
'description': _('This app\'s submission process has not been '
'fully completed.'),
'url': self.get_dev_url(),
}
elif not self.is_rated():
return {
'name': _('Content Ratings'),
'description': _('This app needs to get a content rating.'),
'url': self.get_dev_url('ratings'),
}
elif not self.payments_complete():
return {
'name': _('Payments'),
'description': _('This app needs a payment account set up.'),
'url': self.get_dev_url('payments'),
}
@amo.cached_property(writable=True)
def is_offline(self):
"""
Returns a boolean of whether this is an app that degrades
gracefully offline (i.e., is a packaged app or has an
`appcache_path` defined in its manifest).
"""
if self.is_packaged:
return True
manifest = self.get_manifest_json()
return bool(manifest and 'appcache_path' in manifest)
def mark_done(self):
"""When the submission process is done, update status accordingly."""
self.update(status=amo.WEBAPPS_UNREVIEWED_STATUS)
def update_status(self, **kwargs):
if self.is_deleted or self.status == amo.STATUS_BLOCKED:
return
def _log(reason, old=self.status):
log.info(u'Update app status [%s]: %s => %s (%s).' % (
self.id, old, self.status, reason))
amo.log(amo.LOG.CHANGE_STATUS, self.get_status_display(), self)
# Handle the case of no versions.
if not self.versions.exists():
self.update(status=amo.STATUS_NULL)
_log('no versions')
return
# Handle the case of versions with no files.
if not self.versions.filter(files__isnull=False).exists():
self.update(status=amo.STATUS_NULL)
_log('no versions with files')
return
# If the app is incomplete, don't update status.
if not self.is_fully_complete():
return
# If there are no public versions and at least one pending, set status
# to pending.
has_public = self.versions.filter(
files__status=amo.STATUS_PUBLIC).exists()
has_approved = self.versions.filter(
files__status=amo.STATUS_APPROVED).exists()
has_pending = self.versions.filter(
files__status=amo.STATUS_PENDING).exists()
# If no public versions but there are approved versions, set app to
# approved.
if not has_public and has_approved:
_log('has approved but no public files')
self.update(status=amo.STATUS_APPROVED)
return
# If no public versions but there are pending versions, set app to
# pending.
if not has_public and has_pending and not self.is_pending():
self.update(status=amo.STATUS_PENDING)
_log('has pending but no public files')
return
def authors_other_addons(self, app=None):
"""Return other apps by the same author."""
return (self.__class__.objects.visible()
.exclude(id=self.id).distinct()
.filter(addonuser__listed=True,
authors__in=self.listed_authors))
def can_be_purchased(self):
return self.is_premium() and self.status in amo.REVIEWED_STATUSES
def can_purchase(self):
return self.is_premium() and self.premium and self.is_public()
def is_purchased(self, user):
return user and self.id in user.purchase_ids()
def has_premium(self):
"""If the app is premium status and has a premium object."""
return bool(self.is_premium() and self.premium)
def get_price(self, carrier=None, region=None, provider=None):
"""
        A shortcut to get the price as a decimal. Returns None if there is no
        price for the app.
:param optional carrier: an int for the carrier.
:param optional region: an int for the region. Defaults to restofworld.
:param optional provider: an int for the provider. Defaults to bango.
"""
if self.has_premium() and self.premium.price:
return self.premium.price.get_price(carrier=carrier, region=region,
provider=provider)
def get_price_locale(self, carrier=None, region=None, provider=None):
"""
A shortcut to get the localised price with currency. Returns None if
        there is no price for the app.
:param optional carrier: an int for the carrier.
:param optional region: an int for the region. Defaults to restofworld.
:param optional provider: an int for the provider. Defaults to bango.
"""
if self.has_premium() and self.premium.price:
return self.premium.price.get_price_locale(
carrier=carrier, region=region, provider=provider)
def get_tier(self):
"""
Returns the price tier object.
"""
if self.has_premium():
return self.premium.price
def get_tier_name(self):
"""
Returns the price tier for showing prices in the reviewer
tools and developer hub.
"""
tier = self.get_tier()
if tier:
return tier.tier_locale()
@amo.cached_property
def promo(self):
return self.get_promo()
def get_promo(self):
try:
return self.previews.filter(position=-1)[0]
except IndexError:
pass
def get_region_ids(self, restofworld=False, excluded=None):
"""
Return IDs of regions in which this app is listed.
If `excluded` is provided we'll use that instead of doing our own
excluded lookup.
"""
if restofworld:
all_ids = mkt.regions.ALL_REGION_IDS
else:
all_ids = mkt.regions.REGION_IDS
if excluded is None:
excluded = self.get_excluded_region_ids()
return sorted(set(all_ids) - set(excluded or []))
def get_excluded_region_ids(self):
"""
Return IDs of regions for which this app is excluded.
This will be all the addon excluded regions. If the app is premium,
this will also exclude any region that does not have the price tier
set.
Note: free and in-app are not included in this.
"""
excluded = set(self.addonexcludedregion
.values_list('region', flat=True))
if self.is_premium():
all_regions = set(mkt.regions.ALL_REGION_IDS)
# Find every region that does not have payments supported
# and add that into the exclusions.
excluded = excluded.union(
all_regions.difference(self.get_price_region_ids()))
geo = self.geodata
if geo.region_de_iarc_exclude or geo.region_de_usk_exclude:
excluded.add(mkt.regions.DE.id)
if geo.region_br_iarc_exclude:
excluded.add(mkt.regions.BR.id)
return sorted(list(excluded))
def get_price_region_ids(self):
tier = self.get_tier()
if tier:
return sorted(p['region'] for p in tier.prices() if p['paid'])
return []
def get_regions(self, regions=None):
"""
Return a list of regions objects the app is available in, e.g.:
[<class 'mkt.constants.regions.BR'>,
<class 'mkt.constants.regions.CA'>,
<class 'mkt.constants.regions.UK'>,
<class 'mkt.constants.regions.US'>,
<class 'mkt.constants.regions.RESTOFWORLD'>]
if `regions` is provided we'll use that instead of calling
self.get_region_ids()
"""
regions_ids = regions or self.get_region_ids(restofworld=True)
_regions = map(mkt.regions.REGIONS_CHOICES_ID_DICT.get, regions_ids)
return sorted(_regions, key=lambda x: x.slug)
def listed_in(self, region=None, category=None):
listed = []
if region:
listed.append(region.id in self.get_region_ids(restofworld=True))
if category:
listed.append(category in (self.categories or []))
return all(listed or [False])
def content_ratings_in(self, region, category=None):
"""
Get all content ratings for this app in REGION for CATEGORY.
        (e.g. give me the content ratings for a game listed in Brazil.)
"""
# If we want to find games in Brazil with content ratings, then
# make sure it's actually listed in Brazil and it's a game.
if category and not self.listed_in(region, category):
return []
rb = []
if not region.ratingsbody:
# If a region doesn't specify a ratings body, default to GENERIC.
rb = mkt.ratingsbodies.GENERIC.id
else:
rb = region.ratingsbody.id
return list(self.content_ratings.filter(ratings_body=rb)
.order_by('rating'))
@classmethod
def now(cls):
return datetime.date.today()
def in_rereview_queue(self):
return self.rereviewqueue_set.exists()
def in_tarako_queue(self):
from mkt.reviewers.models import QUEUE_TARAKO
return self.additionalreview_set.unreviewed(queue=QUEUE_TARAKO)
def get_package_path(self):
"""Returns the `package_path` if the app is packaged."""
if not self.is_packaged:
return
version = self.current_version
if not version:
return
try:
file_obj = version.all_files[0]
except IndexError:
return
else:
return absolutify(
os.path.join(reverse('downloads.file', args=[file_obj.id]),
file_obj.filename))
def get_cached_manifest(self, force=False):
"""
Creates the "mini" manifest for packaged apps and caches it.
Call this with `force=True` whenever we need to update the cached
version of this manifest, e.g., when a new version of the packaged app
is approved.
If the addon is not a packaged app, this will not cache anything.
"""
if not self.is_packaged:
return
key = 'webapp:{0}:manifest'.format(self.pk)
if not force:
data = cache.get(key)
if data:
return data
version = self.current_version
if not version:
# There's no valid version so we return an empty mini-manifest.
# Note: We want to avoid caching this so when a version does become
# available it can get picked up correctly.
return '{}'
else:
# This will sign the package if it isn't already.
#
# Ensure that the calling method checks various permissions if
# needed. E.g. see mkt/detail/views.py. This is also called as a
# task after reviewer approval so we can't perform some checks
# here.
signed_file_path = packaged.sign(version.pk)
file_obj = version.all_files[0]
manifest = self.get_manifest_json(file_obj)
package_path = absolutify(
os.path.join(reverse('downloads.file', args=[file_obj.id]),
file_obj.filename))
data = {
'name': manifest['name'],
'version': version.version,
'size': storage.size(signed_file_path),
'release_notes': version.releasenotes,
'package_path': package_path,
}
            # Use a different loop variable so we don't clobber the cache key
            # computed above.
            for field in ['developer', 'icons', 'locales']:
                if field in manifest:
                    data[field] = manifest[field]
data = json.dumps(data, cls=JSONEncoder)
cache.set(key, data, None)
return data
def sign_if_packaged(self, version_pk, reviewer=False):
if not self.is_packaged:
return
return packaged.sign(version_pk, reviewer=reviewer)
def is_premium_type_upgrade(self, premium_type):
"""
Returns True if changing self.premium_type from current value to passed
in value is considered an upgrade that should trigger a re-review.
"""
ALL = set(amo.ADDON_FREES + amo.ADDON_PREMIUMS)
free_upgrade = ALL - set([amo.ADDON_FREE])
free_inapp_upgrade = ALL - set([amo.ADDON_FREE, amo.ADDON_FREE_INAPP])
if (self.premium_type == amo.ADDON_FREE and
premium_type in free_upgrade):
return True
if (self.premium_type == amo.ADDON_FREE_INAPP and
premium_type in free_inapp_upgrade):
return True
return False
def create_blocklisted_version(self):
"""
        Creates a new version whose file is the blocklisted app found in /media
and sets status to STATUS_BLOCKLISTED.
"""
blocklisted_path = os.path.join(settings.MEDIA_ROOT, 'packaged-apps',
'blocklisted.zip')
v = Version.objects.create(addon=self, version='blocklisted')
f = File(version=v, status=amo.STATUS_BLOCKED)
f.filename = f.generate_filename()
copy_stored_file(blocklisted_path, f.file_path)
log.info(u'[Webapp:%s] Copied blocklisted app from %s to %s' % (
self.id, blocklisted_path, f.file_path))
f.size = storage.size(f.file_path)
f.hash = f.generate_hash(f.file_path)
f.save()
mf = WebAppParser().get_json_data(f.file_path)
AppManifest.objects.create(version=v, manifest=json.dumps(mf))
self.sign_if_packaged(v.pk)
self.status = amo.STATUS_BLOCKED
self._current_version = v
self.save()
def update_name_from_package_manifest(self):
"""
Looks at the manifest.webapp inside the current version's file and
updates the app's name and translated names.
Note: Make sure the correct version is in place before calling this.
"""
if not self.is_packaged:
return None
file_ = self.current_version.all_files[0]
mf = self.get_manifest_json(file_)
# Get names in "locales" as {locale: name}.
locale_names = get_locale_properties(mf, 'name', self.default_locale)
# Check changes to default_locale.
locale_changed = self.update_default_locale(mf.get('default_locale'))
if locale_changed:
log.info(u'[Webapp:%s] Default locale changed from "%s" to "%s".'
% (self.pk, locale_changed[0], locale_changed[1]))
# Update names
crud = self.update_names(locale_names)
if any(crud.values()):
self.save()
def update_supported_locales(self, latest=False, manifest=None):
"""
Loads the manifest (for either hosted or packaged) and updates
Version.supported_locales for the current version or latest version if
latest=True.
"""
version = self.versions.latest() if latest else self.current_version
if not manifest:
file_ = version.all_files[0]
manifest = self.get_manifest_json(file_)
updated = False
supported_locales = ','.join(get_supported_locales(manifest))
if version.supported_locales != supported_locales:
updated = True
version.update(supported_locales=supported_locales, _signal=False)
return updated
@property
def app_type_id(self):
"""
Returns int of `1` (hosted), `2` (packaged), or `3` (privileged).
Used by ES.
"""
if self.latest_version and self.latest_version.is_privileged:
return amo.ADDON_WEBAPP_PRIVILEGED
elif self.is_packaged:
return amo.ADDON_WEBAPP_PACKAGED
return amo.ADDON_WEBAPP_HOSTED
@property
def app_type(self):
"""
Returns string of 'hosted', 'packaged', or 'privileged'.
Used in the API.
"""
return amo.ADDON_WEBAPP_TYPES[self.app_type_id]
def check_ownership(self, request, require_owner, require_author,
ignore_disabled, admin):
"""
Used by acl.check_ownership to see if request.user has permissions for
the addon.
"""
if require_author:
require_owner = False
ignore_disabled = True
admin = False
return acl.check_addon_ownership(request, self, admin=admin,
viewer=(not require_owner),
ignore_disabled=ignore_disabled)
@property
def supported_locales(self):
"""
Returns a tuple of the form:
(localized default_locale, list of localized supported locales)
for the current public version.
"""
languages = []
version = self.current_version or self.latest_version
if version:
for locale in version.supported_locales.split(','):
if locale:
language = settings.LANGUAGES.get(locale.lower())
if language:
languages.append(language)
return (
settings.LANGUAGES.get(self.default_locale.lower()),
sorted(languages)
)
@property
def developer_name(self):
"""This is the developer name extracted from the manifest."""
version = self.current_version or self.latest_version
if version:
return version.developer_name
def get_trending(self, region=None):
"""
Returns trending value.
If no region, uses global value.
If region and region is not mature, uses global value.
Otherwise uses regional trending value.
"""
if region and not region.adolescent:
by_region = region.id
else:
by_region = 0
try:
return self.trending.get(region=by_region).value
except ObjectDoesNotExist:
return 0
def iarc_token(self):
"""
Simple hash to verify token in pingback API.
"""
return hashlib.sha512(settings.SECRET_KEY + str(self.id)).hexdigest()
def get_content_ratings_by_body(self, es=False):
"""
Gets content ratings on this app keyed by bodies.
es -- denotes whether to return ES-friendly results (just the IDs of
rating classes) to fetch and translate later.
"""
content_ratings = {}
for cr in self.content_ratings.all():
body = cr.get_body()
rating_serialized = {
'body': body.id,
'rating': cr.get_rating().id
}
if not es:
rating_serialized = dehydrate_content_rating(rating_serialized)
content_ratings[body.label] = rating_serialized
return content_ratings
def set_iarc_info(self, submission_id, security_code):
"""
Sets the iarc_info for this app.
"""
data = {'submission_id': submission_id,
'security_code': security_code}
info, created = IARCInfo.objects.safer_get_or_create(
addon=self, defaults=data)
if not created:
info.update(**data)
@write
def set_content_ratings(self, data):
"""
Central method for setting content ratings.
        This overwrites or creates ratings; it doesn't delete. It expects data
of the form::
{<ratingsbodies class>: <rating class>, ...}
"""
from . import tasks
if not data:
return
log.info('IARC setting content ratings for app:%s:%s' %
(self.id, self.app_slug))
for ratings_body, rating in data.items():
cr, created = self.content_ratings.safer_get_or_create(
ratings_body=ratings_body.id, defaults={'rating': rating.id})
if not created:
cr.update(rating=rating.id, modified=datetime.datetime.now())
log.info('IARC content ratings set for app:%s:%s' %
(self.id, self.app_slug))
geodata, c = Geodata.objects.get_or_create(addon=self)
save = False
# If app gets USK Rating Refused, exclude it from Germany.
has_usk_refused = self.content_ratings.filter(
ratings_body=mkt.ratingsbodies.USK.id,
rating=mkt.ratingsbodies.USK_REJECTED.id).exists()
save = geodata.region_de_usk_exclude != has_usk_refused
geodata.region_de_usk_exclude = has_usk_refused
# Un-exclude games in Brazil/Germany once they get a content rating.
save = (save or
geodata.region_br_iarc_exclude or
geodata.region_de_iarc_exclude)
geodata.region_br_iarc_exclude = False
geodata.region_de_iarc_exclude = False
# Un-disable apps that were disabled by the great IARC purge.
if (self.status == amo.STATUS_DISABLED and self.iarc_purged):
self.update(status=amo.STATUS_PUBLIC, iarc_purged=False)
if save:
geodata.save()
            log.info('Un-excluding IARC-excluded app:%s from br/de' % self.id)
tasks.index_webapps.delay([self.id])
@write
def set_descriptors(self, data):
"""
Sets IARC rating descriptors on this app.
data -- list of database flags ('has_usk_lang')
"""
create_kwargs = {}
for body in mkt.iarc_mappings.DESCS:
for desc, db_flag in mkt.iarc_mappings.DESCS[body].items():
create_kwargs[db_flag] = db_flag in data
rd, created = RatingDescriptors.objects.get_or_create(
addon=self, defaults=create_kwargs)
if not created:
rd.update(modified=datetime.datetime.now(),
**create_kwargs)
@write
def set_interactives(self, data):
"""
Sets IARC interactive elements on this app.
data -- list of database flags ('has_users_interact')
"""
create_kwargs = {}
for interactive, db_flag in mkt.iarc_mappings.INTERACTIVES.items():
create_kwargs[db_flag] = db_flag in data
ri, created = RatingInteractives.objects.get_or_create(
addon=self, defaults=create_kwargs)
if not created:
ri.update(**create_kwargs)
def set_iarc_storefront_data(self, disable=False):
"""Send app data to IARC for them to verify."""
try:
iarc_info = self.iarc_info
except IARCInfo.DoesNotExist:
# App wasn't rated by IARC, return.
return
release_date = datetime.date.today()
if self.status in amo.WEBAPPS_APPROVED_STATUSES:
version = self.current_version
if version and version.reviewed:
release_date = version.reviewed
elif self.status in amo.WEBAPPS_EXCLUDED_STATUSES:
# Using `_latest_version` since the property returns None when
# deleted.
version = self._latest_version
# Send an empty string to signify the app was removed.
release_date = ''
else:
# If not approved or one of the disabled statuses, we shouldn't be
# calling SET_STOREFRONT_DATA. Ignore this call.
return
log.debug('Calling SET_STOREFRONT_DATA for app:%s' % self.id)
xmls = []
for cr in self.content_ratings.all():
xmls.append(render_xml('set_storefront_data.xml', {
'app_url': self.get_url_path(),
'submission_id': iarc_info.submission_id,
'security_code': iarc_info.security_code,
'rating_system': cr.get_body().iarc_name,
'release_date': '' if disable else release_date,
'title': get_iarc_app_title(self),
'company': version.developer_name if version else '',
'rating': cr.get_rating().iarc_name,
'descriptors': self.rating_descriptors.iarc_deserialize(
body=cr.get_body()),
'interactive_elements':
self.rating_interactives.iarc_deserialize(),
}))
for xml in xmls:
r = get_iarc_client('services').Set_Storefront_Data(XMLString=xml)
log.debug('IARC result app:%s, rating_body:%s: %s' % (
self.id, cr.get_body().iarc_name, r))
def last_rated_time(self):
"""Most recent content rating modified time or None if not rated."""
if self.is_rated():
return self.content_ratings.order_by('-modified')[0].modified
class AddonUpsell(ModelBase):
free = models.ForeignKey(Webapp, related_name='_upsell_from')
premium = models.ForeignKey(Webapp, related_name='_upsell_to')
class Meta:
db_table = 'addon_upsell'
unique_together = ('free', 'premium')
def __unicode__(self):
return u'Free: %s to Premium: %s' % (self.free, self.premium)
@amo.cached_property
def premium_addon(self):
"""
Return the premium version, or None if there isn't one.
"""
try:
return self.premium
except Webapp.DoesNotExist:
pass
def cleanup(self):
try:
# Just accessing these may raise an error.
assert self.free and self.premium
except ObjectDoesNotExist:
log.info('Deleted upsell: from %s, to %s' %
(self.free_id, self.premium_id))
self.delete()
def cleanup_upsell(sender, instance, **kw):
if 'raw' in kw:
return
both = Q(free=instance) | Q(premium=instance)
for upsell in list(AddonUpsell.objects.filter(both)):
upsell.cleanup()
dbsignals.post_delete.connect(cleanup_upsell, sender=Webapp,
dispatch_uid='addon_upsell')
class Trending(ModelBase):
addon = models.ForeignKey(Webapp, related_name='trending')
value = models.FloatField(default=0.0)
# When region=0, it's trending using install counts across all regions.
region = models.PositiveIntegerField(null=False, default=0, db_index=True)
class Meta:
db_table = 'addons_trending'
unique_together = ('addon', 'region')
# Set translated_fields manually to avoid querying translations for addon
# fields we don't use.
Webapp._meta.translated_fields = [
Webapp._meta.get_field('homepage'),
Webapp._meta.get_field('privacy_policy'),
Webapp._meta.get_field('name'),
Webapp._meta.get_field('description'),
Webapp._meta.get_field('support_email'),
Webapp._meta.get_field('support_url'),
]
@receiver(dbsignals.post_save, sender=Webapp,
dispatch_uid='webapp.search.index')
def update_search_index(sender, instance, **kw):
from . import tasks
if not kw.get('raw'):
if instance.upsold and instance.upsold.free_id:
tasks.index_webapps.delay([instance.upsold.free_id])
tasks.index_webapps.delay([instance.id])
@receiver(dbsignals.post_save, sender=AddonUpsell,
dispatch_uid='addonupsell.search.index')
def update_search_index_upsell(sender, instance, **kw):
# When saving an AddonUpsell instance, reindex both apps to update their
# upsell/upsold properties in ES.
from . import tasks
if instance.free:
tasks.index_webapps.delay([instance.free.id])
if instance.premium:
tasks.index_webapps.delay([instance.premium.id])
models.signals.pre_save.connect(save_signal, sender=Webapp,
dispatch_uid='webapp_translations')
@receiver(signals.version_changed, dispatch_uid='update_cached_manifests')
def update_cached_manifests(sender, **kw):
if not kw.get('raw') and sender.is_packaged:
from mkt.webapps.tasks import update_cached_manifests
update_cached_manifests.delay(sender.id)
@Webapp.on_change
def watch_status(old_attr={}, new_attr={}, instance=None, sender=None, **kw):
"""Set nomination date when app is pending review."""
new_status = new_attr.get('status')
if not new_status:
return
addon = instance
old_status = old_attr['status']
# Log all status changes.
if old_status != new_status:
log.info(
'[Webapp:{id}] Status changed from {old_status}:{old_status_name} '
'to {new_status}:{new_status_name}'.format(
id=addon.id, old_status=old_status,
old_status_name=amo.STATUS_CHOICES_API.get(old_status,
'unknown'),
new_status=new_status,
new_status_name=amo.STATUS_CHOICES_API[new_status]))
if new_status == amo.STATUS_PENDING and old_status != new_status:
# We always set nomination date when app switches to PENDING, even if
# previously rejected.
try:
latest = addon.versions.latest()
log.debug('[Webapp:%s] Setting nomination date to now.' % addon.id)
latest.update(nomination=datetime.datetime.now())
except Version.DoesNotExist:
log.debug('[Webapp:%s] Missing version, no nomination set.'
% addon.id)
@Webapp.on_change
def watch_disabled(old_attr={}, new_attr={}, instance=None, sender=None, **kw):
attrs = dict((k, v) for k, v in old_attr.items()
if k in ('disabled_by_user', 'status'))
qs = (File.objects.filter(version__addon=instance.id)
.exclude(version__deleted=True))
if Webapp(**attrs).is_disabled and not instance.is_disabled:
for f in qs:
f.unhide_disabled_file()
if instance.is_disabled and not Webapp(**attrs).is_disabled:
for f in qs:
f.hide_disabled_file()
@receiver(dbsignals.post_save, sender=Webapp,
dispatch_uid='webapp.pre_generate_apk')
def pre_generate_apk(sender=None, instance=None, **kw):
"""
Pre-generate an Android APK for a public app.
"""
if kw.get('raw'):
return
if not getattr(settings, 'PRE_GENERATE_APKS', False):
log.info('[Webapp:{a}] APK pre-generation is disabled.'
.format(a=instance.id))
return
from . import tasks
generated = False
if instance.status in amo.WEBAPPS_APPROVED_STATUSES:
app_devs = set(d.id for d in instance.device_types)
if (amo.DEVICE_MOBILE.id in app_devs or
amo.DEVICE_TABLET.id in app_devs):
tasks.pre_generate_apk.delay(instance.id)
generated = True
log.info('[Webapp:{a}] APK pre-generated? {result}'
.format(a=instance.id, result='YES' if generated else 'NO'))
class Installed(ModelBase):
"""Track WebApp installations."""
addon = models.ForeignKey(Webapp, related_name='installed')
user = models.ForeignKey('users.UserProfile')
uuid = models.CharField(max_length=255, db_index=True, unique=True)
# Because the addon could change between free and premium,
# we need to store the state at time of install here.
premium_type = models.PositiveIntegerField(
null=True, default=None, choices=amo.ADDON_PREMIUM_TYPES.items())
install_type = models.PositiveIntegerField(
db_index=True, default=apps.INSTALL_TYPE_USER,
choices=apps.INSTALL_TYPES.items())
class Meta:
db_table = 'users_install'
unique_together = ('addon', 'user', 'install_type')
@receiver(models.signals.post_save, sender=Installed)
def add_uuid(sender, **kw):
if not kw.get('raw'):
install = kw['instance']
if not install.uuid and install.premium_type is None:
install.uuid = ('%s-%s' % (install.pk, str(uuid.uuid4())))
install.premium_type = install.addon.premium_type
install.save()
class AddonExcludedRegion(ModelBase):
"""
Apps are listed in all regions by default.
When regions are unchecked, we remember those excluded regions.
"""
addon = models.ForeignKey(Webapp, related_name='addonexcludedregion')
region = models.PositiveIntegerField(
choices=mkt.regions.REGIONS_CHOICES_ID)
class Meta:
db_table = 'addons_excluded_regions'
unique_together = ('addon', 'region')
def __unicode__(self):
region = self.get_region()
return u'%s: %s' % (self.addon, region.slug if region else None)
def get_region(self):
return mkt.regions.REGIONS_CHOICES_ID_DICT.get(self.region)
@memoize(prefix='get_excluded_in')
def get_excluded_in(region_id):
"""
Return IDs of Webapp objects excluded from a particular region or excluded
due to Geodata flags.
"""
aers = list(AddonExcludedRegion.objects.filter(region=region_id)
.values_list('addon', flat=True))
# For pre-IARC unrated games in Brazil/Germany.
geodata_qs = Q()
region = parse_region(region_id)
if region in (mkt.regions.BR, mkt.regions.DE):
geodata_qs |= Q(**{'region_%s_iarc_exclude' % region.slug: True})
# For USK_RATING_REFUSED apps in Germany.
if region == mkt.regions.DE:
geodata_qs |= Q(**{'region_de_usk_exclude': True})
geodata_exclusions = []
if geodata_qs:
geodata_exclusions = list(Geodata.objects.filter(geodata_qs)
.values_list('addon', flat=True))
return set(aers + geodata_exclusions)
@receiver(models.signals.post_save, sender=AddonExcludedRegion,
dispatch_uid='clean_memoized_exclusions')
def clean_memoized_exclusions(sender, **kw):
if not kw.get('raw'):
        cache.delete_many([memoize_key('get_excluded_in', k)
                           for k in mkt.regions.ALL_REGION_IDS])
class IARCInfo(ModelBase):
"""
Stored data for IARC.
"""
addon = models.OneToOneField(Webapp, related_name='iarc_info')
submission_id = models.PositiveIntegerField(null=False)
security_code = models.CharField(max_length=10)
class Meta:
db_table = 'webapps_iarc_info'
def __unicode__(self):
return u'app:%s' % self.addon.app_slug
class ContentRating(ModelBase):
"""
Ratings body information about an app.
"""
addon = models.ForeignKey(Webapp, related_name='content_ratings')
ratings_body = models.PositiveIntegerField(
choices=[(k, rb.name) for k, rb in
mkt.ratingsbodies.RATINGS_BODIES.items()],
null=False)
rating = models.PositiveIntegerField(null=False)
class Meta:
db_table = 'webapps_contentrating'
unique_together = ('addon', 'ratings_body')
def __unicode__(self):
return u'%s: %s' % (self.addon, self.get_label())
def get_regions(self):
"""Gives us a list of Region classes that use this rating body."""
# All regions w/o specified ratings bodies fallback to Generic.
generic_regions = []
if self.get_body_class() == mkt.ratingsbodies.GENERIC:
generic_regions = mkt.regions.ALL_REGIONS_WITHOUT_CONTENT_RATINGS()
return ([x for x in mkt.regions.ALL_REGIONS_WITH_CONTENT_RATINGS()
if self.get_body_class() == x.ratingsbody] +
list(generic_regions))
def get_region_slugs(self):
"""Gives us the region slugs that use this rating body."""
if self.get_body_class() == mkt.ratingsbodies.GENERIC:
# For the generic rating body, we just pigeonhole all of the misc.
# regions into one region slug, GENERIC. Reduces redundancy in the
# final data structure. Rather than
# {'pe': {generic_rating}, 'ar': {generic_rating}, etc}, generic
# regions will just use single {'generic': {generic rating}}
return [mkt.regions.GENERIC_RATING_REGION_SLUG]
return [x.slug for x in self.get_regions()]
def get_body_class(self):
return mkt.ratingsbodies.RATINGS_BODIES[self.ratings_body]
def get_body(self):
"""Ratings body instance with translated strings attached."""
return mkt.ratingsbodies.dehydrate_ratings_body(self.get_body_class())
def get_rating_class(self):
return self.get_body_class().ratings[self.rating]
def get_rating(self):
"""Ratings instance with translated strings attached."""
return mkt.ratingsbodies.dehydrate_rating(self.get_rating_class())
def get_label(self):
"""Gives us the name to be used for the form options."""
return u'%s - %s' % (self.get_body().name, self.get_rating().name)
def update_status_content_ratings(sender, instance, **kw):
# Flips the app's status from NULL if it has everything else together.
if (instance.addon.has_incomplete_status() and
instance.addon.is_fully_complete()):
instance.addon.update(status=amo.STATUS_PENDING)
models.signals.post_save.connect(update_status_content_ratings,
sender=ContentRating,
dispatch_uid='c_rating_update_app_status')
# The RatingDescriptors table is created with dynamic fields based on
# mkt.constants.ratingdescriptors.
class RatingDescriptors(ModelBase, DynamicBoolFieldsMixin):
"""
A dynamically generated model that contains a set of boolean values
stating if an app is rated with a particular descriptor.
"""
addon = models.OneToOneField(Webapp, related_name='rating_descriptors')
class Meta:
db_table = 'webapps_rating_descriptors'
def __unicode__(self):
return u'%s: %s' % (self.id, self.addon.name)
def to_keys_by_body(self, body):
return [key for key in self.to_keys() if
key.startswith('has_%s' % body)]
def iarc_deserialize(self, body=None):
"""Map our descriptor strings back to the IARC ones (comma-sep.)."""
keys = self.to_keys()
if body:
keys = [key for key in keys if body.iarc_name.lower() in key]
return ', '.join(iarc_mappings.REVERSE_DESCS.get(desc) for desc
in keys)
# Add a dynamic field to `RatingDescriptors` model for each rating descriptor.
for db_flag, desc in mkt.iarc_mappings.REVERSE_DESCS.items():
field = models.BooleanField(default=False, help_text=desc)
field.contribute_to_class(RatingDescriptors, db_flag)
# The RatingInteractives table is created with dynamic fields based on
# mkt.constants.ratinginteractives.
class RatingInteractives(ModelBase, DynamicBoolFieldsMixin):
"""
A dynamically generated model that contains a set of boolean values
stating if an app features a particular interactive element.
"""
addon = models.OneToOneField(Webapp, related_name='rating_interactives')
class Meta:
db_table = 'webapps_rating_interactives'
def __unicode__(self):
return u'%s: %s' % (self.id, self.addon.name)
def iarc_deserialize(self):
"""Map our descriptor strings back to the IARC ones (comma-sep.)."""
return ', '.join(iarc_mappings.REVERSE_INTERACTIVES.get(inter)
for inter in self.to_keys())
# Add a dynamic field to `RatingInteractives` model for each rating descriptor.
for interactive, db_flag in mkt.iarc_mappings.INTERACTIVES.items():
field = models.BooleanField(default=False, help_text=interactive)
field.contribute_to_class(RatingInteractives, db_flag)
def iarc_cleanup(*args, **kwargs):
instance = kwargs.get('instance')
IARCInfo.objects.filter(addon=instance).delete()
ContentRating.objects.filter(addon=instance).delete()
RatingDescriptors.objects.filter(addon=instance).delete()
RatingInteractives.objects.filter(addon=instance).delete()
# When an app is deleted we need to remove the IARC data so the certificate can
# be re-used later.
models.signals.post_delete.connect(iarc_cleanup, sender=Webapp,
dispatch_uid='webapps_iarc_cleanup')
# The AppFeatures table is created with dynamic fields based on
# mkt.constants.features, which requires some setup work before we call `type`.
class AppFeatures(ModelBase, DynamicBoolFieldsMixin):
"""
A dynamically generated model that contains a set of boolean values
stating if an app requires a particular feature.
"""
version = models.OneToOneField(Version, related_name='features')
field_source = APP_FEATURES
class Meta:
db_table = 'addons_features'
def __unicode__(self):
return u'Version: %s: %s' % (self.version.id, self.to_signature())
def set_flags(self, signature):
"""
Sets flags given the signature.
This takes the reverse steps in `to_signature` to set the various flags
given a signature. Boolean math is used since "0.23.1" is a valid
signature but does not produce a string of required length when doing
string indexing.
"""
fields = self._fields()
# Grab the profile part of the signature and convert to binary string.
try:
profile = bin(int(signature.split('.')[0], 16)).lstrip('0b')
n = len(fields) - 1
for i, f in enumerate(fields):
setattr(self, f, bool(int(profile, 2) & 2 ** (n - i)))
except ValueError as e:
log.error(u'ValueError converting %s. %s' % (signature, e))
def to_signature(self):
"""
This converts the boolean values of the flags to a signature string.
For example, all the flags in APP_FEATURES order produce a string of
binary digits that is then converted to a hexadecimal string with the
length of the features list plus a version appended. E.g.::
>>> profile = '10001010111111010101011'
>>> int(profile, 2)
4554411
>>> '%x' % int(profile, 2)
'457eab'
>>> '%x.%s.%s' % (int(profile, 2), len(profile), 1)
'457eab.23.1'
"""
profile = ''.join('1' if getattr(self, f) else '0'
for f in self._fields())
return '%x.%s.%s' % (int(profile, 2), len(profile),
settings.APP_FEATURES_VERSION)
def to_list(self):
keys = self.to_keys()
# Strip `has_` from each feature.
field_names = [self.field_source[key[4:].upper()]['name']
for key in keys]
return sorted(field_names)
# Add a dynamic field to `AppFeatures` model for each buchet feature.
for k, v in APP_FEATURES.iteritems():
field = models.BooleanField(default=False, help_text=v['name'])
field.contribute_to_class(AppFeatures, 'has_%s' % k.lower())
class AppManifest(ModelBase):
"""
Storage for manifests.
Tied to version since they change between versions. This stores both hosted
and packaged apps manifests for easy access.
"""
version = models.OneToOneField(Version, related_name='manifest_json')
manifest = models.TextField()
class Meta:
db_table = 'app_manifest'
class RegionListField(json_field.JSONField):
def to_python(self, value):
value = super(RegionListField, self).to_python(value)
if value:
value = [int(v) for v in value]
return value
class Geodata(ModelBase):
"""TODO: Forgo AER and use bool columns for every region and carrier."""
addon = models.OneToOneField(Webapp, related_name='_geodata')
restricted = models.BooleanField(default=False)
popular_region = models.CharField(max_length=10, null=True)
banner_regions = RegionListField(default=None, null=True)
banner_message = PurifiedField()
# Exclude apps with USK_RATING_REFUSED in Germany.
region_de_usk_exclude = models.BooleanField(default=False)
class Meta:
db_table = 'webapps_geodata'
def __unicode__(self):
return u'%s (%s): <Webapp %s>' % (
self.id, 'restricted' if self.restricted else 'unrestricted',
self.addon.id)
def get_status(self, region):
"""
Return the status of listing in a given region (e.g., China).
"""
return getattr(self, 'region_%s_status' % parse_region(region).slug,
amo.STATUS_PUBLIC)
def set_status(self, region, status, save=False):
"""Return a tuple of `(value, changed)`."""
value, changed = None, False
attr = 'region_%s_status' % parse_region(region).slug
if hasattr(self, attr):
value = setattr(self, attr, status)
if self.get_status(region) != value:
changed = True
# Save only if the value is different.
if save:
self.save()
return None, changed
def get_status_slug(self, region):
return {
amo.STATUS_PENDING: 'pending',
amo.STATUS_PUBLIC: 'public',
amo.STATUS_REJECTED: 'rejected',
}.get(self.get_status(region), 'unavailable')
@classmethod
def get_status_messages(cls):
return {
# L10n: An app is awaiting approval for a particular region.
'pending': _('awaiting approval'),
# L10n: An app is rejected for a particular region.
'rejected': _('rejected'),
# L10n: An app requires additional review for a particular region.
'unavailable': _('requires additional review')
}
def banner_regions_names(self):
if self.banner_regions is None:
return []
return sorted(unicode(mkt.regions.REGIONS_CHOICES_ID_DICT.get(k).name)
for k in self.banner_regions)
def banner_regions_slugs(self):
if self.banner_regions is None:
return []
return sorted(unicode(mkt.regions.REGIONS_CHOICES_ID_DICT.get(k).slug)
for k in self.banner_regions)
def get_nominated_date(self, region):
"""
Return the timestamp of when the app was approved in a region.
"""
return getattr(self,
'region_%s_nominated' % parse_region(region).slug)
def set_nominated_date(self, region, timestamp=None, save=False):
"""Return a tuple of `(value, saved)`."""
value, changed = None, False
attr = 'region_%s_nominated' % parse_region(region).slug
if hasattr(self, attr):
if timestamp is None:
timestamp = datetime.datetime.now()
value = setattr(self, attr, timestamp)
if self.get_nominated_date(region) != value:
changed = True
# Save only if the value is different.
if save:
self.save()
return None, changed
# (1) Add a dynamic status field to `Geodata` model for each special region:
# - 0: STATUS_NULL (Unavailable)
# - 2: STATUS_PENDING (Pending)
# - 4: STATUS_PUBLIC (Public)
# - 12: STATUS_REJECTED (Rejected)
#
# (2) Add a dynamic nominated field to keep track of timestamp for when
# the developer requested approval for each region.
for region in mkt.regions.SPECIAL_REGIONS:
help_text = _('{region} approval status').format(region=region.name)
field = models.PositiveIntegerField(help_text=help_text,
choices=amo.STATUS_CHOICES.items(), db_index=True,
default=amo.STATUS_PENDING)
field.contribute_to_class(Geodata, 'region_%s_status' % region.slug)
help_text = _('{region} nomination date').format(region=region.name)
field = models.DateTimeField(help_text=help_text, null=True)
field.contribute_to_class(Geodata, 'region_%s_nominated' % region.slug)
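# Hedged illustration (assuming a region with slug 'cn' is among SPECIAL_REGIONS): the
# loop above gives each Geodata instance per-region attributes such as
#   geodata.region_cn_status     -> amo.STATUS_PENDING by default
#   geodata.region_cn_nominated  -> None until a nomination date is set
# which get_status()/set_status() and get_nominated_date()/set_nominated_date() wrap.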
# Add a dynamic field to `Geodata` model to exclude pre-IARC public unrated
# Brazil and Germany games.
for region in (mkt.regions.BR, mkt.regions.DE):
field = models.BooleanField(default=False)
field.contribute_to_class(Geodata, 'region_%s_iarc_exclude' % region.slug)
# Save geodata translations when a Geodata instance is saved.
models.signals.pre_save.connect(save_signal, sender=Geodata,
dispatch_uid='geodata_translations')
|
ngokevin/zamboni
|
mkt/webapps/models.py
|
Python
|
bsd-3-clause
| 103,635
|
from django.conf.urls import include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = [
url(r'', include('zendesk_auth.urls')),
url(r'^admin/', include(admin.site.urls)),
]
|
madisona/zendesk_django_auth
|
example/urls.py
|
Python
|
bsd-3-clause
| 208
|
###############################################################################
##
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
import sys
import copy
from datetime import datetime
from time import strptime
from core.data_structures.graph import Graph
from db.versions.v0_5_0.domain import DBVistrail, DBAction, DBTag, DBModule, \
DBConnection, DBPortSpec, DBFunction, DBParameter, DBLocation, DBAdd, \
DBChange, DBDelete, DBAnnotation, DBPort
def convertDate(date):
if date is not None and date != '':
return datetime(*strptime(date, '%d %b %Y %H:%M:%S')[0:6])
return datetime(1900, 1, 1)
def translateVistrail(_vistrail):
vistrail = DBVistrail()
for _action in _vistrail.db_get_actions():
# print 'translating action %s' % _action.db_time
functionName = 'translate%s%sAction' % \
(_action.db_what[0].upper(), _action.db_what[1:])
thisModule = sys.modules[__name__]
action = getattr(thisModule, functionName)(_action)
vistrail.db_add_action(action)
for _tag in _vistrail.db_get_tags():
tag = DBTag(time=_tag.db_time,
name=_tag.db_name)
vistrail.db_add_tag(tag)
convertIds(vistrail)
# for action in vistrail.getActions():
# print '%s %s' % (action.id, action.operations)
vistrail.db_version = '0.5.0'
return vistrail
def translateAddModuleAction(_action):
operations = []
for _module in _action.db_datas:
module = DBModule(id=_module.db_id,
name=_module.db_name,
cache=1,
location=DBLocation(id=_module.db_id,
x=_module.db_x,
y=_module.db_y))
module.db_location.relative = False
operation = DBAdd(id=_action.db_time,
what='module',
objectId=_module.db_id,
data=module)
operations.append(operation)
action = DBAction(id=_action.db_time,
prevId=_action.db_parent,
date=convertDate(_action.db_date),
user=_action.db_user,
operations=operations)
return action
def translateAddConnectionAction(_action):
operations = []
for _connection in _action.db_datas:
source = DBPort(id=_connection.db_id,
type='source',
moduleId=_connection.db_sourceId,
moduleName=_connection.db_sourceModule,
sig=_connection.db_sourcePort)
destination = DBPort(id=_connection.db_id,
type='destination',
moduleId=_connection.db_destinationId,
moduleName=_connection.db_destinationModule,
sig=_connection.db_destinationPort)
connection = DBConnection(id=_connection.db_id,
ports=[source, destination])
operation = DBAdd(id=_action.db_time,
what='connection',
objectId=_connection.db_id,
data=connection)
operations.append(operation)
action = DBAction(id=_action.db_time,
prevId=_action.db_parent,
date=convertDate(_action.db_date),
user=_action.db_user,
operations=operations)
return action
def translateChangeParameterAction(_action):
operations = []
for _set in _action.db_datas:
parameter = DBParameter(id=_set.db_parameterId,
pos=_set.db_parameterId,
name=_set.db_parameter,
alias=_set.db_alias,
val=_set.db_value,
type=_set.db_type)
function = DBFunction(id=_set.db_functionId,
pos=_set.db_functionId,
name=_set.db_function,
parameters=[parameter])
operation = DBChange(id=_action.db_time,
what='function',
oldObjId=_set.db_functionId,
parentObjId=_set.db_moduleId,
parentObjType='module',
data=function)
operations.append(operation)
action = DBAction(id=_action.db_time,
prevId=_action.db_parent,
date=convertDate(_action.db_date),
user=_action.db_user,
operations=operations)
return action
def translateAddModulePortAction(_action):
operations = []
for _portSpec in _action.db_datas:
# ids need to be checked
portSpec = DBPortSpec(id=_portSpec.db_moduleId,
name=_portSpec.db_portName,
type=_portSpec.db_portType,
spec=_portSpec.db_portSpec)
operation = DBAdd(id=_action.db_time,
what='portSpec',
objectId=(_portSpec.db_portName,
_portSpec.db_portType),
parentObjId=_portSpec.db_moduleId,
parentObjType='module',
data=portSpec)
operations.append(operation)
action = DBAction(id=_action.db_time,
prevId=_action.db_parent,
date=convertDate(_action.db_date),
user=_action.db_user,
operations=operations)
return action
def translateChangeAnnotationAction(_action):
operations = []
for _annotation in _action.db_datas:
if _annotation.db_key.strip() != '' or \
_annotation.db_value.strip() != '':
annotation = DBAnnotation(id=-1,
key=_annotation.db_key,
value=_annotation.db_value)
operation = DBChange(id=_action.db_time,
what='annotation',
oldObjId=_annotation.db_key,
parentObjId=_annotation.db_moduleId,
parentObjType='module',
data=annotation)
operations.append(operation)
action = DBAction(id=_action.db_time,
prevId=_action.db_parent,
date=convertDate(_action.db_date),
user=_action.db_user,
operations=operations)
return action
def translateDeleteModuleAction(_action):
operations = []
for _module in _action.db_datas:
operation = DBDelete(id=_action.db_time,
what='module',
objectId=_module.db_moduleId)
operations.append(operation)
action = DBAction(id=_action.db_time,
prevId=_action.db_parent,
date=convertDate(_action.db_date),
user=_action.db_user,
operations=operations)
return action
def translateDeleteConnectionAction(_action):
operations = []
for _connection in _action.db_datas:
operation = DBDelete(id=_action.db_time,
what='connection',
objectId=_connection.db_connectionId)
operations.append(operation)
action = DBAction(id=_action.db_time,
prevId=_action.db_parent,
date=convertDate(_action.db_date),
user=_action.db_user,
operations=operations)
return action
def translateDeleteFunctionAction(_action):
operations = []
for _function in _action.db_datas:
operation = DBDelete(id=_action.db_time,
what='function',
objectId=_function.db_functionId,
parentObjId=_function.db_moduleId,
parentObjType='module')
operations.append(operation)
action = DBAction(id=_action.db_time,
prevId=_action.db_parent,
date=convertDate(_action.db_date),
user=_action.db_user,
operations=operations)
return action
def translateDeleteAnnotationAction(_action):
operations = []
for _annotation in _action.db_datas:
operation = DBDelete(id=_action.db_time,
what='annotation',
objectId=_annotation.db_key)
operations.append(operation)
action = DBAction(id=_action.db_time,
prevId=_action.db_parent,
date=convertDate(_action.db_date),
user=_action.db_user,
operations=operations)
return action
def translateDeleteModulePortAction(_action):
operations = []
for _portSpec in _action.db_datas:
operation = DBDelete(id=_action.db_time,
what='portSpec',
objectId=(_portSpec.db_portName,
_portSpec.db_portType),
parentObjId=_portSpec.db_moduleId,
parentObjType='module')
operations.append(operation)
action = DBAction(id=_action.db_time,
prevId=_action.db_parent,
date=convertDate(_action.db_date),
user=_action.db_user,
operations=operations)
return action
def translateMoveModuleAction(_action):
operations = []
for _location in _action.db_datas:
location = DBLocation(id=_location.db_id,
x=_location.db_dx,
y=_location.db_dy)
location.relative = True
operation = DBChange(id=_action.db_time,
what='location',
oldObjId=_location.db_id,
parentObjId=_location.db_id,
parentObjType='module',
data=location)
operations.append(operation)
action = DBAction(id=_action.db_time,
prevId=_action.db_parent,
date=convertDate(_action.db_date),
user=_action.db_user,
operations=operations)
return action
### UPDATE IDS ###
def convertIds(vistrail):
actions = vistrail.db_get_actions()
actions.sort(key=lambda x: x.db_id)
objectDict = {}
# refDict = {'objectDict': objectDict}
graph = Graph()
for action in actions:
graph.add_vertex(action.db_id)
graph.add_edge(action.db_prevId, action.db_id)
def convertAction(actionId):
if actionId == 0:
return
allOps = []
action = vistrail.db_get_action(actionId)
# objectDict = refDict['objectDict']
# if action.actionType == 'delete' or action.actionType == 'change':
# action.objectDict = copy.deepcopy(objectDict)
# else:
# action.objectDict = objectDict
for operation in action.db_get_operations():
allOps.extend(convertOperation(vistrail,
objectDict,
operation.vtType,
operation))
action.db_operations = allOps
def removeObjects(actionId):
if actionId == 0:
return
# print "removeObjects(%s)" % actionId
action = vistrail.db_get_action(actionId)
# need to reverse ops here
reverseOps = action.db_get_operations()
reverseOps.reverse()
for operation in reverseOps:
parentList = getTypeIdList(operation)
removeObject(operation.db_what,
operation.db_oldId,
objectDict,
parentList[:-1])
reverseOps.reverse()
graph.dfs(enter_vertex=convertAction,
leave_vertex=removeObjects)
def getTypeIdList(operation):
if operation.db_what in ('module', 'connection'):
return [(operation.db_what, operation.db_oldId)]
elif operation.db_what in \
('function', 'portSpec', 'location', 'annotation'):
return [('module', operation.db_oldParentId),
(operation.db_what, operation.db_oldId)]
    elif operation.db_what == 'port':
        return [('connection', operation.db_oldParentId),
                (operation.db_what, operation.db_oldId)]
    elif operation.db_what == 'parameter':
        return [('module', operation.db_moduleId),
                ('function', operation.db_oldParentId),
                ('parameter', operation.db_oldId)]
else:
print "unknown type: '%s'" % operation.db_what
return [(operation.db_what, operation.db_oldId)]
def getOldId(object):
if object.vtType == 'annotation':
return object.db_key
elif object.vtType == 'port':
return object.db_type
elif object.vtType == 'portSpec':
return (object.db_name, object.db_type)
else:
return object.getPrimaryKey()
def getChildren(object):
childList = []
if object.vtType == 'module':
childList = object.db_get_functions() + \
object.db_get_portSpecs() + \
object.db_get_annotations()
childList.append(object.db_location)
object.db_functions = []
object.db_portSpecs = []
object.db_annotations = {}
object.db_location = None
elif object.vtType == 'connection':
childList = object.db_get_ports()
object.db_ports = []
elif object.vtType == 'function':
childList = object.db_get_parameters()
object.db_parameters = []
return childList
def captureObject(object, objectDict, newId, parentList):
# print "capturing %s" % object
currentDict = objectDict
for key in parentList:
(objType, objId) = key
# (currentId, newDict, _) = currentDict[(objType, objId)]
# currentDict = newDict
(objList, curIdx) = currentDict[(objType, objId)]
currentDict = objList[curIdx][1]
oldId = getOldId(object)
# print "capture: %s %s" % (object.vtType, oldId)
# currentDict[(object.vtType, oldId)] = (newId, {}, object)
if not currentDict.has_key((object.vtType, oldId)):
currentDict[(object.vtType, oldId)] = ([], -1)
(curList, curIdx) = currentDict[(object.vtType, oldId)]
curList.append((newId, {}, object, curIdx))
currentDict[(object.vtType, oldId)] = (curList, len(curList) - 1)
def captureDelete(objType, objId, objectDict, parentList):
currentDict = objectDict
for (aType, aId) in parentList:
# (currentId, newDict, _) = currentDict[(objType, objId)]
# currentDict = newDict
(objList, curIdx) = currentDict[(aType, aId)]
currentDict = objList[curIdx][1]
# print "captureDelete: %s %s" % (objType, objId)
if not currentDict.has_key((objType, objId)):
raise Exception("invalid delete")
(curList, curIdx) = currentDict[(objType, objId)]
curList.append((-1, {}, None, curIdx))
currentDict[(objType, objId)] = (curList, len(curList) - 1)
def removeObject(oldObjType, oldId, objectDict, parentList):
# print '%s %s' % (oldObjType, oldId)
# print objectDict
# print parentList
try:
currentDict = objectDict
for key in parentList:
(objType, objId) = key
# (currentId, newDict, _) = currentDict[(objType, objId)]
# currentDict = newDict
(objList, objIdx) = currentDict[(objType, objId)]
currentDict = objList[objIdx][1]
# print "remove: %s %s" % (oldObjType, oldId)
(curList, curIdx) = currentDict[(oldObjType, oldId)]
# print "ok"
newIdx = curList[curIdx][3]
# del curList[curIdx]
currentDict[(oldObjType, oldId)] = (curList, newIdx)
except KeyError:
print "cannot remove (%s, %s)" % (oldObjType, oldId)
print parentList
print objList
print "index: %s" % objIdx
def findNewId(typeIdList, objectDict):
try:
currentDict = objectDict
for key in typeIdList:
# (currentId, currentDict, currentObj) = currentDict[key]
(objList, curIdx) = currentDict[key]
if curIdx == -1:
return (None, None)
(currentId, currentDict, currentObj, _) = objList[curIdx]
if currentId == -1:
return (None, None)
return (currentId, currentObj)
except KeyError:
pass
return (None, None)
def getChildList(typeIdList, objectDict):
try:
currentDict = objectDict
for (objType, objOldId) in typeIdList:
# (currentId, currentDict, _) = currentDict[(objType, objOldId)]
(objList, curIdx) = currentDict[(objType, objOldId)]
if curIdx == -1:
return {}
currentDict = objList[curIdx][1]
return currentDict
except KeyError:
pass
return {}
def createOperation(actionType, objId, objType, parentId, parentType,
object=None):
if actionType == 'add':
operation = DBAdd(what=objType,
objectId=objId,
parentObjId=parentId,
parentObjType=parentType,
data=object)
elif actionType == 'change':
operation = DBChange(what=objType,
oldObjId=objId,
parentObjId=parentId,
parentObjType=parentType,
data=object)
elif actionType == 'delete':
operation = DBDelete(what=objType,
objectId=objId,
parentObjId=parentId,
parentObjType=parentType)
else:
msg = "Cannot find actionType='%s'" % actionType
raise Exception(msg)
return operation
def convertChangeToAdd(operation):
return DBAdd(what=operation.db_what,
objectId=operation.db_newObjId,
parentObjId=operation.db_parentObjId,
parentObjType=operation.db_parentObjType,
data=operation.db_data)
def convertOperation(vistrail, objectDict, actionType, operation):
newOps = []
if actionType == 'add':
object = operation.db_data
if object.vtType == 'parameter' and object.db_pos == -1:
return newOps
operation.db_oldId = operation.db_objectId
if operation.db_what == 'annotation':
operation.db_oldId = object.db_key
elif operation.db_what == 'port':
operation.db_oldId = object.db_type
operation.db_oldParentId = operation.db_parentObjId
parentList = getTypeIdList(operation)
newId = vistrail.idScope.getNewId(object.vtType)
captureObject(object, objectDict, newId, parentList[:-1])
operation.db_objectId = newId
oldId = object.getPrimaryKey()
if object.vtType == 'annotation':
oldId = object.db_key
elif object.vtType == 'port':
oldId = object.db_type
if hasattr(object, 'db_id'):
object.db_id = newId
# set parent ids correctly...
operation.db_id = vistrail.idScope.getNewId('operation')
if operation.db_parentObjId is not None:
oldParentObjId = operation.db_parentObjId
operation.db_parentObjId = findNewId(parentList[:-1], objectDict)[0]
if object.vtType == 'port':
object.db_moduleId = \
findNewId([('module', object.db_moduleId)], objectDict)[0]
# if object.vtType == 'connection':
# for port in object.db_ports.itervalues():
# port.db_moduleId = \
# findNewId([('module', port.db_moduleId)], objectDict)[0]
newOps.append(operation)
# set child operations
children = getChildren(object)
for child in children:
# hack to get around fact that location ids are wrong
if child.vtType == 'location':
child.db_id = oldId
newOp = createOperation('add',
child.getPrimaryKey(),
child.vtType,
oldId,
object.vtType,
child)
# hack to get moduleId at parameter level
if child.vtType == 'parameter':
newOp.db_moduleId = oldParentObjId
newOps.extend(convertOperation(vistrail,
objectDict,
'add',
newOp))
newOp.db_parentObjId = newId
elif actionType == 'change':
object = operation.db_data
if object.vtType == 'parameter' and object.db_pos == -1:
return newOps
operation.db_oldId = operation.db_oldObjId
if operation.db_what == 'annotation':
operation.db_oldId = object.db_key
elif operation.db_what == 'port':
operation.db_oldId = object.db_type
operation.db_oldParentId = operation.db_parentObjId
parentList = getTypeIdList(operation)
# need to get changed id as new id if have one
(foundId, foundObj) = findNewId(parentList, objectDict)
if foundId is not None:
if foundObj.vtType == 'function' and \
foundObj.db_pos == object.db_pos and \
foundObj.db_name == object.db_name:
# don't create new function, convert parameter
for parameter in object.db_parameters:
newOp = createOperation('change',
parameter.getPrimaryKey(),
parameter.vtType,
object.getPrimaryKey(),
object.vtType,
parameter)
newOp.db_moduleId = operation.db_parentObjId
newOps.extend(convertOperation(vistrail,
objectDict,
'change',
newOp))
newOp.db_parentObjId = foundId
return newOps
else:
if foundObj.vtType == 'location' and object.relative == True:
object.db_x += foundObj.db_x
object.db_y += foundObj.db_y
object.relative = False
# get new id for new object
newId = vistrail.idScope.getNewId(object.vtType)
operation.db_oldObjId = foundId
operation.db_newObjId = newId
else:
# get new id for new object
newId = vistrail.idScope.getNewId(object.vtType)
operation.db_oldObjId = -1
operation.db_newObjId = newId
anOldId = operation.db_oldId
anOldParentId = operation.db_parentObjId
if hasattr(operation,'db_moduleId'):
aModuleId = operation.db_moduleId
else:
aModuleId = None
operation = convertChangeToAdd(operation)
operation.db_oldId = anOldId
operation.db_oldParentId = operation.db_parentObjId
operation.db_moduleId = aModuleId
# need to do child deletes first
childDict = getChildList(parentList, objectDict)
for k,v in childDict.items():
(objType, objId) = k
# (newId, newDict) = v
# print 'creating delete for %s' % objType
newOp = createOperation('delete',
objId,
objType,
object.getPrimaryKey(),
object.vtType)
# hack to get moduleId at parameter level
if objType == 'parameter':
newOp.db_moduleId = operation.db_parentObjId
newOps.extend(convertOperation(vistrail,
objectDict,
'delete',
newOp))
newOp.db_parentObjId = newId
# don't reverse -- ordering is correct
# newOps.reverse()
# set new object id
captureObject(object, objectDict, newId, parentList[:-1])
# operation.db_objectId = newId
oldId = object.getPrimaryKey()
if object.vtType == 'annotation':
oldId = object.db_key
elif object.vtType == 'port':
oldId = object.db_type
if hasattr(object, 'db_id'):
object.db_id = newId
# set parent ids correctly...
operation.db_id = vistrail.idScope.getNewId('operation')
if operation.db_parentObjId is not None:
oldParentObjId = operation.db_parentObjId
operation.db_parentObjId = findNewId(parentList[:-1], objectDict)[0]
if object.vtType == 'port':
object.db_moduleId = \
findNewId([('module', object.db_moduleId)], objectDict)[0]
# if object.vtType == 'connection':
# for port in object.db_ports.itervalues():
# port.db_moduleId = \
# findNewId([('module', port.db_moduleId)], objectDict)[0]
newOps.append(operation)
# set child operations
children = getChildren(operation.db_data)
for child in children:
# print 'creating add for %s' % child.vtType
newOp = createOperation('add',
child.getPrimaryKey(),
child.vtType,
oldId,
object.vtType,
child)
# hack to get moduleId at parameter level
if child.vtType == 'parameter':
newOp.db_moduleId = oldParentObjId
newOps.extend(convertOperation(vistrail,
objectDict,
'add',
newOp))
newOp.db_parentObjId = newId
elif actionType == 'delete':
operation.db_oldId = operation.db_objectId
# if operation.db_what == 'annotation':
# operation.db_oldId = object.db_key
# elif operation.db_what == 'port':
# operation.db_oldId = object.db_type
operation.db_oldParentId = operation.db_parentObjId
parentList = getTypeIdList(operation)
# get new id for delete operation
(newId, _) = findNewId(parentList, objectDict)
# print 'found new id: %s' % newId
if newId is None:
msg = "Cannot find id: %s" % parentList
print msg
# raise Exception(msg)
return []
# need to do child deletes first
childDict = getChildList(parentList, objectDict)
for k,v in childDict.items():
(objType, objId) = k
# (newId, newDict) = v
newOp = createOperation('delete',
objId,
objType,
operation.db_objectId,
operation.db_what)
# hack to get moduleId at parameter level
if objType == 'parameter':
newOp.db_moduleId = operation.db_parentObjId
newOps.extend(convertOperation(vistrail,
objectDict,
'delete',
newOp))
newOp.db_parentObjId = newId
# newOps.reverse()
captureDelete(operation.db_what, operation.db_objectId, objectDict,
parentList[:-1])
operation.db_objectId = newId
# set parent ids correctly
operation.db_id = vistrail.idScope.getNewId('operation')
if operation.db_parentObjId is not None:
operation.db_parentObjId = findNewId(parentList[:-1], objectDict)[0]
newOps.append(operation)
return newOps
|
CMUSV-VisTrails/WorkflowRecommendation
|
vistrails/db/versions/v0_5_0/translate/v0_3_1.py
|
Python
|
bsd-3-clause
| 30,874
|
#!/usr/bin/env python
# Copyright (c) 2014, Warren Weckesser
# All rights reserved.
# See the LICENSE file for license information.
from os import path
from setuptools import setup
def get_odeintw_version():
"""
Find the value assigned to __version__ in odeintw/__init__.py.
This function assumes that there is a line of the form
__version__ = "version-string"
in odeintw/__init__.py. It returns the string version-string, or None if
such a line is not found.
"""
with open(path.join("odeintw", "__init__.py"), "r") as f:
for line in f:
s = [w.strip() for w in line.split("=", 1)]
if len(s) == 2 and s[0] == "__version__":
return s[1][1:-1]
_descr = ('Solve complex and matrix differential equations '
'with scipy.integrate.odeint.')
_long_descr = """
odeintw
=======
`odeintw` provides a wrapper of `scipy.integrate.odeint` that allows it to
handle complex and matrix differential equations. That is, it can solve
equations of the form
dZ/dt = F(Z, t, param1, param2, ...)
where `t` is real and `Z` is a real or complex array.
Since `odeintw` is just a wrapper of `scipy.integrate.odeint`, it requires
`scipy` to be installed. SciPy 0.15 or greater is required, to avoid a
bug in `scipy.integrate.odeint` in older versions of SciPy.
See README.md at https://github.com/WarrenWeckesser/odeintw for examples.
"""
setup(name='odeintw',
version=get_odeintw_version(),
description=_descr,
long_description=_long_descr,
author='Warren Weckesser',
url='https://github.com/WarrenWeckesser/odeintw',
packages=['odeintw'],
classifiers=[
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
],
keywords="scipy odeint",
install_requires=['scipy'])
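# Hedged usage sketch (illustrative only, not taken from this repository's README):
# odeintw mirrors the scipy.integrate.odeint calling convention, but the state may be
# complex-valued or a matrix.
#
#   import numpy as np
#   from odeintw import odeintw
#
#   def func(z, t, c):
#       return c * z                      # dZ/dt = c*Z with complex c
#
#   t = np.linspace(0, 1, 11)
#   z = odeintw(func, 1.0 + 0.5j, t, args=(-0.5 + 1.0j,))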
|
WarrenWeckesser/odeintw
|
setup.py
|
Python
|
bsd-3-clause
| 1,957
|
from __future__ import absolute_import, division, print_function
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "functions"))
import time as time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.cluster import AgglomerativeClustering
from Image_Visualizing import present_3d, make_mask
# Relative path to subject 1 data
pathtodata = "../../../data/ds009/sub001/"
condition_location=pathtodata+"model/model001/onsets/task001_run001/"
location_of_images="../../../images/"
sys.path.append(os.path.join(os.path.dirname(__file__), "../functions/"))
data = np.load('cluster_mask.npy')
data_new = data[..., 10:13]
X = np.reshape(data_new, (-1, 1))
connectivity = grid_to_graph(n_x= data_new.shape[0], n_y = data_new.shape[1], n_z = data_new.shape[2])
st = time.time()
n_clusters = 7 # number of regions
ward = AgglomerativeClustering(n_clusters=n_clusters,
linkage='ward', connectivity=connectivity).fit(X)
label = np.reshape(ward.labels_, data_new.shape)
label_mean = np.zeros(n_clusters)
center = list()
# Find the average t-value and the center of mass for each cluster.
for j in range(n_clusters):
    mask = label == j
    index = np.where(mask)
    center.append((np.mean(index[0]), np.mean(index[1]), np.mean(index[2])))
    label_mean[j] = np.mean(data_new[mask])
# Plot each slice with the cluster contours overlaid and save the figures.
for i in range(data_new.shape[-1]):
plt.figure()
plt.imshow(data_new[...,i], cmap=plt.cm.gray)
for l in range(n_clusters):
plt.contour(label[...,i] == l, contours=1,
colors=[plt.cm.spectral(l / float(n_clusters)), ],linewidths= 0.4)
plt.xticks(())
plt.yticks(())
plt.savefig(location_of_images+"ward"+str(i)+'.png')
|
berkeley-stat159/project-alpha
|
code/utils/scripts/cluster.py
|
Python
|
bsd-3-clause
| 1,770
|
import math
from .sum import sum
def root_mean_square(x):
"""
Root mean square (RMS) is the square root of the sum of the squares of values in a list
divided by the length of the list. It is a mean function that measures the magnitude
of values in the list regardless of their sign.
Args:
x: A list or tuple of numerical objects.
Returns:
A float of the root mean square of the list.
Examples:
>>> root_mean_square([-1, 1, -1, 1])
1.0
>>> root_mean_square((9, 4))
6.96419413859206
>>> root_mean_square(9)
Traceback (most recent call last):
...
TypeError: root_mean_square() expects a list or a tuple.
"""
if type(x) not in [list, tuple]:
raise TypeError('root_mean_square() expects a list or a tuple.')
    squares = [pow(num, 2) for num in x]
sum_of_squares = sum(squares)
ms = sum_of_squares / len(x)
rms = math.sqrt(ms)
return(rms)
|
jhowardanderson/simplestatistics
|
simplestatistics/statistics/root_mean_square.py
|
Python
|
bsd-3-clause
| 1,006
|
from django import VERSION
from django.core.management.commands.loaddata import Command as LoadDataCommand
# Because this command is used (instead of default loaddata), then settings have been imported
# and we can safely import MT modules
from wagtail_modeltranslation import settings as mt_settings
from wagtail_modeltranslation.utils import auto_populate
ALLOWED = (None, False, 'all', 'default', 'required')
ALLOWED_FOR_PRINT = ', '.join(str(i) for i in (0, ) + ALLOWED[1:]) # For pretty-printing
def check_mode(option, opt_str, value, parser, namespace=None):
if value == '0' or value.lower() == 'false':
value = False
if value not in ALLOWED:
raise ValueError("%s option can be only one of: %s" % (opt_str, ALLOWED_FOR_PRINT))
setattr(namespace or parser.values, option.dest, value)
class Command(LoadDataCommand):
leave_locale_alone = mt_settings.LOADDATA_RETAIN_LOCALE # Django 1.6
    help = ('Using this option will cause fixtures to be loaded under auto-population MODE. ' +
'Allowed values are: %s' % ALLOWED_FOR_PRINT)
if VERSION < (1, 8):
from optparse import make_option
option_list = LoadDataCommand.option_list + (
make_option('--populate', action='callback', callback=check_mode, type='string',
dest='populate', metavar='MODE', help=help),
)
else:
import argparse
class CheckAction(argparse.Action):
def __call__(self, parser, namespace, value, option_string=None):
check_mode(self, option_string, value, parser, namespace)
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
parser.add_argument('--populate', action=self.CheckAction, type=str, dest='populate',
metavar='MODE', help=self.help)
def __init__(self):
super(Command, self).__init__()
if mt_settings.LOADDATA_RETAIN_LOCALE and VERSION < (1, 6):
from django.utils import translation
self.locale = translation.get_language()
def handle(self, *fixture_labels, **options):
if self.can_import_settings and hasattr(self, 'locale'):
from django.utils import translation
translation.activate(self.locale)
mode = options.get('populate')
if mode is not None:
with auto_populate(mode):
return super(Command, self).handle(*fixture_labels, **options)
else:
return super(Command, self).handle(*fixture_labels, **options)
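# Hedged CLI sketch of the --populate MODE option defined above (fixture name is made up):
#   python manage.py loaddata myfixture.json --populate required
#   python manage.py loaddata myfixture.json --populate 0        # disable auto-population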
|
tomdyson/wagtail-modeltranslation
|
wagtail_modeltranslation/management/commands/loaddata.py
|
Python
|
bsd-3-clause
| 2,588
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from decimal import Decimal
from django.utils.translation import ugettext_lazy as _
from shop import messages
from shop.exceptions import ProductNotAvailable
from shop.money import AbstractMoney, Money
from shop.modifiers.base import BaseCartModifier
class DefaultCartModifier(BaseCartModifier):
"""
This modifier is required for almost every shopping cart. It handles the most basic
calculations, ie. multiplying the items unit prices with the chosen quantity.
Since this modifier sets the cart items line total, it must be listed as the first
entry in `SHOP_CART_MODIFIERS`.
"""
identifier = 'default'
def pre_process_cart_item(self, cart, cart_item, request, raise_exception=False):
"""
Limit the ordered quantity in the cart to the availability in the inventory.
"""
kwargs = {'product_code': cart_item.product_code}
kwargs.update(cart_item.extra)
availability = cart_item.product.get_availability(request, **kwargs)
if cart_item.quantity > availability.quantity:
if raise_exception:
raise ProductNotAvailable(cart_item.product)
cart_item.quantity = availability.quantity
cart_item.save(update_fields=['quantity'])
message = _("The ordered quantity for item '{product_name}' has been adjusted to "\
"{quantity} which is the maximum, currently available in stock.").\
format(product_name=cart_item.product.product_name, quantity=availability.quantity)
messages.info(request, message, title=_("Verify Quantity"), delay=5)
return super(DefaultCartModifier, self).pre_process_cart_item(cart, cart_item, request, raise_exception)
def process_cart_item(self, cart_item, request):
cart_item.unit_price = cart_item.product.get_price(request)
cart_item.line_total = cart_item.unit_price * cart_item.quantity
return super(DefaultCartModifier, self).process_cart_item(cart_item, request)
def process_cart(self, cart, request):
if not isinstance(cart.subtotal, AbstractMoney):
# if we don't know the currency, use the default
cart.subtotal = Money(cart.subtotal)
cart.total = cart.subtotal
return super(DefaultCartModifier, self).process_cart(cart, request)
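# Hedged configuration sketch for the ordering requirement described in the docstring
# above (dotted paths assumed from this module's location):
#   SHOP_CART_MODIFIERS = (
#       'shop.modifiers.defaults.DefaultCartModifier',   # must come first
#       'shop.modifiers.defaults.WeightedCartModifier',
#   )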
class WeightedCartModifier(BaseCartModifier):
"""
This modifier is required for all shopping cart where we are interested into its weight.
It sums up the weight of all articles, ie. multiplying the items weight with the chosen
quantity.
If this modifier is used, the classes implementing the product shall override their
method ``get_weight()``, which must return the weight in kg as Decimal type.
"""
identifier = 'weights'
initial_weight = Decimal(0.01) # in kg
def pre_process_cart(self, cart, request, raise_exception=False):
cart.weight = self.initial_weight
return super(WeightedCartModifier, self).pre_process_cart(cart, request, raise_exception)
def pre_process_cart_item(self, cart, cart_item, request, raise_exception=False):
cart.weight += Decimal(cart_item.product.get_weight() * cart_item.quantity)
        return super(WeightedCartModifier, self).pre_process_cart_item(cart, cart_item, request, raise_exception)
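# Hedged product-side sketch: per the WeightedCartModifier docstring, product classes are
# expected to override get_weight() and return kilograms as a Decimal, e.g.
#   def get_weight(self):
#       return Decimal('0.250')    # 250 g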
|
divio/django-shop
|
shop/modifiers/defaults.py
|
Python
|
bsd-3-clause
| 3,413
|
# Authors: Veeresh Taranalli <veeresht@gmail.com>
# License: BSD 3 clause
from numpy import array, ones_like, arange
from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_, assert_equal
from commpy.channelcoding.gfields import GF
class TestGaloisFields(object):
def test_closure(self):
for m in arange(1, 9):
x = GF(arange(2**m), m)
for a in x.elements:
for b in x.elements:
assert_((GF(array([a]), m) + GF(array([b]), m)).elements[0] in x.elements)
assert_((GF(array([a]), m) * GF(array([b]), m)).elements[0] in x.elements)
def test_addition(self):
m = 3
x = GF(arange(2**m), m)
y = GF(array([6, 4, 3, 1, 2, 0, 5, 7]), m)
z = GF(array([6, 5, 1, 2, 6, 5, 3, 0]), m)
assert_array_equal((x+y).elements, z.elements)
def test_multiplication(self):
m = 3
x = GF(array([7, 6, 5, 4, 3, 2, 1, 0]), m)
y = GF(array([6, 4, 3, 1, 2, 0, 5, 7]), m)
z = GF(array([4, 5, 4, 4, 6, 0, 5, 0]), m)
assert_array_equal((x*y).elements, z.elements)
def test_tuple_form(self):
m = 3
x = GF(arange(0, 2**m-1), m)
y = x.power_to_tuple()
z = GF(array([1, 2, 4, 3, 6, 7, 5]), m)
assert_array_equal(y.elements, z.elements)
def test_power_form(self):
m = 3
x = GF(arange(1, 2**m), m)
y = x.tuple_to_power()
z = GF(array([0, 1, 3, 2, 6, 4, 5]), m)
assert_array_equal(y.elements, z.elements)
m = 4
x = GF(arange(1, 2**m), m)
y = x.tuple_to_power()
z = GF(array([0, 1, 4, 2, 8, 5, 10, 3, 14, 9, 7, 6, 13, 11, 12]), m)
assert_array_equal(y.elements, z.elements)
def test_order(self):
m = 4
x = GF(arange(1, 2**m), m)
y = x.order()
z = array([1, 15, 15, 15, 15, 3, 3, 5, 15, 5, 15, 5, 15, 15, 5])
assert_array_equal(y, z)
def test_minpols(self):
m = 4
x = GF(arange(2**m), m)
z = array([2, 3, 19, 19, 19, 19, 7, 7, 31, 25, 31, 25, 31, 25, 25, 31])
assert_array_equal(x.minpolys(), z)
m = 6
x = GF(array([2, 8, 32, 6, 24, 35, 10, 40, 59, 41, 14, 37]), m)
z = array([67, 87, 103, 73, 13, 109, 91, 117, 7, 115, 11, 97])
assert_array_equal(x.minpolys(), z)
|
tarunlnmiit/CommPy
|
commpy/channelcoding/tests/test_gfields.py
|
Python
|
bsd-3-clause
| 2,375
|
from __future__ import division
from builtins import object
import numpy as np
from sporco.admm import cmod
class TestSet01(object):
def setup_method(self, method):
pass
def test_01(self):
N = 16
M = 4
K = 8
X = np.random.randn(M, K)
S = np.random.randn(N, K)
try:
b = cmod.CnstrMOD(X, S, (N, M))
b.solve()
except Exception as e:
print(e)
assert 0
def test_02(self):
N = 16
M = 4
K = 8
X = np.random.randn(M, K)
S = np.random.randn(N, K)
try:
b = cmod.CnstrMOD(X, S)
b.solve()
except Exception as e:
print(e)
assert 0
def test_03(self):
N = 16
M = 4
K = 8
X = np.random.randn(M, K)
S = np.random.randn(N, K)
dt = np.float16
opt = cmod.CnstrMOD.Options({'Verbose': False, 'MaxMainIter': 20,
'AutoRho': {'Enabled': True},
'DataType': dt})
b = cmod.CnstrMOD(X, S, opt=opt)
b.solve()
assert b.X.dtype == dt
assert b.Y.dtype == dt
assert b.U.dtype == dt
def test_04(self):
N = 16
M = 4
K = 8
X = np.random.randn(M, K)
S = np.random.randn(N, K)
dt = np.float32
opt = cmod.CnstrMOD.Options({'Verbose': False, 'MaxMainIter': 20,
'AutoRho': {'Enabled': True},
'DataType': dt})
b = cmod.CnstrMOD(X, S, opt=opt)
b.solve()
assert b.X.dtype == dt
assert b.Y.dtype == dt
assert b.U.dtype == dt
def test_05(self):
N = 16
M = 4
K = 8
X = np.random.randn(M, K)
S = np.random.randn(N, K)
dt = np.float64
opt = cmod.CnstrMOD.Options({'Verbose': False, 'MaxMainIter': 20,
'AutoRho': {'Enabled': True},
'DataType': dt})
b = cmod.CnstrMOD(X, S, opt=opt)
b.solve()
assert b.X.dtype == dt
assert b.Y.dtype == dt
assert b.U.dtype == dt
def test_06(self):
opt = cmod.CnstrMOD.Options({'AuxVarObj': False})
assert opt['fEvalX'] is True and opt['gEvalY'] is False
opt['AuxVarObj'] = True
assert opt['fEvalX'] is False and opt['gEvalY'] is True
def test_07(self):
opt = cmod.CnstrMOD.Options({'AuxVarObj': True})
assert opt['fEvalX'] is False and opt['gEvalY'] is True
opt['AuxVarObj'] = False
assert opt['fEvalX'] is True and opt['gEvalY'] is False
|
bwohlberg/sporco
|
tests/admm/test_cmod.py
|
Python
|
bsd-3-clause
| 2,763
|
from __future__ import absolute_import, print_function, division
import unittest
from nose.plugins.skip import SkipTest
import numpy
import theano
import theano.gof.op as op
from six import string_types
from theano.gof.type import Type, Generic
from theano.gof.graph import Apply, Variable
import theano.tensor as T
from theano import scalar
from theano import shared
config = theano.config
Op = op.Op
utils = op.utils
def as_variable(x):
assert isinstance(x, Variable)
return x
class MyType(Type):
def __init__(self, thingy):
self.thingy = thingy
def __eq__(self, other):
return type(other) == type(self) and other.thingy == self.thingy
def __str__(self):
return str(self.thingy)
def __repr__(self):
return str(self.thingy)
def filter(self, x, strict=False, allow_downcast=None):
# Dummy filter: we want this type to represent strings that
# start with `self.thingy`.
if not isinstance(x, string_types):
raise TypeError("Invalid type")
if not x.startswith(self.thingy):
raise ValueError("Invalid value")
return x
# Added to make those tests pass in DebugMode
@staticmethod
def may_share_memory(a, b):
        # As this represents a string and strings are immutable, they
        # never share memory in the DebugMode sense. This is needed as
        # Python reuses strings internally.
return False
class MyOp(Op):
__props__ = ()
def make_node(self, *inputs):
inputs = list(map(as_variable, inputs))
for input in inputs:
if not isinstance(input.type, MyType):
raise Exception("Error 1")
outputs = [MyType(sum([input.type.thingy for input in inputs]))()]
return Apply(self, inputs, outputs)
MyOp = MyOp()
class NoInputOp(Op):
"""An Op to test the corner-case of an Op with no input."""
__props__ = ()
def make_node(self):
return Apply(self, [], [MyType('test')()])
def perform(self, node, inputs, output_storage):
output_storage[0][0] = 'test Op no input'
class StructOp(Op):
__props__ = ()
def do_constant_folding(self, node):
# we are not constant
return False
# The input only serves to distinguish thunks
def make_node(self, i):
return Apply(self, [i], [scalar.uint64()])
def c_support_code_struct(self, node, name):
return "npy_uint64 counter%s;" % (name,)
def c_init_code_struct(self, node, name, sub):
return "counter%s = 0;" % (name,)
def c_code(self, node, name, input_names, outputs_names, sub):
return """
%(out)s = counter%(name)s;
counter%(name)s++;
""" % dict(out=outputs_names[0], name=name)
def c_code_cache_version(self):
return (1,)
class TestOp:
# Sanity tests
def test_sanity_0(self):
r1, r2 = MyType(1)(), MyType(2)()
node = MyOp.make_node(r1, r2)
# Are the inputs what I provided?
assert [x for x in node.inputs] == [r1, r2]
# Are the outputs what I expect?
assert [x.type for x in node.outputs] == [MyType(3)]
assert node.outputs[0].owner is node and node.outputs[0].index == 0
# validate
def test_validate(self):
try:
MyOp(Generic()(), MyType(1)()) # MyOp requires MyType instances
raise Exception("Expected an exception")
except Exception as e:
if str(e) != "Error 1":
raise
def test_op_no_input(self):
x = NoInputOp()()
f = theano.function([], x)
rval = f()
assert rval == 'test Op no input'
def test_op_struct(self):
if not theano.config.cxx:
raise SkipTest("G++ not available, so we need to skip this test.")
sop = StructOp()
c = sop(theano.tensor.constant(0))
mode = None
if theano.config.mode == 'FAST_COMPILE':
mode = 'FAST_RUN'
f = theano.function([], c, mode=mode)
rval = f()
assert rval == 0
rval = f()
assert rval == 1
c2 = sop(theano.tensor.constant(1))
f2 = theano.function([], [c, c2], mode=mode)
rval = f2()
assert rval == [0, 0]
class TestMakeThunk(unittest.TestCase):
def test_no_c_code(self):
class IncOnePython(Op):
"""An Op with only a Python (perform) implementation"""
__props__ = ()
def make_node(self, input):
input = scalar.as_scalar(input)
output = input.type()
return Apply(self, [input], [output])
def perform(self, node, inputs, outputs):
input, = inputs
output, = outputs
output[0] = input + 1
i = scalar.int32('i')
o = IncOnePython()(i)
# Check that the c_code function is not implemented
self.assertRaises((NotImplementedError, utils.MethodNotDefined),
o.owner.op.c_code,
o.owner, 'o', ['x'], 'z', {'fail': ''})
storage_map = {i: [numpy.int32(3)],
o: [None]}
compute_map = {i: [True],
o: [False]}
thunk = o.owner.op.make_thunk(o.owner, storage_map, compute_map,
no_recycling=[])
required = thunk()
# Check everything went OK
assert not required # We provided all inputs
assert compute_map[o][0]
assert storage_map[o][0] == 4
def test_no_perform(self):
class IncOneC(Op):
"""An Op with only a C (c_code) implementation"""
__props__ = ()
def make_node(self, input):
input = scalar.as_scalar(input)
output = input.type()
return Apply(self, [input], [output])
def c_code(self, node, name, inputs, outputs, sub):
x, = inputs
z, = outputs
return "%(z)s = %(x)s + 1;" % locals()
i = scalar.int32('i')
o = IncOneC()(i)
# Check that the perform function is not implemented
self.assertRaises((NotImplementedError, utils.MethodNotDefined),
o.owner.op.perform,
o.owner, 0, [None])
storage_map = {i: [numpy.int32(3)],
o: [None]}
compute_map = {i: [True],
o: [False]}
thunk = o.owner.op.make_thunk(o.owner, storage_map, compute_map,
no_recycling=[])
if theano.config.cxx:
required = thunk()
# Check everything went OK
assert not required # We provided all inputs
assert compute_map[o][0]
assert storage_map[o][0] == 4
else:
self.assertRaises((NotImplementedError, utils.MethodNotDefined),
thunk)
def test_no_make_node(self):
class DoubleOp(Op):
"""An Op without make_node"""
__props__ = ()
itypes = [T.dmatrix]
otypes = [T.dmatrix]
def perform(self, node, inputs, outputs):
inp = inputs[0]
output = outputs[0]
output[0] = inp * 2
x_input = T.dmatrix('x_input')
f = theano.function([x_input], DoubleOp()(x_input))
inp = numpy.random.rand(5, 4)
out = f(inp)
assert numpy.allclose(inp * 2, out)
def test_test_value_python_objects():
for x in ([0, 1, 2], 0, 0.5, 1):
assert (op.get_test_value(x) == x).all()
def test_test_value_ndarray():
x = numpy.zeros((5, 5))
v = op.get_test_value(x)
assert (v == x).all()
def test_test_value_constant():
x = T.as_tensor_variable(numpy.zeros((5, 5)))
v = op.get_test_value(x)
assert numpy.all(v == numpy.zeros((5, 5)))
def test_test_value_shared():
x = shared(numpy.zeros((5, 5)))
v = op.get_test_value(x)
assert numpy.all(v == numpy.zeros((5, 5)))
def test_test_value_op():
try:
prev_value = config.compute_test_value
config.compute_test_value = 'raise'
x = T.log(numpy.ones((5, 5)))
v = op.get_test_value(x)
assert numpy.allclose(v, numpy.zeros((5, 5)))
finally:
config.compute_test_value = prev_value
def test_get_debug_values_no_debugger():
'get_debug_values should return [] when debugger is off'
prev_value = config.compute_test_value
try:
config.compute_test_value = 'off'
x = T.vector()
for x_val in op.get_debug_values(x):
assert False
finally:
config.compute_test_value = prev_value
def test_get_det_debug_values_ignore():
"""get_debug_values should return [] when debugger is ignore
and some values are missing """
prev_value = config.compute_test_value
try:
config.compute_test_value = 'ignore'
x = T.vector()
for x_val in op.get_debug_values(x):
assert False
finally:
config.compute_test_value = prev_value
def test_get_debug_values_success():
"""tests that get_debug_value returns values when available
(and the debugger is on)"""
prev_value = config.compute_test_value
for mode in ['ignore', 'warn', 'raise']:
try:
config.compute_test_value = mode
x = T.vector()
x.tag.test_value = numpy.zeros((4,), dtype=config.floatX)
y = numpy.zeros((5, 5))
iters = 0
for x_val, y_val in op.get_debug_values(x, y):
assert x_val.shape == (4,)
assert y_val.shape == (5, 5)
iters += 1
assert iters == 1
finally:
config.compute_test_value = prev_value
def test_get_debug_values_exc():
"""tests that get_debug_value raises an exception when
debugger is set to raise and a value is missing """
prev_value = config.compute_test_value
try:
config.compute_test_value = 'raise'
x = T.vector()
try:
for x_val in op.get_debug_values(x):
# this assert catches the case where we
# erroneously get a value returned
assert False
raised = False
except AttributeError:
raised = True
# this assert catches the case where we got []
# returned, and possibly issued a warning,
# rather than raising an exception
assert raised
finally:
config.compute_test_value = prev_value
def test_debug_error_message():
"""tests that debug_error_message raises an
exception when it should."""
prev_value = config.compute_test_value
for mode in ['ignore', 'raise']:
try:
config.compute_test_value = mode
try:
op.debug_error_message('msg')
raised = False
except ValueError:
raised = True
assert raised
finally:
config.compute_test_value = prev_value
if __name__ == '__main__':
unittest.main()
|
JazzeYoung/VeryDeepAutoEncoder
|
theano/gof/tests/test_op.py
|
Python
|
bsd-3-clause
| 11,243
|
from django.contrib import admin
from django.contrib.admin import helpers
from django import http
from django.template import loader
from django.utils.safestring import mark_safe
from django.contrib.admin.util import unquote
from django.forms.models import modelform_factory
from django.utils import simplejson as json
from models import *
from utils import ctids, dbmodels
from forms import DataIdForm
#_______________________________________ INLINES
class VendorIdInline(admin.TabularInline):
model = VendorId
class BondIssuerInline(admin.TabularInline):
model = BondIssuer
#_______________________________________ ADMINS
class DataIdAdmin(admin.ModelAdmin):
list_display = ('code', 'name', 'live', 'get_country',
'curncy', 'content_type', 'firm_code', 'isin',
'tags')
form = DataIdForm
inlines = [VendorIdInline]
search_fields = ('code', 'name', 'description', 'isin', 'tags')
list_filter = ('content_type',)
save_on_top = True
def change_content(self, request, obj = None):
form = self.get_form(request, obj = obj)
data = request.POST or request.GET
form = form(initial = dict(data.items()), instance = obj)
html = self.render_content_form(request, form.content_form)
data = {'header':'htmls',
'body': [{'identifier': '.data-id-instrument',
'html': html}]
}
return http.HttpResponse(json.dumps(data), mimetype='application/javascript')
def render_content_form(self, request, content_form):
if content_form:
model = content_form._meta.model
content_admin = self.admin_site._instruments.get(model,None)
form = helpers.AdminForm(content_form,
list(content_admin.get_fieldsets(request)),
content_admin.prepopulated_fields,
content_admin.get_readonly_fields(request),
model_admin=content_admin)
return loader.render_to_string('admin/instdata/dataid/instrument_form.html',{'adminform':form})
else:
return ''
def add_view(self, request, **kwargs):
if request.is_ajax():
return self.change_content(request)
else:
return super(DataIdAdmin,self).add_view(request, **kwargs)
def change_view(self, request, object_id, **kwargs):
if request.is_ajax():
return self.change_content(request, self.get_object(request, unquote(object_id)))
else:
return super(DataIdAdmin,self).change_view(request, object_id, **kwargs)
def render_change_form(self, request, context, **kwargs):
content_form = context['adminform'].form.content_form
context['instform'] = self.render_content_form(request, content_form)
return super(DataIdAdmin,self).render_change_form(request, context, **kwargs)
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "content_type":
kwargs["queryset"] = ctids()
return db_field.formfield(**kwargs)
return super(DataIdAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
class VendorAdmin(admin.ModelAdmin):
list_display = ('code', 'name', 'summary')
prepopulated_fields = {'name': ('code',)}
class DataFieldAdmin(admin.ModelAdmin):
list_display = ('code', 'description', 'format')
class VendorDataFieldAdmin(admin.ModelAdmin):
list_display = ('vendor', 'field', 'code')
class VendorIdAdmin(admin.ModelAdmin):
list_display = ('ticker', 'vendor', 'dataid',)
class ExchangeAdmin(admin.ModelAdmin):
list_display = ('code','name',)
class BondMaturityTypeAdmin(admin.ModelAdmin):
list_display = ('code','description',)
class CouponTypeAdmin(admin.ModelAdmin):
list_display = ('code','month_frequency','day_count','description')
ordering = ('code','month_frequency','day_count')
class FutureContractAdmin(admin.ModelAdmin):
list_display = ('code','description','type','curncy','country','index','exchange')
class BondClassAdmin(admin.ModelAdmin):
list_display = ('code','bondcode','description','curncy','country','issuer','sovereign','convertible')
search_fields = ('bondcode',)
list_filter = ('sovereign','convertible','curncy', 'country')
inlines = [BondIssuerInline]
class BondIssuerAdmin(admin.ModelAdmin):
list_display = ('bond_class','issuer','ccy','dt')
search_fields = ('bond_class__code',)
class CollateralTypeAdmin(admin.ModelAdmin):
list_display = ('name','order')
class FundTypeAdmin(admin.ModelAdmin):
list_display = ('code','name','openended','description')
class FundManagerAdmin(admin.ModelAdmin):
list_display = ('code','name','description','website')
class IcAdmin(admin.ModelAdmin):
list_display = ('code','firm_code','instype','ccy','data_id')
search_fields = ('code','firm_code')
class BondAdmin(admin.ModelAdmin):
list_display = ('code','bond_class','ISIN','coupon','maturity_date')
search_fields = ('ISIN',)
class InstDecompAdmin(admin.ModelAdmin):
list_display = ('code','dataid','dt','composition')
ordering = ('code','-dt')
class MktDataAdmin(admin.ModelAdmin):
list_display = ('vendor_id','field','dt','mkt_value')
search_fields = ('vendor_id__ticker',)
ordering = ('-dt',)
class IndustryCodeAdmin(admin.ModelAdmin):
list_display = ('id' , 'code' , 'description' , 'parent')
#_______________________________________ REGISTERING
admin.site.register(Vendor,VendorAdmin)
admin.site.register(VendorDataField,VendorDataFieldAdmin)
admin.site.register(VendorId,VendorIdAdmin)
admin.site.register(DataField,DataFieldAdmin)
admin.site.register(DataId,DataIdAdmin)
admin.site.register(Exchange,ExchangeAdmin)
admin.site.register(BondMaturityType,BondMaturityTypeAdmin)
admin.site.register(FutureContract,FutureContractAdmin)
admin.site.register(BondClass,BondClassAdmin)
admin.site.register(BondIssuer,BondIssuerAdmin)
admin.site.register(CollateralType,CollateralTypeAdmin)
admin.site.register(FundManager,FundManagerAdmin)
admin.site.register(FundType,FundTypeAdmin)
admin.site.register(InstDecomp,InstDecompAdmin)
admin.site.register(IndustryCode,IndustryCodeAdmin)
###admin.site.register(InstrumentCode,IcAdmin)
###admin.site.register(Cash3, list_display = ('id','code','curncy','type','extended'))
###admin.site.register(FwdCash, list_display = ('id','code','curncy','value_date'))
###admin.site.register(MktData,MktDataAdmin)
admin.site._instruments = {}
for inst in dbmodels():
admin.site.register(inst)
inst_admin = admin.site._registry.pop(inst)
admin.site._instruments[inst] = inst_admin
|
lsbardel/flow
|
flow/db/instdata/admin.py
|
Python
|
bsd-3-clause
| 7,082
|
#------------------------------------------------------------------------------
# Copyright (c) 2013, Enthought, Inc.
# All rights reserved.
#------------------------------------------------------------------------------
from kiwisolver import Variable
from .linear_symbolic import LinearSymbolic
class ConstraintsNamespace(object):
""" A class which acts as a namespace for kiwi constraint variables.
The constraint variables are created on an as-needed basis, this
allows components to define new constraints and build layouts
with them, without having to specifically update this client code.
"""
def __init__(self, name, owner):
""" Initialize a ConstraintsNamespace.
Parameters
----------
name : str
A name to use in the label for the constraint variables in
this namespace.
owner : str
The owner id to use in the label for the constraint variables
in this namespace.
"""
self._name = name
self._owner = owner
self._constraints = {}
def __getattr__(self, name):
""" Returns a kiwi constraint variable for the given name,
unless the name is already in the instance dictionary.
Parameters
----------
name : str
The name of the constraint variable to return.
"""
try:
return super(ConstraintsNamespace, self).__getattr__(name)
except AttributeError:
pass
constraints = self._constraints
if name in constraints:
res = constraints[name]
else:
label = '{0}|{1}|{2}'.format(self._name, self._owner, name)
res = constraints[name] = Variable(label)
return res
def __setattr__(self, name, value):
""" Adds a kiwi constraint variable to the constraints dictionary.
Parameters
----------
name : str
The name of the constraint variable to set.
value : LinearSymbolic
The kiwi variable to add to the constraints dictionary.
"""
if isinstance(value, LinearSymbolic):
self._constraints[name] = value
else:
super(ConstraintsNamespace, self).__setattr__(name, value)
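# Hedged usage sketch (names are hypothetical): attribute access lazily creates labelled
# kiwisolver Variables of the form '<name>|<owner>|<attr>'.
#   ns = ConstraintsNamespace('box', 'widget-1')
#   ns.width                # -> Variable('box|widget-1|width'), created on first access
#   ns.width is ns.width    # True; later lookups return the cached variable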
|
tommy-u/enable
|
enable/layout/constraints_namespace.py
|
Python
|
bsd-3-clause
| 2,304
|
# Copyright (c) 2017 pandas-gbq Authors All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Nox test automation configuration.
See: https://nox.readthedocs.io/en/latest/
"""
import os
import os.path
import shutil
import nox
supported_pythons = ["3.7", "3.8"]
system_test_pythons = ["3.7", "3.8"]
latest_python = "3.8"
# Use a consistent version of black so CI is deterministic.
# Should match Stickler: https://stickler-ci.com/docs#black
black_package = "black==20.8b1"
@nox.session(python=latest_python)
def lint(session):
session.install(black_package, "flake8")
session.run("flake8", "pandas_gbq")
session.run("flake8", "tests")
session.run("black", "--check", ".")
@nox.session(python=latest_python)
def blacken(session):
session.install(black_package)
session.run("black", ".")
@nox.session(python=supported_pythons)
def unit(session):
session.install("pytest", "pytest-cov")
session.install(
"-e",
".",
        # Use dependency versions from the constraints file. This enables testing
        # across a fuller range of versions of the dependencies.
"-c",
os.path.join(".", "ci", "constraints-{}.pip".format(session.python)),
)
session.run(
"pytest",
os.path.join(".", "tests", "unit"),
"-v",
"--cov=pandas_gbq",
"--cov=tests.unit",
"--cov-report",
"xml:/tmp/pytest-cov.xml",
*session.posargs
)
@nox.session(python=latest_python)
def cover(session):
session.install("coverage", "pytest-cov")
session.run("coverage", "report", "--show-missing", "--fail-under=73")
session.run("coverage", "erase")
@nox.session(python=latest_python)
def docs(session):
"""Build the docs."""
session.install("-r", os.path.join("docs", "requirements-docs.txt"))
session.install("-e", ".")
shutil.rmtree(os.path.join("docs", "source", "_build"), ignore_errors=True)
session.run(
"sphinx-build",
"-W", # warnings as errors
"-T", # show full traceback on exception
"-N", # no colors
"-b",
"html",
"-d",
os.path.join("docs", "source", "_build", "doctrees", ""),
os.path.join("docs", "source", ""),
os.path.join("docs", "source", "_build", "html", ""),
)
@nox.session(python=system_test_pythons)
def system(session):
session.install("pytest", "pytest-cov")
session.install(
"-e",
".",
        # Use dependency versions from the constraints file. This enables testing
        # across a fuller range of versions of the dependencies.
"-c",
os.path.join(".", "ci", "constraints-{}.pip".format(session.python)),
)
# Skip local auth tests on CI.
additional_args = list(session.posargs)
if "CIRCLECI" in os.environ:
additional_args = additional_args + ["-m", "not local_auth"]
session.run(
"pytest",
os.path.join(".", "tests", "system"),
os.path.join(".", "samples", "tests"),
"-v",
*additional_args
)
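# Illustrative invocations (not part of the original file). Session names come
# from the functions above; the "-3.7"/"-3.8" suffixes assume nox's default
# naming for sessions parametrized by Python version:
#
#   nox -s lint                  # flake8 + black --check
#   nox -s unit-3.7 unit-3.8     # unit tests against both supported pythons
#   nox -s docs                  # build the Sphinx docs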
|
pydata/pandas-gbq
|
noxfile.py
|
Python
|
bsd-3-clause
| 3,146
|
"""
This is the Django template system.
How it works:
The Lexer.tokenize() method converts a template string (i.e., a string
containing markup with custom template tags) to tokens, which can be either
plain text (TokenType.TEXT), variables (TokenType.VAR), or block statements
(TokenType.BLOCK).
The Parser() class takes a list of tokens in its constructor, and its parse()
method returns a compiled template -- which is, under the hood, a list of
Node objects.
Each Node is responsible for creating some sort of output -- e.g. simple text
(TextNode), variable values in a given context (VariableNode), results of basic
logic (IfNode), results of looping (ForNode), or anything else. The core Node
types are TextNode, VariableNode, IfNode and ForNode, but plugin modules can
define their own custom node types.
Each Node has a render() method, which takes a Context and returns a string of
the rendered node. For example, the render() method of a Variable Node returns
the variable's value as a string. The render() method of a ForNode returns the
rendered output of whatever was inside the loop, recursively.
The Template class is a convenient wrapper that takes care of template
compilation and rendering.
Usage:
The only thing you should ever use directly in this file is the Template class.
Create a compiled template object with a template_string, then call render()
with a context. In the compilation stage, the TemplateSyntaxError exception
will be raised if the template doesn't have proper syntax.
Sample code:
>>> from django import template
>>> s = '<html>{% if test %}<h1>{{ varvalue }}</h1>{% endif %}</html>'
>>> t = template.Template(s)
(t is now a compiled template, and its render() method can be called multiple
times with multiple contexts)
>>> c = template.Context({'test':True, 'varvalue': 'Hello'})
>>> t.render(c)
'<html><h1>Hello</h1></html>'
>>> c = template.Context({'test':False, 'varvalue': 'Hello'})
>>> t.render(c)
'<html></html>'
"""
import inspect
import logging
import re
from enum import Enum
from django.template.context import BaseContext
from django.utils.formats import localize
from django.utils.html import conditional_escape, escape
from django.utils.regex_helper import _lazy_re_compile
from django.utils.safestring import SafeData, SafeString, mark_safe
from django.utils.text import (
get_text_list, smart_split, unescape_string_literal,
)
from django.utils.timezone import template_localtime
from django.utils.translation import gettext_lazy, pgettext_lazy
from .exceptions import TemplateSyntaxError
# template syntax constants
FILTER_SEPARATOR = '|'
FILTER_ARGUMENT_SEPARATOR = ':'
VARIABLE_ATTRIBUTE_SEPARATOR = '.'
BLOCK_TAG_START = '{%'
BLOCK_TAG_END = '%}'
VARIABLE_TAG_START = '{{'
VARIABLE_TAG_END = '}}'
COMMENT_TAG_START = '{#'
COMMENT_TAG_END = '#}'
SINGLE_BRACE_START = '{'
SINGLE_BRACE_END = '}'
# what to report as the origin for templates that come from non-loader sources
# (e.g. strings)
UNKNOWN_SOURCE = '<unknown source>'
# Match BLOCK_TAG_*, VARIABLE_TAG_*, and COMMENT_TAG_* tags and capture the
# entire tag, including start/end delimiters. Using re.compile() is faster
# than instantiating SimpleLazyObject with _lazy_re_compile().
tag_re = re.compile(r'({%.*?%}|{{.*?}}|{#.*?#})')
logger = logging.getLogger('django.template')
class TokenType(Enum):
TEXT = 0
VAR = 1
BLOCK = 2
COMMENT = 3
# A mapping between tag start strings and the corresponding token type
TAG_START_TOKEN_TYPE_MAP = {
VARIABLE_TAG_START: TokenType.VAR,
BLOCK_TAG_START: TokenType.BLOCK,
COMMENT_TAG_START: TokenType.COMMENT,
}
class VariableDoesNotExist(Exception):
def __init__(self, msg, params=()):
self.msg = msg
self.params = params
def __str__(self):
return self.msg % self.params
class Origin:
def __init__(self, name, template_name=None, loader=None):
self.name = name
self.template_name = template_name
self.loader = loader
def __str__(self):
return self.name
def __repr__(self):
return '<%s name=%r>' % (self.__class__.__qualname__, self.name)
def __eq__(self, other):
return (
isinstance(other, Origin) and
self.name == other.name and
self.loader == other.loader
)
@property
def loader_name(self):
if self.loader:
return '%s.%s' % (
self.loader.__module__, self.loader.__class__.__name__,
)
class Template:
def __init__(self, template_string, origin=None, name=None, engine=None):
# If Template is instantiated directly rather than from an Engine and
# exactly one Django template engine is configured, use that engine.
# This is required to preserve backwards-compatibility for direct use
# e.g. Template('...').render(Context({...}))
if engine is None:
from .engine import Engine
engine = Engine.get_default()
if origin is None:
origin = Origin(UNKNOWN_SOURCE)
self.name = name
self.origin = origin
self.engine = engine
self.source = str(template_string) # May be lazy.
self.nodelist = self.compile_nodelist()
def __iter__(self):
for node in self.nodelist:
yield from node
def __repr__(self):
return '<%s template_string="%s...">' % (
self.__class__.__qualname__,
self.source[:20].replace('\n', ''),
)
def _render(self, context):
return self.nodelist.render(context)
def render(self, context):
"Display stage -- can be called many times"
with context.render_context.push_state(self):
if context.template is None:
with context.bind_template(self):
context.template_name = self.name
return self._render(context)
else:
return self._render(context)
def compile_nodelist(self):
"""
Parse and compile the template source into a nodelist. If debug
is True and an exception occurs during parsing, the exception is
annotated with contextual line information where it occurred in the
template source.
"""
if self.engine.debug:
lexer = DebugLexer(self.source)
else:
lexer = Lexer(self.source)
tokens = lexer.tokenize()
parser = Parser(
tokens, self.engine.template_libraries, self.engine.template_builtins,
self.origin,
)
try:
return parser.parse()
except Exception as e:
if self.engine.debug:
e.template_debug = self.get_exception_info(e, e.token)
raise
def get_exception_info(self, exception, token):
"""
Return a dictionary containing contextual line information of where
the exception occurred in the template. The following information is
provided:
message
The message of the exception raised.
source_lines
The lines before, after, and including the line the exception
occurred on.
line
The line number the exception occurred on.
before, during, after
The line the exception occurred on split into three parts:
1. The content before the token that raised the error.
2. The token that raised the error.
3. The content after the token that raised the error.
total
The number of lines in source_lines.
top
The line number where source_lines starts.
bottom
The line number where source_lines ends.
start
The start position of the token in the template source.
end
The end position of the token in the template source.
"""
start, end = token.position
context_lines = 10
line = 0
upto = 0
source_lines = []
before = during = after = ""
for num, next in enumerate(linebreak_iter(self.source)):
if start >= upto and end <= next:
line = num
before = escape(self.source[upto:start])
during = escape(self.source[start:end])
after = escape(self.source[end:next])
source_lines.append((num, escape(self.source[upto:next])))
upto = next
total = len(source_lines)
top = max(1, line - context_lines)
bottom = min(total, line + 1 + context_lines)
# In some rare cases exc_value.args can be empty or an invalid
# string.
try:
message = str(exception.args[0])
except (IndexError, UnicodeDecodeError):
message = '(Could not get exception message)'
return {
'message': message,
'source_lines': source_lines[top:bottom],
'before': before,
'during': during,
'after': after,
'top': top,
'bottom': bottom,
'total': total,
'line': line,
'name': self.origin.name,
'start': start,
'end': end,
}
def linebreak_iter(template_source):
yield 0
p = template_source.find('\n')
while p >= 0:
yield p + 1
p = template_source.find('\n', p + 1)
yield len(template_source) + 1
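# Illustrative: list(linebreak_iter('a\nbc\n')) yields [0, 2, 5, 6], i.e. the
# start offset of each line plus a final sentinel just past the end of the source.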
class Token:
def __init__(self, token_type, contents, position=None, lineno=None, full_token_string=None):
"""
A token representing a string from the template.
token_type
A TokenType, either .TEXT, .VAR, .BLOCK, or .COMMENT.
contents
The token source string.
position
An optional tuple containing the start and end index of the token
in the template source. This is used for traceback information
when debug is on.
lineno
The line number the token appears on in the template source.
This is used for traceback information and gettext files.
full_token_string
The verbatim token string - includes '{%', '%}', '{{', '}}' and
other characters which are stripped out of the contents arg.
This is only required for non text tokens - the contents of text
tokens are never stripped.
"""
self.token_type, self.contents, self.token_string = token_type, contents, full_token_string
self.lineno = lineno
self.position = position
def __repr__(self):
token_name = self.token_type.name.capitalize()
return ('<%s token: "%s...">' %
(token_name, self.contents[:20].replace('\n', '')))
def split_contents(self):
split = []
bits = smart_split(self.contents)
for bit in bits:
# Handle translation-marked template pieces
if bit.startswith(('_("', "_('")):
sentinel = bit[2] + ')'
trans_bit = [bit]
while not bit.endswith(sentinel):
bit = next(bits)
trans_bit.append(bit)
bit = ' '.join(trans_bit)
split.append(bit)
return split
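# Illustrative sketch (not part of Django): split_contents() keeps quoted and
# translation-marked arguments intact, e.g. a token whose contents are
#   url _("some view") arg1
# splits into ['url', '_("some view")', 'arg1'].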
class Lexer:
def __init__(self, template_string):
self.template_string = template_string
def __repr__(self):
return '<%s template_string="%s...">' % (
self.__class__.__qualname__,
self.template_string[:20].replace('\n', ''),
)
def tokenize(self):
"""
Return a list of tokens from a given template_string.
"""
in_tag = False
lineno = 1
result = []
for token_string in tag_re.split(self.template_string):
if token_string:
result.append(self.create_token(token_string, None, lineno, in_tag))
lineno += token_string.count('\n')
in_tag = not in_tag
return result
def create_token(self, token_string, position, lineno, in_tag):
"""
Convert the given token string into a new Token object and return it.
If in_tag is True, we are processing something that matched a tag,
otherwise it should be treated as a literal string.
"""
if in_tag:
# The [2:-2] ranges below strip off *_TAG_START and *_TAG_END.
# We could do len(BLOCK_TAG_START) to be more "correct", but we've
# hard-coded the 2s here for performance. And it's not like
# the TAG_START values are going to change anytime, anyway.
token_start = token_string[0:2]
block_content = token_string[2:-2].strip()
token_type = TAG_START_TOKEN_TYPE_MAP[token_start]
return Token(token_type, block_content, position, lineno, token_string)
else:
return Token(TokenType.TEXT, token_string, position, lineno)
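# Illustrative sketch (not part of Django): tokenizing a small template yields
# alternating text and tag tokens, e.g.
#   Lexer('Hi {{ name }}!').tokenize()
#   -> [<Text token: "Hi ...">, <Var token: "name...">, <Text token: "!...">]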
class DebugLexer(Lexer):
def _tag_re_split_positions(self):
last = 0
for match in tag_re.finditer(self.template_string):
start, end = match.span()
yield last, start
yield start, end
last = end
yield last, len(self.template_string)
# This parallels the use of tag_re.split() in Lexer.tokenize().
def _tag_re_split(self):
for position in self._tag_re_split_positions():
yield self.template_string[slice(*position)], position
def tokenize(self):
"""
        Split a template string into tokens and annotate each token with its
start and end position in the source. This is slower than the default
lexer so only use it when debug is True.
"""
# For maintainability, it is helpful if the implementation below can
# continue to closely parallel Lexer.tokenize()'s implementation.
in_tag = False
lineno = 1
result = []
for token_string, position in self._tag_re_split():
if token_string:
result.append(self.create_token(token_string, position, lineno, in_tag))
lineno += token_string.count('\n')
in_tag = not in_tag
return result
class Parser:
def __init__(self, tokens, libraries=None, builtins=None, origin=None):
# Reverse the tokens so delete_first_token(), prepend_token(), and
# next_token() can operate at the end of the list in constant time.
self.tokens = list(reversed(tokens))
self.tags = {}
self.filters = {}
self.command_stack = []
if libraries is None:
libraries = {}
if builtins is None:
builtins = []
self.libraries = libraries
for builtin in builtins:
self.add_library(builtin)
self.origin = origin
def __repr__(self):
return '<%s tokens=%r>' % (self.__class__.__qualname__, self.tokens)
def parse(self, parse_until=None):
"""
        Iterate through the parser tokens and compile each one into a node.
If parse_until is provided, parsing will stop once one of the
specified tokens has been reached. This is formatted as a list of
tokens, e.g. ['elif', 'else', 'endif']. If no matching token is
reached, raise an exception with the unclosed block tag details.
"""
if parse_until is None:
parse_until = []
nodelist = NodeList()
while self.tokens:
token = self.next_token()
# Use the raw values here for TokenType.* for a tiny performance boost.
token_type = token.token_type.value
if token_type == 0: # TokenType.TEXT
self.extend_nodelist(nodelist, TextNode(token.contents), token)
elif token_type == 1: # TokenType.VAR
if not token.contents:
raise self.error(token, 'Empty variable tag on line %d' % token.lineno)
try:
filter_expression = self.compile_filter(token.contents)
except TemplateSyntaxError as e:
raise self.error(token, e)
var_node = VariableNode(filter_expression)
self.extend_nodelist(nodelist, var_node, token)
elif token_type == 2: # TokenType.BLOCK
try:
command = token.contents.split()[0]
except IndexError:
raise self.error(token, 'Empty block tag on line %d' % token.lineno)
if command in parse_until:
# A matching token has been reached. Return control to
# the caller. Put the token back on the token list so the
# caller knows where it terminated.
self.prepend_token(token)
return nodelist
# Add the token to the command stack. This is used for error
# messages if further parsing fails due to an unclosed block
# tag.
self.command_stack.append((command, token))
# Get the tag callback function from the ones registered with
# the parser.
try:
compile_func = self.tags[command]
except KeyError:
self.invalid_block_tag(token, command, parse_until)
# Compile the callback into a node object and add it to
# the node list.
try:
compiled_result = compile_func(self, token)
except Exception as e:
raise self.error(token, e)
self.extend_nodelist(nodelist, compiled_result, token)
# Compile success. Remove the token from the command stack.
self.command_stack.pop()
if parse_until:
self.unclosed_block_tag(parse_until)
return nodelist
def parse_verbatim(self, parse_until_tag):
"""
        Iterate through the parser tokens and return a string that is a
verbatim copy of the original template fragment.
Fragment starts from the current token position and ends when
parse_until_tag is reached.
"""
text_list = []
# See #23424: matching the end tag via a regex rather than a simple test
        # allows verbatim content to contain tag delimiters, e.g. '{{' and '{%'.
end_tag_re = re.compile(f'.*({{%\\s*{parse_until_tag}\\s*?%}}).*')
while self.tokens:
token = self.next_token()
# Use the full token_string if not TokenType.TEXT
verbatim_content = token.contents if token.token_type.value == 0 else token.token_string
if match := end_tag_re.match(verbatim_content):
# 23424: if the verbatim content has additional chars
# that have not been matched (this can happen if verbatim content
                # contains tag delimiters), the token needs to be split
# into smaller tokens.
matched_end_tag = match.groups()[0]
inner_text, outer_text = verbatim_content.split(matched_end_tag)
if inner_text:
text_list.append(inner_text)
if outer_text:
lexer = Lexer(outer_text)
split_tokens = lexer.tokenize()
for new_token in reversed(split_tokens):
self.prepend_token(new_token)
# A matching token has been reached. Return control to
# the caller. Put the token back on the token list so the
# caller knows where it terminated.
# Need to construct a new token as the token passed in might
# have been split above.
end_token = Token(TokenType.BLOCK, parse_until_tag)
self.prepend_token(end_token)
return ''.join(text_list)
text_list.append(verbatim_content)
return ''.join(text_list)
def skip_past(self, endtag):
while self.tokens:
token = self.next_token()
if token.token_type == TokenType.BLOCK and token.contents == endtag:
return
self.unclosed_block_tag([endtag])
def extend_nodelist(self, nodelist, node, token):
# Check that non-text nodes don't appear before an extends tag.
if node.must_be_first and nodelist.contains_nontext:
raise self.error(
token, '%r must be the first tag in the template.' % node,
)
if not isinstance(node, TextNode):
nodelist.contains_nontext = True
# Set origin and token here since we can't modify the node __init__()
# method.
node.token = token
node.origin = self.origin
nodelist.append(node)
def error(self, token, e):
"""
Return an exception annotated with the originating token. Since the
parser can be called recursively, check if a token is already set. This
ensures the innermost token is highlighted if an exception occurs,
e.g. a compile error within the body of an if statement.
"""
if not isinstance(e, Exception):
e = TemplateSyntaxError(e)
if not hasattr(e, 'token'):
e.token = token
return e
def invalid_block_tag(self, token, command, parse_until=None):
if parse_until:
raise self.error(
token,
"Invalid block tag on line %d: '%s', expected %s. Did you "
"forget to register or load this tag?" % (
token.lineno,
command,
get_text_list(["'%s'" % p for p in parse_until], 'or'),
),
)
raise self.error(
token,
"Invalid block tag on line %d: '%s'. Did you forget to register "
"or load this tag?" % (token.lineno, command)
)
def unclosed_block_tag(self, parse_until):
command, token = self.command_stack.pop()
msg = "Unclosed tag on line %d: '%s'. Looking for one of: %s." % (
token.lineno,
command,
', '.join(parse_until),
)
raise self.error(token, msg)
def next_token(self):
return self.tokens.pop()
def prepend_token(self, token):
self.tokens.append(token)
def delete_first_token(self):
del self.tokens[-1]
def add_library(self, lib):
self.tags.update(lib.tags)
self.filters.update(lib.filters)
def compile_filter(self, token):
"""
Convenient wrapper for FilterExpression
"""
return FilterExpression(token, self)
def find_filter(self, filter_name):
if filter_name in self.filters:
return self.filters[filter_name]
else:
raise TemplateSyntaxError("Invalid filter: '%s'" % filter_name)
# This only matches constant *strings* (things in quotes or marked for
# translation). Numbers are treated as variables for implementation reasons
# (so that they retain their type when passed to filters).
constant_string = r"""
(?:%(i18n_open)s%(strdq)s%(i18n_close)s|
%(i18n_open)s%(strsq)s%(i18n_close)s|
%(strdq)s|
%(strsq)s)
""" % {
'strdq': r'"[^"\\]*(?:\\.[^"\\]*)*"', # double-quoted string
'strsq': r"'[^'\\]*(?:\\.[^'\\]*)*'", # single-quoted string
'i18n_open': re.escape("_("),
'i18n_close': re.escape(")"),
}
constant_string = constant_string.replace("\n", "")
filter_raw_string = r"""
^(?P<constant>%(constant)s)|
^(?P<var>[%(var_chars)s]+|%(num)s)|
(?:\s*%(filter_sep)s\s*
(?P<filter_name>\w+)
(?:%(arg_sep)s
(?:
(?P<constant_arg>%(constant)s)|
(?P<var_arg>[%(var_chars)s]+|%(num)s)
)
)?
)""" % {
'constant': constant_string,
'num': r'[-+\.]?\d[\d\.e]*',
'var_chars': r'\w\.',
'filter_sep': re.escape(FILTER_SEPARATOR),
'arg_sep': re.escape(FILTER_ARGUMENT_SEPARATOR),
}
filter_re = _lazy_re_compile(filter_raw_string, re.VERBOSE)
class FilterExpression:
"""
Parse a variable token and its optional filters (all as a single string),
and return a list of tuples of the filter name and arguments.
Sample::
>>> token = 'variable|default:"Default value"|date:"Y-m-d"'
>>> p = Parser('')
>>> fe = FilterExpression(token, p)
>>> len(fe.filters)
2
>>> fe.var
<Variable: 'variable'>
"""
def __init__(self, token, parser):
self.token = token
matches = filter_re.finditer(token)
var_obj = None
filters = []
upto = 0
for match in matches:
start = match.start()
if upto != start:
raise TemplateSyntaxError("Could not parse some characters: "
"%s|%s|%s" %
(token[:upto], token[upto:start],
token[start:]))
if var_obj is None:
var, constant = match['var'], match['constant']
if constant:
try:
var_obj = Variable(constant).resolve({})
except VariableDoesNotExist:
var_obj = None
elif var is None:
raise TemplateSyntaxError("Could not find variable at "
"start of %s." % token)
else:
var_obj = Variable(var)
else:
filter_name = match['filter_name']
args = []
constant_arg, var_arg = match['constant_arg'], match['var_arg']
if constant_arg:
args.append((False, Variable(constant_arg).resolve({})))
elif var_arg:
args.append((True, Variable(var_arg)))
filter_func = parser.find_filter(filter_name)
self.args_check(filter_name, filter_func, args)
filters.append((filter_func, args))
upto = match.end()
if upto != len(token):
raise TemplateSyntaxError("Could not parse the remainder: '%s' "
"from '%s'" % (token[upto:], token))
self.filters = filters
self.var = var_obj
def resolve(self, context, ignore_failures=False):
if isinstance(self.var, Variable):
try:
obj = self.var.resolve(context)
except VariableDoesNotExist:
if ignore_failures:
obj = None
else:
string_if_invalid = context.template.engine.string_if_invalid
if string_if_invalid:
if '%s' in string_if_invalid:
return string_if_invalid % self.var
else:
return string_if_invalid
else:
obj = string_if_invalid
else:
obj = self.var
for func, args in self.filters:
arg_vals = []
for lookup, arg in args:
if not lookup:
arg_vals.append(mark_safe(arg))
else:
arg_vals.append(arg.resolve(context))
if getattr(func, 'expects_localtime', False):
obj = template_localtime(obj, context.use_tz)
if getattr(func, 'needs_autoescape', False):
new_obj = func(obj, autoescape=context.autoescape, *arg_vals)
else:
new_obj = func(obj, *arg_vals)
if getattr(func, 'is_safe', False) and isinstance(obj, SafeData):
obj = mark_safe(new_obj)
else:
obj = new_obj
return obj
def args_check(name, func, provided):
provided = list(provided)
# First argument, filter input, is implied.
plen = len(provided) + 1
# Check to see if a decorator is providing the real function.
func = inspect.unwrap(func)
args, _, _, defaults, _, _, _ = inspect.getfullargspec(func)
alen = len(args)
dlen = len(defaults or [])
# Not enough OR Too many
if plen < (alen - dlen) or plen > alen:
raise TemplateSyntaxError("%s requires %d arguments, %d provided" %
(name, alen - dlen, plen))
return True
args_check = staticmethod(args_check)
def __str__(self):
return self.token
def __repr__(self):
return "<%s %r>" % (self.__class__.__qualname__, self.token)
class Variable:
"""
A template variable, resolvable against a given context. The variable may
be a hard-coded string (if it begins and ends with single or double quote
marks)::
>>> c = {'article': {'section':'News'}}
>>> Variable('article.section').resolve(c)
'News'
>>> Variable('article').resolve(c)
{'section': 'News'}
>>> class AClass: pass
>>> c = AClass()
>>> c.article = AClass()
>>> c.article.section = 'News'
(The example assumes VARIABLE_ATTRIBUTE_SEPARATOR is '.')
"""
def __init__(self, var):
self.var = var
self.literal = None
self.lookups = None
self.translate = False
self.message_context = None
if not isinstance(var, str):
raise TypeError(
"Variable must be a string or number, got %s" % type(var))
try:
# First try to treat this variable as a number.
#
# Note that this could cause an OverflowError here that we're not
# catching. Since this should only happen at compile time, that's
# probably OK.
# Try to interpret values containing a period or an 'e'/'E'
# (possibly scientific notation) as a float; otherwise, try int.
if '.' in var or 'e' in var.lower():
self.literal = float(var)
# "2." is invalid
if var[-1] == '.':
raise ValueError
else:
self.literal = int(var)
except ValueError:
# A ValueError means that the variable isn't a number.
if var[0:2] == '_(' and var[-1] == ')':
# The result of the lookup should be translated at rendering
# time.
self.translate = True
var = var[2:-1]
# If it's wrapped with quotes (single or double), then
# we're also dealing with a literal.
try:
self.literal = mark_safe(unescape_string_literal(var))
except ValueError:
# Otherwise we'll set self.lookups so that resolve() knows we're
# dealing with a bonafide variable
if VARIABLE_ATTRIBUTE_SEPARATOR + '_' in var or var[0] == '_':
raise TemplateSyntaxError("Variables and attributes may "
"not begin with underscores: '%s'" %
var)
self.lookups = tuple(var.split(VARIABLE_ATTRIBUTE_SEPARATOR))
def resolve(self, context):
"""Resolve this variable against a given context."""
if self.lookups is not None:
# We're dealing with a variable that needs to be resolved
value = self._resolve_lookup(context)
else:
# We're dealing with a literal, so it's already been "resolved"
value = self.literal
if self.translate:
is_safe = isinstance(value, SafeData)
msgid = value.replace('%', '%%')
msgid = mark_safe(msgid) if is_safe else msgid
if self.message_context:
return pgettext_lazy(self.message_context, msgid)
else:
return gettext_lazy(msgid)
return value
def __repr__(self):
return "<%s: %r>" % (self.__class__.__name__, self.var)
def __str__(self):
return self.var
def _resolve_lookup(self, context):
"""
Perform resolution of a real variable (i.e. not a literal) against the
given context.
As indicated by the method's name, this method is an implementation
detail and shouldn't be called by external code. Use Variable.resolve()
instead.
"""
current = context
try: # catch-all for silent variable failures
for bit in self.lookups:
try: # dictionary lookup
current = current[bit]
# ValueError/IndexError are for numpy.array lookup on
# numpy < 1.9 and 1.9+ respectively
except (TypeError, AttributeError, KeyError, ValueError, IndexError):
try: # attribute lookup
# Don't return class attributes if the class is the context:
if isinstance(current, BaseContext) and getattr(type(current), bit):
raise AttributeError
current = getattr(current, bit)
except (TypeError, AttributeError):
# Reraise if the exception was raised by a @property
if not isinstance(current, BaseContext) and bit in dir(current):
raise
try: # list-index lookup
current = current[int(bit)]
except (IndexError, # list index out of range
ValueError, # invalid literal for int()
KeyError, # current is a dict without `int(bit)` key
TypeError): # unsubscriptable object
raise VariableDoesNotExist("Failed lookup for key "
"[%s] in %r",
(bit, current)) # missing attribute
if callable(current):
if getattr(current, 'do_not_call_in_templates', False):
pass
elif getattr(current, 'alters_data', False):
current = context.template.engine.string_if_invalid
else:
try: # method call (assuming no args required)
current = current()
except TypeError:
signature = inspect.signature(current)
try:
signature.bind()
except TypeError: # arguments *were* required
current = context.template.engine.string_if_invalid # invalid method call
else:
raise
except Exception as e:
template_name = getattr(context, 'template_name', None) or 'unknown'
logger.debug(
"Exception while resolving variable '%s' in template '%s'.",
bit,
template_name,
exc_info=True,
)
if getattr(e, 'silent_variable_failure', False):
current = context.template.engine.string_if_invalid
else:
raise
return current
class Node:
# Set this to True for nodes that must be first in the template (although
    # they can be preceded by text nodes).
must_be_first = False
child_nodelists = ('nodelist',)
token = None
def render(self, context):
"""
Return the node rendered as a string.
"""
pass
def render_annotated(self, context):
"""
Render the node. If debug is True and an exception occurs during
rendering, the exception is annotated with contextual line information
where it occurred in the template. For internal usage this method is
preferred over using the render method directly.
"""
try:
return self.render(context)
except Exception as e:
if context.template.engine.debug:
# Store the actual node that caused the exception.
if not hasattr(e, '_culprit_node'):
e._culprit_node = self
if (
not hasattr(e, 'template_debug') and
context.render_context.template.origin == e._culprit_node.origin
):
e.template_debug = context.render_context.template.get_exception_info(
e, e._culprit_node.token,
)
raise
def __iter__(self):
yield self
def get_nodes_by_type(self, nodetype):
"""
Return a list of all nodes (within this node and its nodelist)
of the given type
"""
nodes = []
if isinstance(self, nodetype):
nodes.append(self)
for attr in self.child_nodelists:
nodelist = getattr(self, attr, None)
if nodelist:
nodes.extend(nodelist.get_nodes_by_type(nodetype))
return nodes
class NodeList(list):
# Set to True the first time a non-TextNode is inserted by
# extend_nodelist().
contains_nontext = False
def render(self, context):
return SafeString(''.join([
node.render_annotated(context) for node in self
]))
def get_nodes_by_type(self, nodetype):
"Return a list of all nodes of the given type"
nodes = []
for node in self:
nodes.extend(node.get_nodes_by_type(nodetype))
return nodes
class TextNode(Node):
child_nodelists = ()
def __init__(self, s):
self.s = s
def __repr__(self):
return "<%s: %r>" % (self.__class__.__name__, self.s[:25])
def render(self, context):
return self.s
def render_annotated(self, context):
"""
Return the given value.
The default implementation of this method handles exceptions raised
during rendering, which is not necessary for text nodes.
"""
return self.s
def render_value_in_context(value, context):
"""
Convert any value to a string to become part of a rendered template. This
means escaping, if required, and conversion to a string. If value is a
string, it's expected to already be translated.
"""
value = template_localtime(value, use_tz=context.use_tz)
value = localize(value, use_l10n=context.use_l10n)
if context.autoescape:
if not issubclass(type(value), str):
value = str(value)
return conditional_escape(value)
else:
return str(value)
class VariableNode(Node):
child_nodelists = ()
def __init__(self, filter_expression):
self.filter_expression = filter_expression
def __repr__(self):
return "<Variable Node: %s>" % self.filter_expression
def render(self, context):
try:
output = self.filter_expression.resolve(context)
except UnicodeDecodeError:
# Unicode conversion can fail sometimes for reasons out of our
# control (e.g. exception rendering). In that case, we fail
# quietly.
return ''
return render_value_in_context(output, context)
# Regex for token keyword arguments
kwarg_re = _lazy_re_compile(r"(?:(\w+)=)?(.+)")
def token_kwargs(bits, parser, support_legacy=False):
"""
Parse token keyword arguments and return a dictionary of the arguments
retrieved from the ``bits`` token list.
`bits` is a list containing the remainder of the token (split by spaces)
that is to be checked for arguments. Valid arguments are removed from this
list.
`support_legacy` - if True, the legacy format ``1 as foo`` is accepted.
Otherwise, only the standard ``foo=1`` format is allowed.
There is no requirement for all remaining token ``bits`` to be keyword
arguments, so return the dictionary as soon as an invalid argument format
is reached.
"""
if not bits:
return {}
match = kwarg_re.match(bits[0])
kwarg_format = match and match[1]
if not kwarg_format:
if not support_legacy:
return {}
if len(bits) < 3 or bits[1] != 'as':
return {}
kwargs = {}
while bits:
if kwarg_format:
match = kwarg_re.match(bits[0])
if not match or not match[1]:
return kwargs
key, value = match.groups()
del bits[:1]
else:
if len(bits) < 3 or bits[1] != 'as':
return kwargs
key, value = bits[2], bits[0]
del bits[:3]
kwargs[key] = parser.compile_filter(value)
if bits and not kwarg_format:
if bits[0] != 'and':
return kwargs
del bits[:1]
return kwargs
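# Illustrative sketch (not part of Django): for the trailing bits of a tag such
# as {% include "base.html" with title="Home" user=request.user %}, i.e.
#   bits = ['title="Home"', 'user=request.user']
# token_kwargs(bits, parser) returns
#   {'title': <FilterExpression '"Home"'>, 'user': <FilterExpression 'request.user'>}
# and removes the consumed entries from bits.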
|
atul-bhouraskar/django
|
django/template/base.py
|
Python
|
bsd-3-clause
| 41,520
|
#
# Depends
# Copyright (C) 2014 by Andrew Gardner & Jonas Unger. All rights reserved.
# BSD license (LICENSE.txt for details).
#
from PySide import QtCore, QtGui
import node
import data_packet
"""
A QT graphics widget that displays the state of a given scenegraph. The user
can also mouseover a given item, which emits a mouseover QT signal.
"""
class SceneGraphWidget(QtGui.QWidget):
"""
A QT graphics widget that displays items in a given scenegraph and the
presence of each item's data. It is also capable of highlighting a
collection of items if desired.
"""
# Signals
mouseover = QtCore.Signal(node.DagNode)
def __init__(self, parent=None):
"""
"""
QtGui.QWidget.__init__(self, parent)
self.mainLayout = QtGui.QVBoxLayout(self)
self.mainLayout.setAlignment(QtCore.Qt.AlignTop)
self.setLayout(self.mainLayout)
self.setMinimumWidth(400)
self.tableWidget = QtGui.QTableWidget()
self.tableWidget.setRowCount(0)
self.tableWidget.setColumnCount(1)
self.tableWidget.horizontalHeader().hide()
self.tableWidget.horizontalHeader().setStretchLastSection(True)
self.tableWidget.horizontalHeader().setResizeMode(1, QtGui.QHeaderView.Stretch)
self.tableWidget.verticalHeader().hide()
self.tableWidget.setEditTriggers(QtGui.QTableWidget.NoEditTriggers)
self.tableWidget.setFocusPolicy(QtCore.Qt.NoFocus)
self.tableWidget.setStyleSheet("QTableWidget::item:selected{ background-color: rgb(91, 114, 138); }")
self.mainLayout.addWidget(self.tableWidget)
def highlightRowsUsingNodes(self, dagNodes):
"""
Given a list of dag nodes, highlight all the rows they live in.
"""
if dagNodes is None:
self.tableWidget.clearSelection()
return
for dagNode in dagNodes:
for row in range(self.tableWidget.rowCount()):
widget = self.tableWidget.cellWidget(row, 0)
if widget.pointerToDataPacket.sourceNode is dagNode:
self.tableWidget.selectRow(row)
def handleMouseover(self, dagNodes):
"""
Highlight the row the mouse is over, and emit this class' mouseover signal.
"""
self.highlightRowsUsingNodes(dagNodes)
self.mouseover.emit(dagNodes)
def rebuild(self, sceneGraph, selectedDagNode):
"""
Rebuild the current widget, given a scene graph object and the selected
dag node. A value of None in the sceneGraph field clears the widget, and
a value of none in the selectedDagNode field displays all nodes including
the selected one.
"""
if not sceneGraph:
self.tableWidget.setRowCount(0)
return
# Count the number of rows
rowCount = len([dp for dp in sceneGraph if dp.sourceNode != selectedDagNode])
self.tableWidget.setRowCount(rowCount)
index = 0
for dataPacket in sceneGraph:
if dataPacket.sourceNode == selectedDagNode:
continue
self.tableWidget.setRowHeight(index, 20)
# If the selected dag node can't read the datatype, make it obvious
disabled = False
if selectedDagNode.dataPacketTypesAccepted() and type(dataPacket) not in selectedDagNode.dataPacketTypesAccepted():
disabled = True
# Add the text field with enhanced middle-button drag'n'drop functionality
class DraggableTextWidget(QtGui.QLabel):
# Signals
mouseover = QtCore.Signal(node.DagNode)
def __init__(self, dataPacket, *args):
super(DraggableTextWidget, self).__init__(*args)
self.pointerToDataPacket = dataPacket
self.setStyleSheet("background:transparent;")
def mousePressEvent(self, event):
#if event.buttons() != QtCore.Qt.MiddleButton:
# return QtGui.QLabel().mousePressEvent(event)
mimeData = QtCore.QMimeData()
dragText = data_packet.scenegraphLocationString(self.pointerToDataPacket)
mimeData.setText(dragText)
drag = QtGui.QDrag(self)
drag.setMimeData(mimeData)
drag.exec_(QtCore.Qt.CopyAction)
#QtGui.QLabel.mousePressEvent(self, event)
def enterEvent(self, event):
self.mouseover.emit([self.pointerToDataPacket.sourceNode])
QtGui.QLabel.enterEvent(self, event)
def leaveEvent(self, event):
self.mouseover.emit(None)
QtGui.QLabel.leaveEvent(self, event)
textWidget = DraggableTextWidget(dataPacket)
textWidget.setTextFormat(QtCore.Qt.RichText)
colorString = "00aa00" if dataPacket.dataPresent() else "aa0000"
if disabled:
colorString = "868686"
textWidget.setText("<html><font color=\"#%s\"> %s</font> - %s</html>" % (colorString, dataPacket.typeStr(), data_packet.shorthandScenegraphLocationString(dataPacket)))
self.tableWidget.setCellWidget(index, 0, textWidget)
# Chain the text edit mouseover signal out with property name and value
textWidget.mouseover.connect(self.handleMouseover)
index += 1
# Unsure if this is really necessary, but it makes a difference
self.repaint()
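# Illustrative wiring sketch (not part of the original module; the names below
# are made up and assume an existing QApplication):
#
#   widget = SceneGraphWidget()
#   widget.mouseover.connect(on_scenegraph_mouseover)   # hypothetical slot
#   widget.rebuild(scene_graph, selected_node)          # repopulate the table
#   widget.highlightRowsUsingNodes([selected_node])     # or None to clear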
|
mottosso/deplish
|
deplish/scenegraph_widget.py
|
Python
|
bsd-3-clause
| 5,727
|
from scipy.special import erfinv
import glob, os, logging, sys
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg
import cPickle as pickle
import json, requests
def read_candidates(candsfile, snrmin=0, snrmax=999, returnstate=False):
""" Reads candidate file and returns data as python object.
candsfile is pkl file (for now) with (1) state dict and (2) cands object.
cands object can either be a dictionary or tuple of two numpy arrays.
Return tuple of two numpy arrays (location, properties).
returned values can be filtered by snrmin and snrmax (on absolute value).
returnstate will instead return (loc, prop, state).
"""
# read in pickle file of candidates
try:
with open(candsfile, 'rb') as pkl:
d = pickle.load(pkl)
cands = pickle.load(pkl)
except IOError:
logger.error('Trouble parsing candsfile')
loc = np.array([])
prop = np.array([])
if returnstate:
return (loc, prop, d)
else:
return (loc, prop)
if 'snr2' in d['features']:
snrcol = d['features'].index('snr2')
elif 'snr1' in d['features']:
snrcol = d['features'].index('snr1')
if isinstance(cands, dict):
loc = []; prop = []
for kk in sorted(cands.keys()):
if ((np.abs(cands[kk][snrcol]) > snrmin) and (np.abs(cands[kk][snrcol]) < snrmax)):
loc.append( list(kk) )
prop.append( list(cands[kk]) )
loc = np.array(loc)
prop = np.array(prop)
elif isinstance(cands, tuple):
loc, prop = cands
assert isinstance(loc, np.ndarray) and isinstance(prop, np.ndarray), 'if cands object is tuple, contents must be two ndarrays'
snrsel = np.where( (np.abs(prop[:, snrcol]) > snrmin) & (np.abs(prop[:, snrcol]) < snrmax) )
loc = loc[snrsel]
prop = prop[snrsel]
else:
logger.error('Cands object (in cands file) must be dict or tuple(np.array, np.array).')
logger.info('Read %d candidates from %s.' % (len(loc), candsfile))
if returnstate:
return loc, prop, d
else:
return loc, prop
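# Illustrative usage (not part of the original module; the filename is made up):
#   loc, prop, state = read_candidates('cands_obs_sc5.pkl', snrmin=6.5,
#                                      returnstate=True)
#   # rows of loc are indexed by state['featureind']; columns of prop by
#   # state['features']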
def read_noise(noisefile):
""" Function to read a noise file and parse columns.
Works with both per-scan and merged noise files.
"""
noises = pickle.load(open(noisefile, 'r'))
scan = []; seg = []; noiseperbl = []; flagfrac = []; imnoise = []
if len(noises[0]) == 4:
for noise in noises:
seg.append(noise[0]); noiseperbl.append(noise[1])
flagfrac.append(noise[2]); imnoise.append(noise[3])
return (np.array(seg), np.array(noiseperbl), np.array(flagfrac), np.array(imnoise))
elif len(noises[0]) == 5:
for noise in noises:
scan.append(noise[0])
seg.append(noise[1]); noiseperbl.append(noise[2])
flagfrac.append(noise[3]); imnoise.append(noise[4])
return (np.array(scan), np.array(seg), np.array(noiseperbl), np.array(flagfrac), np.array(imnoise))
else:
        logger.warn('structure of noise file not understood. first entry should be length 4 or 5.')
def merge_segments(filename, scan, cleanup=True, sizelimit=0):
""" Merges cands/noise pkl files from multiple segments to single cands/noise file.
Expects segment cands pkls with have (1) state dict and (2) cands dict.
Writes tuple state dict and duple of numpy arrays
A single pkl written per scan using root name fileroot.
if cleanup, it will remove segments after merging.
if sizelimit, it will reduce the output file to be less than this many MB.
"""
workdir = os.path.dirname(filename)
fileroot = os.path.basename(filename)
candslist = glob.glob(os.path.join(workdir, 'cands_' + fileroot + '_sc' + str(scan) + 'seg*.pkl'))
noiselist = glob.glob(os.path.join(workdir, 'noise_' + fileroot + '_sc' + str(scan) + 'seg*.pkl'))
candssegs = sorted([candsfile.rstrip('.pkl').split('seg')[1] for candsfile in candslist])
noisesegs = sorted([noisefile.rstrip('.pkl').split('seg')[1] for noisefile in noiselist])
# test for good list with segments
if not candslist and not noiselist:
logger.warn('candslist and noiselist are empty.')
return
# aggregate cands over segments
if not os.path.exists(os.path.join(workdir, 'cands_' + fileroot + '_sc' + str(scan) + '.pkl')):
logger.info('Aggregating cands over segments %s for fileroot %s, scan %d' % (str(candssegs), fileroot, scan))
logger.debug('%s' % candslist)
cands = {}
for candsfile in candslist:
with open(candsfile, 'r') as pkl:
state = pickle.load(pkl)
result = pickle.load(pkl)
for kk in result.keys():
cands[kk] = result[kk]
segment = state.pop('segment') # remove this key, as it has no meaning after merging segments
# optionally limit size
if sizelimit and len(cands):
logger.debug('Checking size of cands dictionary...')
if 'snr2' in state['features']:
snrcol = state['features'].index('snr2')
elif 'snr1' in state['features']:
snrcol = state['features'].index('snr1')
candsize = sys.getsizeof(cands[cands.keys()[0]])/1e6
maxlen = int(sizelimit/candsize)
if len(cands) > maxlen: # need to reduce length to newlen
logger.info('cands dictionary of length %.1f would exceed sizelimit of %d MB. Trimming to strongest %d candidates' % (len(cands), sizelimit, maxlen))
snrs = [abs(cands[k][snrcol]) for k in cands.iterkeys()] # take top snrs
snrsort = sorted(snrs, reverse=True)
snrmax = snrsort[maxlen] # get min snr for given length limit
cands = {k: v for k,v in cands.items() if abs(v[snrcol]) > snrmax} # new cands dict
# write cands to single file
with open(os.path.join(workdir, 'cands_' + fileroot + '_sc' + str(scan) + '.pkl'), 'w') as pkl:
pickle.dump(state, pkl, protocol=2)
pickle.dump( (np.array(cands.keys()), np.array(cands.values())), pkl, protocol=2)
if cleanup:
if os.path.exists(os.path.join(workdir, 'cands_' + fileroot + '_sc' + str(scan) + '.pkl')):
for candsfile in candslist:
os.remove(candsfile)
else:
logger.warn('Merged candsfile already exists for scan %d. Not merged.' % scan)
# aggregate noise over segments
if not os.path.exists(os.path.join(workdir, 'noise_' + fileroot + '_sc' + str(scan) + '.pkl')):
logger.info('Aggregating noise over segments %s for fileroot %s, scan %d' % (str(noisesegs), fileroot, scan))
logger.debug('%s' % noiselist)
noise = []
for noisefile in noiselist:
with open(noisefile, 'r') as pkl:
result = pickle.load(pkl) # gets all noises for segment as list
noise += result
# write noise to single file
if len(noise):
with open(os.path.join(workdir, 'noise_' + fileroot + '_sc' + str(scan) + '.pkl'), 'w') as pkl:
pickle.dump(noise, pkl, protocol=2)
if cleanup:
if os.path.exists(os.path.join(workdir, 'noise_' + fileroot + '_sc' + str(scan) + '.pkl')):
for noisefile in noiselist:
os.remove(noisefile)
else:
logger.warn('Merged noisefile already exists for scan %d. Not merged.' % scan)
def cleanup(workdir, fileroot, scans=[]):
""" Cleanup up noise and cands files.
Finds all segments in each scan and merges them into single cand/noise file per scan.
"""
os.chdir(workdir)
# merge cands/noise files per scan
for scan in scans:
        merge_segments(fileroot, scan, cleanup=True, sizelimit=2.)
def merge_noises(pkllist, outroot=''):
""" Merge noise files from multiple segments.
Output noise file has scan number at start of each entry.
"""
assert isinstance(pkllist, list), "pkllist must be list of file names"
if not outroot:
outroot = '_'.join(pkllist[0].split('_')[1:-1])
workdir = os.path.dirname(pkllist[0])
mergepkl = os.path.join(workdir, 'noise_' + outroot + '_merge.pkl')
pkllist = [pkllist[i] for i in range(len(pkllist)) if ('merge' not in pkllist[i]) and ('seg' not in pkllist[i])] # filter list down to per-scan noise pkls
pkllist.sort(key=lambda i: int(i.rstrip('.pkl').split('_sc')[1])) # sort by scan assuming filename structure
scans = [int(ff.rstrip('.pkl').split('_sc')[1]) for ff in pkllist]
logger.info('Aggregating noise from scans %s' % scans)
allnoise = []
for pklfile in pkllist:
scan = int(pklfile.rstrip('.pkl').split('_sc')[1]) # parsing filename to get scan number
with open(pklfile, 'r') as pkl:
noises = pickle.load(pkl) # gets all noises for segment as list
allnoise += [[scan] + list(noise) for noise in noises] # prepend scan number
# write noise to single file
if os.path.exists(mergepkl):
logger.info('Overwriting merged noise file %s' % mergepkl)
os.remove(mergepkl)
else:
logger.info('Writing merged noise file %s' % mergepkl)
with open(mergepkl, 'w') as pkl:
pickle.dump(allnoise, pkl, protocol=2)
def merge_cands(pkllist, outroot='', remove=[], snrmin=0, snrmax=999):
""" Takes cands pkls from list and filteres to write new single "merge" pkl.
Ignores segment cand files.
remove is a list [t0,t1,t2,t3], where t0-t1, t2-t3 define the time ranges in seconds.
snrmin, snrmax define how to filter cands read and written by abs(snr)
"""
assert isinstance(pkllist, list), "pkllist must be list of file names"
if not outroot:
outroot = '_'.join(pkllist[0].split('_')[1:-1])
workdir = os.path.dirname(pkllist[0])
mergepkl = os.path.join(workdir, 'cands_' + outroot + '_merge.pkl')
pkllist = [pkllist[i] for i in range(len(pkllist)) if ('merge' not in pkllist[i]) and ('seg' not in pkllist[i])]
pkllist.sort(key=lambda i: int(i.rstrip('.pkl').split('_sc')[1])) # assumes filename structure
scans = [int(ff.rstrip('.pkl').split('_sc')[1]) for ff in pkllist]
logger.info('Aggregating cands from scans %s' % scans)
# get sample state dict. use 'dict' suffix to define multi-scan metadata dictionaries
mergeloc = []; mergeprop = []; mergetimes = []
segmenttimesdict = {}
starttime_mjddict = {}
for pklfile in pkllist:
# get scan number and read candidates
locs, props, d = read_candidates(pklfile, snrmin=snrmin, snrmax=snrmax, returnstate=True)
if 'snr2' in d['features']:
snrcol = d['features'].index('snr2')
elif 'snr1' in d['features']:
snrcol = d['features'].index('snr1')
scan = int(pklfile.rstrip('.pkl').split('_sc')[1]) # parsing filename to get scan number
segmenttimesdict[scan] = d['segmenttimes']
starttime_mjddict[scan] = d['starttime_mjd']
times = int2mjd(d, locs)
# build merged loc,prop lists
for i in range(len(locs)):
loc = list(locs[i])
loc.insert(0, scan)
prop = list(props[i])
mergeloc += [loc]
mergeprop += [prop]
mergetimes.append(times[i])
mergeloc = np.array(mergeloc)
mergeprop = np.array(mergeprop)
mergetimes = np.array(mergetimes)
# filter by remove, if needed
if remove:
mergetimes -= mergetimes.min()
ww = np.ones(len(mergetimes), dtype=bool) # initialize pass filter
nranges = len(remove)
for first in range(0,nranges,2):
badrange0 = remove[first]
badrange1 = remove[first+1]
ww = ww & np.where( (mergetimes < badrange0) | (mergetimes > badrange1), True, False )
mergeloc = mergeloc[ww]
mergeprop = mergeprop[ww]
# update metadata
d['featureind'].insert(0, 'scan')
d['remove'] = remove
d['segmenttimesdict'] = segmenttimesdict
d['starttime_mjddict'] = starttime_mjddict
logger.info('Writing filtered set of %d candidates to %s' % (len(mergeloc), mergepkl))
# write up new pkl
pkl = open(mergepkl, 'w')
pickle.dump(d, pkl, protocol=2)
pickle.dump((mergeloc, mergeprop), pkl, protocol=2)
pkl.close()
def merge_scans(workdir, fileroot, scans, snrmin=0, snrmax=999):
""" Merge cands/noise files over all scans """
pkllist = [ff for ff in
[os.path.join(workdir, 'cands_{0}_sc{1}.pkl'.format(fileroot, scan))
for scan in scans] if os.path.exists(ff)]
merge_cands(pkllist, outroot=fileroot, snrmin=snrmin, snrmax=snrmax)
pkllist = [ff for ff in
[os.path.join(workdir, 'noise_{0}_sc{1}.pkl'.format(fileroot, scan))
for scan in scans] if os.path.exists(ff)]
merge_noises(pkllist, fileroot)
def split_candidates(candsfile, featind1, featind2, candsfile1, candsfile2):
""" Split features from one candsfile into two new candsfiles
featind1/2 is list of indices to take from d['features'].
New features and updated state dict go to candsfile1/2.
"""
with open(candsfile, 'rb') as pkl:
d = pickle.load(pkl)
cands = pickle.load(pkl)
features = d['features']
d1 = d.copy()
d2 = d.copy()
d1['features'] = [features[i] for i in featind1]
d2['features'] = [features[i] for i in featind2]
cands1 = {}
cands2 = {}
for key in cands:
cands1[key] = tuple([cands[key][i] for i in featind1])
cands2[key] = tuple([cands[key][i] for i in featind2])
with open(candsfile1, 'w') as pkl:
pickle.dump(d1, pkl, protocol=2)
pickle.dump(cands1, pkl, protocol=2)
with open(candsfile2, 'w') as pkl:
pickle.dump(d2, pkl, protocol=2)
pickle.dump(cands2, pkl, protocol=2)
def nbcompile(workdir, fileroot, html=True, basenb='', agdir=''):
""" Run analysis pipeline from jupyter base notebook and save as notebook and html.
html will also compile static html version
basenb can be provided, else will get distributed version.
agdir is the activegit repo (optional)
"""
import inspect, rtpipe, shutil
from subprocess import call
os.environ['fileroot'] = fileroot
if agdir:
os.environ['agdir'] = agdir
if not basenb:
basenb = os.path.join(os.path.dirname(os.path.dirname(inspect.getfile(rtpipe))), 'notebooks/baseinteract.ipynb')
logger.info('Moving to {0} and building notebook for {1}'.format(workdir, fileroot))
os.chdir(workdir)
shutil.copy(basenb, '{0}/{1}.ipynb'.format(workdir, fileroot))
cmd = 'jupyter nbconvert {0}.ipynb --inplace --execute --to notebook --allow-errors --ExecutePreprocessor.timeout=3600'.format(fileroot).split(' ')
status = call(cmd)
cmd = 'jupyter trust {0}.ipynb'.format(fileroot).split(' ')
status = call(cmd)
if html:
cmd = 'jupyter nbconvert {0}.ipynb --to html --output {0}.html'.format(fileroot).split(' ')
status = call(cmd)
def thresholdcands(candsfile, threshold, numberperscan=1):
""" Returns list of significant candidate loc in candsfile.
Can define threshold and maximum number of locs per scan.
Works on merge or per-scan cands pkls.
"""
# read metadata and define columns of interest
d = pickle.load(open(candsfile, 'r'))
try:
scancol = d['featureind'].index('scan')
except ValueError:
scancol = -1
if 'snr2' in d['features']:
snrcol = d['features'].index('snr2')
elif 'snr1' in d['features']:
snrcol = d['features'].index('snr1')
# read data and define snrs
    loc, prop = read_candidates(candsfile)
snrs = [prop[i][snrcol] for i in range(len(prop)) if prop[i][snrcol] > threshold]
# calculate unique list of locs of interest
siglocs = [list(loc[i]) for i in range(len(prop)) if prop[i][snrcol] > threshold]
siglocssort = sorted(zip([list(ll) for ll in siglocs], snrs), key=lambda stuff: stuff[1], reverse=True)
if scancol >= 0:
scanset = list(set([siglocs[i][scancol] for i in range(len(siglocs))]))
candlist= []
for scan in scanset:
logger.debug('looking in scan %d' % scan)
count = 0
for sigloc in siglocssort:
if sigloc[0][scancol] == scan:
logger.debug('adding sigloc %s' % str(sigloc))
candlist.append(sigloc)
count += 1
if count >= numberperscan:
break
else:
candlist = siglocssort[:numberperscan]
logger.debug('Returning %d cands above threshold %.1f' % (len(candlist), threshold))
return [loc for loc,snr in candlist]
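# Illustrative usage (not part of the original module; the filename is made up):
#   locs = thresholdcands('cands_obs_merge.pkl', threshold=7.0, numberperscan=3)
#   # at most 3 candidate locations per scan with snr above 7, ordered
#   # strongest-first within each scan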
def postcands(mergepkl, url='http://localhost:9200/realfast/cands/_bulk?', snrmin=0, snrmax=999):
""" Posts candidate info to elasticsearch index """
loc, prop, d = read_candidates(mergepkl, snrmin=snrmin, snrmax=snrmax, returnstate=True)
times = int2mjd(d, loc)
alldata = []
for i in range(len(loc)):
data = {}
data['filename'] = os.path.basename(d['filename'])
data['@timestamp'] = times[i]
for featureind in d['featureind']:
data[featureind] = loc[i][d['featureind'].index(featureind)]
for feature in d['features']:
data[feature] = prop[i, d['features'].index(feature)]
idobj = {}
idobj['_id'] = '{:.7f}_{}_{}'.format(data['@timestamp'], data['dmind'], data['dtind'])
alldata.append({"index":idobj})
alldata.append(data)
jsonStr = json.dumps(alldata, separators=(',', ':'))
cleanjson = jsonStr.replace('}}, ','}}\n').replace('},', '}\n').replace(']', '').replace('[', '')
r = requests.post(url, data=cleanjson)
logger.debug('%s' % r)
def int2mjd(d, loc):
""" Function to convert segment+integration into mjd seconds.
"""
# needs to take merge pkl dict
if len(loc):
intcol = d['featureind'].index('int')
segmentcol = d['featureind'].index('segment')
if d.has_key('segmenttimesdict'): # using merged pkl
scancol = d['featureind'].index('scan')
t0 = np.array([d['segmenttimesdict'][loc[i,scancol]][loc[i,segmentcol],0] for i in range(len(loc))])
else:
t0 = d['segmenttimes'][loc[:,segmentcol]][:,0]
return (t0 + (d['inttime']/(24*3600.))*loc[:,intcol]) * 24*3600
else:
return np.array([])
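# Note: the values returned by int2mjd() are MJD expressed in seconds
# (mjd * 24 * 3600); postcands() posts these directly as '@timestamp'.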
def plot_full(candsfile, cands, mode='im'):
""" Plot 'full' features, such as cutout image and spectrum.
"""
loc, prop, d = read_candidates(candsfile, returnstate=True)
npixx, npixy = prop[0][4].shape
nints, nchan, npol = prop[0][5].shape
bin = 10
plt.figure(1)
for i in cands:
if mode == 'spec':
rr = np.array([np.abs(prop[i][5][:,i0:i0+bin,0].mean(axis=1)) for i0 in range(0,nchan,bin)])
ll = np.array([np.abs(prop[i][5][:,i0:i0+bin,1].mean(axis=1)) for i0 in range(0,nchan,bin)])
sh = ll.shape
data = np.concatenate( (rr, np.zeros(shape=(sh[0], sh[1]/2)), ll), axis=1)
elif mode == 'im':
data = prop[i][4]
plt.subplot(np.sqrt(len(cands)), np.sqrt(len(cands)), cands.index(i))
plt.imshow(data, interpolation='nearest')
plt.show()
def make_psrrates(pkllist, nbins=60, period=0.156):
""" Visualize cands in set of pkl files from pulsar observations.
Input pkl list assumed to start with on-axis pulsar scan, followed by off-axis scans.
nbins for output histogram. period is pulsar period in seconds (used to find single peak for cluster of detections).
"""
# get metadata
state = pickle.load(open(pkllist[0], 'r')) # assume single state for all scans
if 'image2' in state['searchtype']:
immaxcol = state['features'].index('immax2')
logger.info('Using immax2 for flux.')
elif 'image1' in state['searchtype']:
try:
immaxcol = state['features'].index('immax1')
logger.info('Using immax1 for flux.')
except:
immaxcol = state['features'].index('snr1')
logger.info('Warning: Using snr1 for flux.')
# read cands
for pklfile in pkllist:
loc, prop = read_candidates(pklfile)
ffm = []
        if len(loc):  # loc is an ndarray, so test its length rather than its truth value
times = int2mjd(state, loc)
for (mint,maxt) in zip(np.arange(times.min()-period/2,times.max()+period/2,period), np.arange(times.min()+period/2,times.max()+3*period/2,period)):
ff = np.array([prop[i][immaxcol] for i in range(len(prop))])
mm = ff[np.where( (times >= mint) & (times < maxt) )]
                if len(mm):
ffm.append(mm.max())
ffm.sort()
logger.info('Found %d unique pulses.' % len(ffm))
# calculate params
if pkllist.index(pklfile) == 0:
duration0 = times.max() - times.min()
ratemin = 1/duration0
ratemax = len(ffm)/duration0
rates = np.linspace(ratemin, ratemax, nbins)
f0m = ffm
elif pkllist.index(pklfile) == 1:
duration1 = times.max() - times.min()
f1m = ffm
elif pkllist.index(pklfile) == 2:
f2m = ffm
elif pkllist.index(pklfile) == 3:
f3m = ffm
# calc rates
f0 = []; f1 = []; f2 = []; f3 = []
for rr in rates:
num0 = (np.round(rr*duration0)).astype(int)
num1 = (np.round(rr*duration1)).astype(int)
if (num0 > 0) and (num0 <= len(f0m)):
f0.append((rr,f0m[-num0]))
if (num1 > 0) and (num1 <= len(f1m)):
f1.append((rr,f1m[-num1]))
if (num1 > 0) and (num1 <= len(f2m)):
f2.append((rr,f2m[-num1]))
if len(pkllist) == 4:
if f3m:
if (num1 > 0) and (num1 <= len(f3m)):
f3.append((rr,f3m[-num1]))
if f3:
return {0: np.array(f0).transpose(), 1: np.array(f1).transpose(), 2: np.array(f2).transpose(), 3: np.array(f3).transpose()}
else:
return {0: np.array(f0).transpose(), 1: np.array(f1).transpose(), 2: np.array(f2).transpose()}
def plot_psrrates(pkllist, outname=''):
""" Plot cumulative rate histograms. List of pkl files in order, as for make_psrrates.
"""
if not outname:
outname = 'tmp.png'
labels = {0: 'Flux at 0\'', 1: 'Flux at 7\'', 2: 'Flux at 15\'', 3: 'Flux at 25\''}
labelsr = {1: 'Flux Ratio 7\' to 0\'', 2: 'Flux Ratio 15\' to 0\'', 3: 'Flux Ratio 25\' to 0\''}
colors = {0: 'b.', 1: 'r.', 2: 'g.', 3: 'y.'}
rates = make_psrrates(pkllist)
plt.clf()
fig = plt.figure(1, figsize=(10,8), facecolor='white')
ax = fig.add_subplot(211, axis_bgcolor='white')
for kk in rates.keys():
flux, rate = rates[kk]
plt.plot(flux, rate, colors[kk], label=labels[kk])
plt.setp( ax.get_xticklabels(), visible=False)
plt.ylabel('Flux (Jy)', fontsize='20')
plt.legend(numpoints=1)
plt.loglog()
ax2 = fig.add_subplot(212, sharex=ax, axis_bgcolor='white')
flux0, rate0 = rates[0]
for kk in rates.keys():
flux, rate = rates[kk]
if kk == 1:
r10 = [rate[i]/rate0[np.where(flux0 == flux[i])[0][0]] for i in range(len(rate))]
plt.plot(flux, r10, colors[kk], label=labelsr[kk])
elif kk == 2:
r20 = [rate[i]/rate0[np.where(flux0 == flux[i])[0][0]] for i in range(len(rate))]
plt.plot(flux, r20, colors[kk], label=labelsr[kk])
elif kk == 3:
r30 = [rate[i]/rate0[np.where(flux0 == flux[i])[0][0]] for i in range(len(rate))]
plt.plot(flux, r30, colors[kk], label=labelsr[kk])
plt.xlabel('Rate (1/s)', fontsize='20')
plt.ylabel('Flux ratio', fontsize='20')
plt.legend(numpoints=1)
plt.subplots_adjust(hspace=0)
# find typical ratio. avoid pulsar period saturation and low-count regimes (high and low ends)
if len(rates) == 4:
        logger.info('flux ratio, lowest common (1/0, 2/0, 3/0): %s', (r10[len(r30)-1], r20[len(r30)-1], r30[-1]))
        logger.info('flux ratio, high end (1/0, 2/0, 3/0): %s', (r10[-1], r20[-1], r30[-1]))
    elif len(rates) == 3:
        logger.info('flux ratio, lowest common (1/0, 2/0): %s', (r10[len(r20)-1], r20[-1]))
        logger.info('flux ratio, high end (1/0, 2/0): %s', (r10[-1], r20[-1]))
plt.savefig(outname)
def mock_fluxratio(candsfile, mockcandsfile, dmbin=0):
""" Associates mock cands with detections in candsfile by integration.
Returns ratio of detected to expected flux for all associations.
"""
loc, prop = read_candidates(candsfile)
loc2, prop2 = read_candidates(mockcandsfile)
dmselect = np.where(loc[:,2] == dmbin)[0]
mocki = [i for i in loc2[:,1].astype(int)] # known transients
rat = []; newloc = []; newprop = []
for i in mocki:
try:
detind = list(loc[dmselect,1]).index(i) # try to find detection
rat.append(prop[dmselect][detind][1]/prop2[mocki.index(i)][1])
newloc.append(list(loc2[mocki.index(i)]))
newprop.append(list(prop2[mocki.index(i)]))
except ValueError:
pass
return rat, np.array(newloc), newprop
|
demorest/rtpipe
|
rtpipe/parsecands.py
|
Python
|
bsd-3-clause
| 25,631
|
#!/usr/bin/env python
import string
import struct
import sys
import mtbl
def merge_func(key, val0, val1):
i0 = mtbl.varint_decode(val0)
i1 = mtbl.varint_decode(val1)
return mtbl.varint_encode(i0 + i1)
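# Illustrative example (not part of the original file): if val0 and val1 are the varint
# encodings of 2 and 3, merge_func returns the varint encoding of 5, so entries sharing a
# key across the merged MTBL inputs end up with their counts summed.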
def main(input_fnames, output_fname):
merger = mtbl.merger(merge_func)
writer = mtbl.writer(output_fname, compression=mtbl.COMPRESSION_SNAPPY)
for fname in input_fnames:
reader = mtbl.reader(fname)
merger.add_reader(reader)
merger.write(writer)
if __name__ == '__main__':
if len(sys.argv) < 3:
sys.stderr.write('Usage: %s <MTBL INPUT FILE> [<MTBL INPUT FILE>...] <MTBL OUTPUT FILE>\n' % sys.argv[0])
sys.exit(1)
main(sys.argv[1:-1], sys.argv[-1])
|
edmonds/pymtbl
|
examples/wf/pymtbl_wf_merge.py
|
Python
|
isc
| 721
|
import lintreview.github as github
from . import load_fixture
from mock import call
from mock import patch
from mock import Mock
from nose.tools import eq_
from pygithub3 import Github
from requests.models import Response
config = {
'GITHUB_URL': 'https://api.github.com/',
'GITHUB_USER': 'octocat',
'GITHUB_PASSWORD': ''
}
def test_get_client():
gh = github.get_client(config, 'markstory', 'lint-review')
assert isinstance(gh, Github)
def test_get_lintrc():
gh = github.get_client(config, 'markstory', 'lint-review')
lintrc = github.get_lintrc(gh)
assert lintrc is not None, 'Should get something'
assert isinstance(lintrc, str)
@patch('pygithub3.core.client.Client.get')
def test_register_hook(http):
response = Response()
response._content = '[]'
http.return_value = response
gh = Github()
gh.repos.hooks.create = Mock()
url = 'http://example.com/review/start'
github.register_hook(gh, url, 'mark', 'lint-test')
assert gh.repos.hooks.create.called, 'Create not called'
calls = gh.repos.hooks.create.call_args_list
expected = call({
'name': 'web',
'active': True,
'config': {
'content_type': 'json',
'url': url,
},
'events': ['pull_request']
}, user='mark', repo='lint-test')
eq_(calls[0], expected)
@patch('pygithub3.core.client.Client.get')
def test_register_hook__already_exists(http):
response = Response()
response._content = load_fixture('webhook_list.json')
http.return_value = response
gh = Github()
gh.repos.hooks.create = Mock()
url = 'http://example.com/review/start'
github.register_hook(gh, url, 'mark', 'lint-test')
assert gh.repos.hooks.create.called is False, 'Create called'
@patch('pygithub3.core.client.Client.get')
def test_unregister_hook__success(http):
response = Response()
response._content = load_fixture('webhook_list.json')
http.return_value = response
gh = Github()
gh.repos.hooks.delete = Mock()
url = 'http://example.com/review/start'
github.unregister_hook(gh, url, 'mark', 'lint-test')
assert gh.repos.hooks.delete.called, 'Delete not called'
@patch('pygithub3.core.client.Client.get')
def test_unregister_hook__not_there(http):
response = Response()
response._content = "[]"
http.return_value = response
gh = Github()
gh.repos.hooks.delete = Mock()
url = 'http://example.com/review/start'
    try:
        github.unregister_hook(gh, url, 'mark', 'lint-test')
        raised = False
    except:
        raised = True
    assert raised, 'Expected an exception when the hook is not registered'
assert gh.repos.hooks.delete.called is False, 'Delete called'
|
alexBaizeau/lint-review
|
tests/test_github.py
|
Python
|
mit
| 2,712
|
from datetime import datetime
import numpy as np
from netcdf import netcdf as nc
from multiprocessing import Process, Pipe
from itertools import izip
from cache import memoize
import multiprocessing as mp
import os
import logging
class ProcessingStrategy(object):
def __init__(self, algorithm, loader, cache):
self.algorithm = algorithm
self.algorithm.create_variables(loader, cache, self)
def int_to_dt(self, time):
return datetime.utcfromtimestamp(int(time))
@property
@memoize
def months(self):
months = pmap(lambda t: self.int_to_dt(t).month, self.times)
return np.array(months).reshape(self.times.shape)
@property
@memoize
def gamma(self):
to_julianday = lambda time: self.int_to_dt(time).timetuple().tm_yday
days_of_year = lambda time: to_julianday(
(datetime(self.int_to_dt(time).year, 12, 31)).timetuple()[7])
times = self.times
total_days = np.array(pmap(days_of_year, times)).reshape(times.shape)
julian_day = np.array(pmap(to_julianday, times)).reshape(times.shape)
return self.getdailyangle(julian_day, total_days)
@property
@memoize
def decimalhour(self):
int_to_dt = lambda t: datetime.utcfromtimestamp(t)
int_to_decimalhour = (lambda time: int_to_dt(time).hour +
int_to_dt(time).minute/60.0 +
int_to_dt(time).second/3600.0)
result = pmap(int_to_decimalhour, self.times)
return np.array(result).reshape(self.times.shape)
def calculate_slots(self, images_per_hour):
return np.round(self.decimalhour * images_per_hour).astype(int)
def spawn(f):
def fun(pipe, x):
pipe.send(f(x))
pipe.close()
return fun
def mp_map(f, X):
pipe = [Pipe() for x in X]
proc = [Process(target=spawn(f), args=(c, x))
for x, (p, c) in izip(X, pipe)]
[p.start() for p in proc]
[p.join() for p in proc]
return [p.recv() for (p, c) in pipe]
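# Illustrative usage (not part of the original file):
#   mp_map(lambda x: x * x, [1, 2, 3]) -> [1, 4, 9]
# each element is evaluated in its own Process and the result is collected through a Pipe.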
pmap = map # if 'armv6l' in list(os.uname()) else mp_map
try:
raise Exception('Force CPU')
from pycuda.compiler import SourceModule
import pycuda.gpuarray as gpuarray
import pycuda.driver as cuda
import pycuda.autoinit
cuda_can_help = True
import gpu as geo
print "<< using CUDA cores >>"
except Exception, e:
if e.message != 'Force CPU':
logging.warn(e)
cuda_can_help = False
import cpu as geo
|
ahMarrone/solar_radiation_model
|
models/core.py
|
Python
|
mit
| 2,493
|
from setuptools import setup, find_packages
setup(
name = "QuickBooks",
version = '0.2.2',
packages = find_packages(),
install_requires = ['requests', 'requests-oauthlib', 'python-keyczar==0.71c', 'django-extensions'],
include_package_data = True,
# metadata for upload to PyPI
author = "Hans Kuder",
author_email = "hans@hiidef.com",
maintainer = "Brent Hagany",
maintainer_email = "brent@hiidef.com",
description = "Django Quickbooks App",
license = "MIT License",
keywords = "django quickbooks intuit",
url = "http://github.com/hiidef/django-quickbooks",
)
|
hiidef/django-quickbooks
|
setup.py
|
Python
|
mit
| 624
|
import flask
import pymysql.cursors
from donut.auth_utils import is_admin
from donut.modules.core import helpers as core
def get_group_list_data(fields=None, attrs={}):
"""
Queries the database and returns list of group data constrained by the
specified attributes.
Arguments:
fields: The fields to return. If None specified, then default_fields
are used.
attrs: The attributes of the group to filter for.
Returns:
result: The fields and corresponding values of groups with desired
attributes. In the form of a list of dicts with key:value of
columnname:columnvalue.
"""
all_returnable_fields = [
"group_id", "group_name", "group_desc", "type", "anyone_can_send",
"newsgroups", "visible"
]
default_fields = ["group_id", "group_name", "group_desc", "type"]
    if fields is None:
fields = default_fields
else:
if any(f not in all_returnable_fields for f in fields):
return "Invalid field"
query = "SELECT " + ', '.join(fields) + " FROM groups "
    # build the parameter list up front so the query can also run with empty attrs
    values = list(attrs.values())
    if attrs:
        query += "WHERE "
        query += " AND ".join([key + "= %s" for key in attrs.keys()])
# Execute the query
with flask.g.pymysql_db.cursor() as cursor:
cursor.execute(query, values)
return list(cursor.fetchall())
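# Illustrative usage with hypothetical values (not part of the original file):
#   get_group_list_data(fields=['group_id', 'group_name'], attrs={'type': 'committee'})
#   -> [{'group_id': 1, 'group_name': 'Some Committee'}, ...]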
def get_group_positions(group_id):
"""
Returns a list of all positions for a group with the given id.
Arguments:
group_id: The integer id of the group
"""
query = "SELECT pos_id, pos_name FROM positions "
query += "WHERE group_id = %s"
with flask.g.pymysql_db.cursor() as cursor:
cursor.execute(query, [group_id])
return list(cursor.fetchall())
def get_position_holders(pos_id):
"""
    Queries the database and returns a list of all members and their
    names who currently hold the position specified by pos_id. This includes
    the case where person A holds position Y because they hold position X
    and Y is linked to X.
Arguments:
pos_id: The position to look up -- may be a single int or a list of int's
Returns:
results: A list where each element describes a user who holds the
position. Each element is a dict with key:value of
columnname:columnvalue
"""
if isinstance(pos_id, list):
if not pos_id:
return []
else:
pos_id = [pos_id]
query = f"""
SELECT DISTINCT user_id, full_name, hold_id, start_date, end_date
FROM current_position_holders NATURAL JOIN members NATURAL JOIN members_full_name
WHERE pos_id IN ({', '.join('%s' for id in pos_id)})
ORDER BY last_name, full_name
"""
with flask.g.pymysql_db.cursor() as cursor:
cursor.execute(query, pos_id)
return cursor.fetchall()
def get_positions_held(user_id):
''' Returns a list of all position id's held (directly or indirectly)
by the given user. If no positions are found, [] is returned. '''
query = 'SELECT DISTINCT pos_id FROM current_position_holders WHERE user_id = %s'
with flask.g.pymysql_db.cursor() as cursor:
cursor.execute(query, user_id)
res = cursor.fetchall()
return [row['pos_id'] for row in res]
def get_position_id(group_name, position_name):
''' Returns the position id associated with the given group name and
position name '''
query = '''SELECT pos_id FROM positions WHERE pos_name = %s
AND group_id = (SELECT min(group_id) FROM groups WHERE group_name = %s)'''
with flask.g.pymysql_db.cursor() as cursor:
cursor.execute(query, (position_name, group_name))
res = cursor.fetchone()
return res and res['pos_id']
def get_group_data(group_id, fields=None):
"""
    Queries the database and returns group data for the specified group_id.
Arguments:
group_id: The group to look up
fields: The fields to return. If None are specified, then
default_fields are used
Returns:
result: The fields and corresponding values of group with group_id.
        In the form of a dict with key:value of columnname:columnvalue
"""
all_returnable_fields = [
"group_id", "group_name", "group_desc", "type", "anyone_can_send",
"newsgroups", "visible"
]
default_fields = ["group_id", "group_name", "group_desc", "type"]
if fields is None:
fields = default_fields
else:
if any(f not in all_returnable_fields for f in fields):
return "Invalid field"
query = "SELECT " + ', '.join(fields) + " FROM groups "
query += "WHERE group_id = %s"
with flask.g.pymysql_db.cursor() as cursor:
cursor.execute(query, [group_id])
result = cursor.fetchone()
return result or {}
def get_position_data(fields=None, include_house_and_ug=True, order_by=None):
"""
Queries database for all instances where an individual holds a position.
This includes when person A directly holds position Y, or when person A
indirectly holds Y by holding position X and with
an entry in the position relation table that links position X to position Y.
Arguments:
fields: The fields to return. If None are specified, then
default_fields are used
include_house_and_ug: Whether to include house membership positions and
ug-* group membership positions
order_by: Fields to order the results by, in ascending order
Returns:
        result: A list where each element is a dict corresponding to a person holding
        a position. key:value pairs are columnname:columnvalue
"""
all_returnable_fields = [
"user_id", "full_name", "group_id", "group_name", "pos_id", "pos_name",
"start_date", "end_date"
]
default_fields = [
"user_id", "full_name", "group_id", "group_name", "pos_id", "pos_name"
]
if fields is None:
fields = default_fields
else:
if any(f not in all_returnable_fields for f in fields):
return "Invalid field"
# construct query
query = f"""
SELECT DISTINCT {', '.join(fields)}
FROM positions
NATURAL JOIN current_position_holders
NATURAL JOIN members_full_name
NATURAL JOIN groups
"""
if not include_house_and_ug:
query += """
WHERE NOT (
pos_id IN (SELECT pos_id FROM house_positions) OR
(type = 'ug-auto' AND pos_name = 'Member')
)
"""
if order_by:
if not all(field in fields for field in order_by):
return "Invalid ORDER BY fields"
query += " ORDER BY " + ', '.join(order_by)
with flask.g.pymysql_db.cursor() as cursor:
cursor.execute(query)
return cursor.fetchall()
def add_position(group_id, pos_name, send=False, control=False, receive=True):
'''
Inserts new position into the database associated
with the given group and with the given name
Arguments:
group_id: the id of the group you want to insert the position into
pos_name: name of the position to be created
'''
# Construct the statement
s = """
INSERT INTO positions (group_id, pos_name, send, control, receive)
VALUES (%s, %s, %s, %s, %s)
"""
# Execute query
with flask.g.pymysql_db.cursor() as cursor:
cursor.execute(s, (group_id, pos_name, send, control, receive))
def create_position_holder(pos_id, user_id, start_date, end_date):
'''
Inserts row into position_holders table
Arguments:
pos_id: id of the position
user_id: user id of the person that the position is to be assigned
start_date: Starting date of the holding period, format is 'yyyy-mm-dd'
end_date: end date of the hold period
'''
s = """INSERT INTO position_holders (pos_id, user_id, start_date,
end_date) VALUES (%s, %s, %s, %s)"""
with flask.g.pymysql_db.cursor() as cursor:
cursor.execute(s, (pos_id, user_id, start_date, end_date))
def end_position_holder(hold_id):
"""
Sets the end of the given position hold to yesterday,
thereby removing the user from the position.
"""
query = """
UPDATE position_holders
SET end_date = SUBDATE(CURRENT_DATE, 1)
WHERE hold_id = %s
"""
with flask.g.pymysql_db.cursor() as cursor:
cursor.execute(query, hold_id)
def create_group(group_name,
group_desc="",
group_type="",
newsgroups=False,
anyone_can_send=False,
visible=False):
"""
Creates a group with the given group_id, group name and other specifications
Arguments:
group_name: The group name
group_desc: Description of group (if there is any)
group_type: Type of group
        newsgroups: Toggles if group is a news group
anyone_can_send: Toggles if anyone can send emails to this group
visible: Toggles if the group is visible
"""
query = """
INSERT INTO groups (
group_name, group_desc, type, newsgroups, anyone_can_send, visible
) VALUES (%s, %s, %s, %s, %s, %s)
"""
with flask.g.pymysql_db.cursor() as cursor:
cursor.execute(query, (group_name, group_desc, group_type, newsgroups,
anyone_can_send, visible))
new_group_id = cursor.lastrowid
add_position(new_group_id, "Member")
return new_group_id
def delete_group(group_id):
'''
    Deletes the group specified with the group_id and all associated
position holders entries and positions
Arguments:
group_id: id of the group to be deleted
'''
s = "DELETE FROM groups WHERE group_id=%s"
with flask.g.pymysql_db.cursor() as cursor:
cursor.execute(s, group_id)
def get_members_by_group(group_id):
'''
Queries the database and returns a list of all users associated with
a particular group either because a) They hold a position in the group
or b) They hold a position linked to another position in the group
Arguments:
group_id: id of group in question
Returns:
List where each element is a JSON representing the data of each
person
'''
query = """
SELECT DISTINCT user_id
FROM positions NATURAL JOIN current_position_holders
WHERE group_id = %s
"""
with flask.g.pymysql_db.cursor() as cursor:
cursor.execute(query, [group_id])
result = cursor.fetchall()
# Get data for each user id
members = [row['user_id'] for row in result]
result = core.get_member_data(members)
return result
def is_user_in_group(user_id, group_id):
"""
Returns whether the given user holds any position in the given group
"""
query = """
SELECT pos_id
FROM current_position_holders NATURAL JOIN positions
WHERE user_id = %s AND group_id = %s
LIMIT 1
"""
with flask.g.pymysql_db.cursor() as cursor:
cursor.execute(query, [user_id, group_id])
return cursor.fetchone() is not None
def get_group_id(group_name):
"""
Returns the group_id for a group
"""
query = """
SELECT group_id FROM groups WHERE group_name = %s
"""
with flask.g.pymysql_db.cursor() as cursor:
cursor.execute(query, group_name)
res = cursor.fetchone()
return None if res is None else res['group_id']
def can_control(user_id, group_id):
"""
Returns whether the given user has control privileges for the given group.
"""
if is_admin():
return True
query = """
SELECT pos_id
FROM current_position_holders NATURAL JOIN positions
WHERE user_id = %s AND group_id = %s AND control = 1
LIMIT 1
"""
with flask.g.pymysql_db.cursor() as cursor:
cursor.execute(query, (user_id, group_id))
return cursor.fetchone() is not None
def get_position_group(pos_id):
"""
Returns the group_id of the group that the given position belongs to.
"""
query = 'SELECT group_id FROM positions WHERE pos_id = %s'
with flask.g.pymysql_db.cursor() as cursor:
cursor.execute(query, pos_id)
position = cursor.fetchone()
return position and position['group_id']
def get_hold_group(hold_id):
"""
Returns the group_id of the group that
the given (direct) position hold belongs to.
"""
query = """
SELECT group_id
FROM current_direct_position_holders NATURAL JOIN positions
WHERE hold_id = %s
"""
with flask.g.pymysql_db.cursor() as cursor:
cursor.execute(query, hold_id)
position = cursor.fetchone()
return position and position['group_id']
|
ASCIT/donut-python
|
donut/modules/groups/helpers.py
|
Python
|
mit
| 12,968
|
from __future__ import print_function
letters = [
('b', 'int8'),
('w', 'int16'),
('i', 'int32'),
('l', 'int64'),
('d', 'float64'),
('f', 'float32'),
('c', 'complex64'),
('z', 'complex128') ]
shapes = [
('scalar', ()),
('vector', (False,)),
('row', (True, False)),
('col', (False, True)),
('matrix', (False,False)),
('tensor3', (False,False,False)),
('tensor4', (False,False,False,False)),]
hdr = '============ =========== ==== =========== ================================='
print(hdr)
print('Constructor dtype ndim shape broadcastable')
print(hdr)
for letter in letters:
for shape in shapes:
suff = ',)' if len(shape[1])==1 else ')'
s = '(' + ','.join('1' if b else '?' for b in shape[1]) + suff
print('%s%-10s %-10s %-4s %-10s %-20s' %(
letter[0], shape[0], letter[1], len(shape[1]), s, shape[1]
))
print(hdr)
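# Example of one generated row (illustrative): for letter ('b', 'int8') and shape
# ('vector', (False,)) the script prints a line roughly like
#   bvector     int8       1    (?,)       (False,)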
|
rizar/attention-lvcsr
|
libs/Theano/doc/generate_dtype_tensor_table.py
|
Python
|
mit
| 978
|
#!/usr/bin/env python
#https://bitbucket.org/fotosyn/fotosynlabs/src/9819edca892700e459b828517bba82b0984c82e4/RaspiLapseCam/raspiLapseCam.py?at=master
#http://www.instructables.com/id/Simple-timelapse-camera-using-Raspberry-Pi-and-a-c/#step1
every_minutes=10
HOME_FOLDER="/home/pi/"
sender='bin/send_file.sh'
sender_endpoint='pifollow'
sender_rnd='54628'
sender_piname='pi2'
#
# raspiLapseCam.py
#
# Created by James Moore on 28/07/2013.
# Copyright (c) 2013 Fotosyn. All rights reserved.
#
# Raspberry Pi is a trademark of the Raspberry Pi Foundation.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.>
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
# This script sets up and runs a Python Script which, at intervals invokes a capture
# command to the Raspberry Pi camera, and stores those files locally in a dynamically
# named folder.
# To invoke, copy this script to an easy to find file location on your Raspberry Pi
# (eg. /home/pi/), log into your Raspberry Pi via terminal and type:
#
# sudo python /your/file/location/raspiLapseCam.py (add &) if you wish to run as a
# background task. A process ID will be shown which can be ended with
# sudo kill XXXX (XXXX = process number)
# Based on your settings the application will now begin capturing images,
# saving them to your chosen file location (same as the current location of this file by default).
# Import some frameworks
import os
import sys
import time
import RPi.GPIO as GPIO
from datetime import datetime
sender_path=os.path.join(HOME_FOLDER,sender)
every_seconds=every_minutes*60
if __name__ == '__main__':
if len(sys.argv) > 1:
every_minutes = int(sys.argv[1])
do_loop = True
if every_minutes == 0:
do_loop = False
# Grab the current datetime which will be used to generate dynamic folder names
d = datetime.now()
initYear = d.year
initMonth = d.month
initDate = d.day
initHour = d.hour
initMins = d.minute
# Define the location where you wish to save files. Set to HOME as default.
# If you run a local web server on Apache you could set this to /var/www/ to make them
# accessible via web browser.
folderToSave = HOME_FOLDER + "timelapse_%(yr)04d_%(mo)02d_%(da)02d_%(hr)02d_%(mi)02d" % {
'yr':initYear,'mo':initMonth,'da':initDate,'hr':initHour,'mi':initMins }
if not do_loop:
folderToSave = HOME_FOLDER + "timelapse"
if not os.path.exists(folderToSave):
os.mkdir(folderToSave)
# Set the initial serial for saved images to 1
fileSerial = 1
    # Run a WHILE loop indefinitely
while True:
d = datetime.now()
if True:
# Set FileSerialNumber to 000X using four digits
fileSerialNumber = fileSerial
# Capture the CURRENT time (not start time as set above) to insert into each capture image filename
mins = d.minute
hour = d.hour
day = d.day
month = d.month
year = d.year
# Define the size of the image you wish to capture.
imgWidth = 1024 # Max = 2592
imgHeight = 768 # Max = 1944
print " ====================================== Saving file at %02d:%02d" % ( hour, mins )
outfile = "%(fd)s/%(yr)04d_%(mo)02d_%(da)02d_%(hr)02d_%(mi)02d.jpg" % {
'fd':folderToSave,'se':fileSerialNumber,'yr':year,'mo':month,'da':day,'hr':hour,'mi':mins}
# Capture the image using raspistill. Set to capture with added sharpening, auto white balance and average metering mode
# Change these settings where you see fit and to suit the conditions you are using the camera in
print "capturing file"
os.system( "raspistill -w %(wi)d -h %(he)d -o %(of)s -sh 40 -awb auto -mm average -v -vf 2>%(lo)s 1>>%(lo)s && %(se)s %(see)s %(ser)s %(sep)s %(of)s 2>>%(lo)s 1>> %(lo)s" % {
'wi':imgWidth,'he':imgHeight,'of':outfile,'lo':outfile+'.log',
'se':sender_path,'see':sender_endpoint,'ser':sender_rnd,
'sep':sender_piname} )
print "captured"
# Increment the fileSerial
fileSerial += 1
if not do_loop:
break
else:
                # Wait every_seconds (every_minutes * 60) before the next capture
time.sleep(every_seconds)
else:
# Just trapping out the WHILE Statement
print " ====================================== Doing nothing at this time"
|
sauloalrpi/pifollowjs
|
pi/raspiLapseCam.py
|
Python
|
mit
| 5,751
|
# checks whether the filename and the flag "unlink_date" match
CONFIGFILE = "cordex_eur_ALL.cfg"
DATABASE = "../../db/cordex_eur_ALL.db"
import sys
sys.path.insert(0, "..")
import os
# import cPickle
###############
import ConfigParser
# reload(ConfigParser)
import esget_logger
# reload(esget_logger)
import esget_db
#reload(esget_db)
import esget_esgf
# reload(esget_esgf)
import esget_fs
# reload(esget_fs)
import esget_wget
# reload(esget_wget)
import esget_local_files
# reload(esget_local_files)
RESETDB = False
config = ConfigParser.SafeConfigParser()
configfile = os.path.join("../../config", CONFIGFILE)
config.read(configfile)
config.set('Paths', 'logfile', '../../log/debug.log')
esget_logger.Logging(config)
C = esget_db.EsgetDB(config)
C.dbname = DATABASE
C.init_db(reset=RESETDB)
ulfiles = C.get_unlink_files()
import os.path
fnotexist = [x for x in ulfiles if not os.path.isfile(x)]
if len(fnotexist) > 0:
print("some don't exist")
else:
print("all exist")
|
hvwaldow/esget
|
code/misc/check_unlink.py
|
Python
|
mit
| 1,005
|
import argparse
from line_profiler import LineProfiler
import ruler
class Morning(ruler.Grammar):
"""
Implementation of the following grammar::
grammar = who, ' likes to drink ', what;
who = 'John' | 'Peter' | 'Ann';
what = tea | juice;
juice = 'juice';
tea = 'tea', [' ', milk];
milk = 'with milk'
"""
who = ruler.OneOf('John', 'Peter', 'Ann')
juice = ruler.Rule('juice')
milk = ruler.Optional(' with milk')
tea = ruler.Rule('tea', milk)
what = ruler.OneOf(juice, tea)
grammar = ruler.Rule(who, ' likes to drink ', what, '\.')
def one_match():
morning_rule = Morning.create()
assert morning_rule.match('Ann likes to drink tea with milk.')
assert morning_rule.what.tea.milk.matched
def main():
def method_name_parser(text):
parts = text.split('.')
if len(parts) != 2:
raise argparse.ArgumentTypeError('Must be of the form class.method but got ' + text)
for part in parts:
if not part.isidentifier():
raise argparse.ArgumentTypeError(part + ' is not a valid identifier')
return parts
parser = argparse.ArgumentParser(description="Profile the performance of ruler module")
parser.add_argument("method_spec", type=method_name_parser, nargs='+',
help="The method to profile. Must be formatted as class.method. " +
"More than one method can be specified")
args = parser.parse_args()
profile = LineProfiler()
for class_name, method_name in args.method_spec:
class_type = getattr(ruler.rules, class_name)
method = getattr(class_type, method_name)
profile.add_function(method)
profile.enable()
one_match()
profile.disable()
profile.print_stats()
if __name__ == '__main__':
main()
|
yanivmo/rulre
|
performance/profile.py
|
Python
|
mit
| 1,877
|
from typing import Dict, Generic, List, Optional, Union, Sequence, Type, TypeVar
import datetime
import itertools
import re
import attr
import cattr
from google.oauth2 import service_account # type: ignore
import googleapiclient.errors # type: ignore
import googleapiclient.discovery # type: ignore
NIL = "\x00"
DATE_FORMAT = "%m/%d/%Y"
@attr.s(auto_attribs=True, kw_only=True)
class RegisterUser:
hackathon: str
first_name: str
last_name: str
email: str
organization: str
role: str
tshirt_size: str = ""
class Sheets:
"""An API for manipulating the Google Sheet containing hackathon data."""
def __init__(self, *, spreadsheet_id: str, cred_file: str):
scopes = [
"https://www.googleapis.com/auth/drive",
"https://www.googleapis.com/auth/drive.file",
"https://www.googleapis.com/auth/spreadsheets",
]
credentials = service_account.Credentials.from_service_account_file(
cred_file, scopes=scopes
)
service = googleapiclient.discovery.build(
"sheets", "v4", credentials=credentials, cache_discovery=False
)
client = service.spreadsheets().values()
self.id = spreadsheet_id
self.hackathons = Hackathons(client=client, spreadsheet_id=spreadsheet_id)
self.registrations = Registrations(client=client, spreadsheet_id=spreadsheet_id)
self.users = Users(client=client, spreadsheet_id=spreadsheet_id)
def get_hackathons(self) -> Sequence["Hackathon"]:
"""Get names of active hackathons."""
return self.hackathons.get_upcoming()
def register_user(self, register_user: RegisterUser):
"""Register user to a hackathon"""
user = self.users.find(register_user.email) or User()
user.first_name = register_user.first_name
user.last_name = register_user.last_name
user.email = register_user.email
user.organization = register_user.organization
user.role = register_user.role
user.tshirt_size = register_user.tshirt_size
self.users.save(user)
registrant = Registrant(
user_email=user.email, hackathon_name=register_user.hackathon
)
if not self.registrations.is_registered(registrant):
self.registrations.register(registrant)
return user
@attr.s(auto_attribs=True, kw_only=True)
class Model:
id: Optional[int] = None
TModel = TypeVar("TModel", bound=Model)
converter = cattr.Converter()
class WhollySheet(Generic[TModel]):
def __init__(
self,
*,
client,
spreadsheet_id: str,
sheet_name: str,
structure: Type[TModel],
key: str,
converter=converter,
):
self.client = client
self.spreadsheet_id = spreadsheet_id
self.sheet_name = sheet_name
self.range = f"{sheet_name}!A1:end"
self.structure = structure
self.key = key
self.converter = converter
def save(self, model: TModel):
if model.id:
self.update(model)
else:
self.create(model)
def create(self, model: TModel):
"""Create the model data as a row into sheet"""
try:
serialized_ = self.converter.unstructure(model)
serialized = self._convert_to_list(serialized_)
body = {"values": [serialized]}
response = self.client.append(
spreadsheetId=self.spreadsheet_id,
range=self.range,
insertDataOption="INSERT_ROWS",
valueInputOption="RAW",
body=body,
).execute()
except (TypeError, AttributeError):
raise SheetError("Could not insert row")
# something like "users!A6:F6"
updated_range = response["updates"]["updatedRange"]
match = re.match(fr"{self.sheet_name}!A(?P<row_id>\d+)", updated_range)
if not match:
raise SheetError("Could not determine row_id")
model.id = int(match.group("row_id"))
def rows(self) -> Sequence[TModel]:
"""Retrieve rows from sheet"""
try:
response = self.client.get(
spreadsheetId=self.spreadsheet_id, range=self.range
).execute()
except googleapiclient.errors.HttpError as ex:
raise SheetError(str(ex))
try:
rows = response["values"]
data = self._convert_to_dict(rows)
# ignoring type (mypy bug?) "Name 'self.structure' is not defined"
response = self.converter.structure(
data, Sequence[self.structure] # type: ignore
)
except (TypeError, AttributeError) as ex:
raise SheetError(str(ex))
return response
    def update(self, model: TModel):
        """Update an existing row in the sheet with the model data"""
try:
serialized_ = self.converter.unstructure(model)
serialized = self._convert_to_list(serialized_)
body = {"values": [serialized]}
self.client.update(
spreadsheetId=self.spreadsheet_id,
range=f"{self.sheet_name}!A{model.id}:end",
valueInputOption="RAW",
body=body,
).execute()
except (TypeError, AttributeError):
raise SheetError("Could not update row")
def find(
self,
value: Union[str, bool, int, datetime.datetime, None],
*,
key: Optional[str] = None,
) -> Optional[TModel]:
key = key if key else self.key
ret = None
for row in self.rows():
if getattr(row, key) == value:
ret = row
break
return ret
def _convert_to_dict(self, data) -> List[Dict[str, str]]:
"""Given a list of lists where the first list contains key names, convert it to
a list of dictionaries.
"""
header = data[0]
header.insert(0, "id")
result: List[Dict[str, str]] = []
# Google Sheets are 1 indexed, with the first row being the header.
header_offset = 2
for index, row in enumerate(data[1:]):
row.insert(0, index + header_offset) # id value
row_tuples = itertools.zip_longest(header, row, fillvalue="")
result.append(dict(row_tuples))
return result
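    # Illustrative example with hypothetical data (not part of the original file):
    #   _convert_to_dict([["name", "email"], ["Ann", "a@x.com"]])
    #   -> [{"id": 2, "name": "Ann", "email": "a@x.com"}]
    # the id is 2 because rows are 1-indexed and row 1 is the header.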
def _convert_to_list(
self, data: Dict[str, Union[str, int]]
) -> Sequence[Union[str, int]]:
"""Given a dictionary, return a list containing its values. The 'id' key is dropped
since it's not part of the schema
"""
data.pop("id")
return list(data.values())
@attr.s(auto_attribs=True, kw_only=True)
class User(Model):
first_name: str = ""
last_name: str = ""
email: str = ""
date_created: datetime.datetime = attr.ib(
default=attr.Factory(lambda: datetime.datetime.now(tz=datetime.timezone.utc))
)
organization: str = ""
role: str = ""
tshirt_size: str = ""
client_id: str = ""
client_secret: str = ""
setup_link: str = ""
class Users(WhollySheet[User]):
def __init__(self, *, client, spreadsheet_id: str):
super().__init__(
client=client,
spreadsheet_id=spreadsheet_id,
sheet_name="users",
structure=User,
key="email",
)
@attr.s(auto_attribs=True, kw_only=True)
class Hackathon(Model):
name: str
label: str
location: str
date: datetime.datetime
duration_in_days: int
class Hackathons(WhollySheet[Hackathon]):
def __init__(self, *, client, spreadsheet_id: str):
super().__init__(
client=client,
spreadsheet_id=spreadsheet_id,
sheet_name="hackathons",
structure=Hackathon,
key="name",
)
def get_upcoming(
self, *, cutoff: Optional[datetime.datetime] = None
) -> Sequence[Hackathon]:
now = datetime.datetime.now(tz=datetime.timezone.utc) - datetime.timedelta(
days=1
)
ret = []
for hackathon in self.rows():
if hackathon.date < now:
continue
if cutoff and hackathon.date > cutoff:
continue
ret.append(hackathon)
return ret
@attr.s(auto_attribs=True, kw_only=True)
class Registrant(Model):
user_email: str
hackathon_name: str
date_registered: Optional[datetime.datetime] = None
attended: Optional[bool] = None
class Registrations(WhollySheet[Registrant]):
def __init__(self, *, client, spreadsheet_id: str):
super().__init__(
client=client,
spreadsheet_id=spreadsheet_id,
sheet_name="registrations",
structure=Registrant,
key="hackathon_name",
)
    def is_registered(self, registrant: Registrant) -> bool:
        """Check if registrant is already registered"""
registrants = super().rows()
registered = False
for r in registrants:
if (
r.user_email == registrant.user_email
and r.hackathon_name == registrant.hackathon_name
):
registered = True
return registered
def register(self, registrant: Registrant):
"""Register user by inserting registrant details into registrations sheet"""
registrant.date_registered = datetime.datetime.now(tz=datetime.timezone.utc)
super().create(registrant)
class SheetError(Exception):
"""Improperly formatted data to deserialize"""
converter.register_structure_hook(
datetime.datetime, lambda d, _: datetime.datetime.fromisoformat(d) # type: ignore
)
converter.register_unstructure_hook(
datetime.datetime, lambda d: d.isoformat() # type: ignore
)
def _convert_bool(val: str, _: bool) -> Optional[bool]:
converted: Optional[bool]
if val.lower() in ("yes", "y", "true", "t", "1"):
converted = True
elif val.lower() in ("", "no", "n", "false", "f", "0", "null", "na"):
converted = False
elif val.lower() == NIL:
converted = None
else:
raise TypeError(f"Failed to convert '{val}' to bool")
return converted
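# Illustrative examples (not part of the original file):
#   _convert_bool("yes", True) -> True; _convert_bool("na", True) -> False;
#   _convert_bool("maybe", True) raises TypeError.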
converter.register_unstructure_hook(type(None), lambda t: NIL)
converter.register_structure_hook(bool, _convert_bool)
if __name__ == "__main__":
sheets = Sheets(spreadsheet_id="SHEET_ID", cred_file="CREDS_FILE")
|
looker-open-source/sdk-examples
|
python/hackathon_app/sheets.py
|
Python
|
mit
| 10,498
|
class Random:
def __init__(self, seed = 2):
self.a = 10000007
self.b = 31
self.salt = 0xdeadbeef
self.x = seed
def Rand(self):
self.x = self.x*self.a + self.b
self.x ^= self.salt;
self.x %= 10000000000
return self.x
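# Illustrative usage (not part of the original file): this is a small linear-congruential-style
# generator; Random(seed=2).Rand() returns a deterministic pseudo-random value in [0, 10**10),
# and repeated calls walk the same sequence for a given seed.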
|
mudream4869/crpg
|
testdata/test1/scripts/Tool.py
|
Python
|
mit
| 289
|
"""
The search library of ORB provides basic search functionality to all ORB models. It will also provide a
base class for more advanced searching capabilities such as AWS or Elasticsearch to be applied to particular models
during development.
"""
import orb
import logging
import re
import pyparsing
from collections import defaultdict
from projex.addon import AddonManager
log = logging.getLogger(__name__)
class Node(list):
def __eq__(self, other):
return list.__eq__(self, other) and self.__class__ == other.__class__
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, list.__repr__(self))
@classmethod
def group(cls, expr):
def group_action(s, l, t):
try:
lst = t[0].asList()
except (IndexError, AttributeError), e:
lst = t
return [cls(lst)]
return pyparsing.Group(expr).setParseAction(group_action)
class TextNode(Node):
def pattern(self, thesaurus, locale):
return thesaurus.synonyms(self[0].replace('*', '.*'), locale=locale)
class ExactNode(Node):
def pattern(self, thesaurus, locale):
return re.escape(self[0])
class ComparisonNode(Node): pass
# --------------------
# define the basic parser
# define printable options
UNICODE_PRINTABLES = u''.join(unichr(c) for c in xrange(65536) if not unichr(c).isspace())
# lookup basic phrases words
TEXT_OP = TextNode.group(pyparsing.Word(UNICODE_PRINTABLES))
TEXT_OP.setResultsName('word')
# lookup exact matches
EXACT_OP = ExactNode.group(pyparsing.QuotedString('"', unquoteResults=True, escChar='\\'))
EXACT_OP.setResultsName('exact')
TERM_OP = EXACT_OP | TEXT_OP
# lookup comparisons (column values)
COMPARISON_NAME = pyparsing.Word(UNICODE_PRINTABLES, excludeChars=':')
COMPARISON_OP = ComparisonNode.group(COMPARISON_NAME + pyparsing.Literal(':') + TERM_OP)
# create the search operator
BASIC_PARSER = pyparsing.OneOrMore(COMPARISON_OP | TERM_OP)
# --------------------
BASIC_SYNONYMS = {
'en_US': [
('is not', "isn't"),
('has not', "hasn't")
]
}
class SearchThesaurus(object):
def __init__(self, synonyms=None):
synonyms = synonyms or BASIC_SYNONYMS
self.__synonyms = defaultdict(dict)
for locale, pairings in synonyms.items():
for pairing in pairings:
expr = u'({0})'.format('|'.join(pairing))
for word in pairing:
self.__synonyms[locale][word] = expr
def synonyms(self, word, locale='en_US'):
return self.__synonyms[locale].get(word, word)
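    # Illustrative example (not part of the original file): with the default BASIC_SYNONYMS,
    # synonyms("is not") returns u"(is not|isn't)", while a word with no synonyms is
    # returned unchanged.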
# --------------------
class SearchEngine(AddonManager):
def __init__(self, parser=None, thesaurus=None):
self.__parser = parser or BASIC_PARSER
        self.__thesaurus = thesaurus or SearchThesaurus()  # fall back to a default thesaurus (BASIC_THESAURUS is not defined in this module)
def parser(self):
return self.__parser
def search(self, model, terms, **context):
search_context = context.get('context') or orb.Context(**context)
locale = search_context.locale
nodes = self.__parser.parseString(terms)
# separate into 2 categories general (searchable by any column) and specific (user gave a column)
general_nodes = [node for node in nodes if not isinstance(node, ComparisonNode)]
comparison_nodes = [node for node in nodes if isinstance(node, ComparisonNode)]
# build general search column matches
q = orb.Query()
if general_nodes:
expr = u'.*\s{0}'
pattern = u'(^|.*\s){0}'.format(general_nodes[0].pattern(self.__thesaurus, locale))
pattern += ''.join(expr.format(node.pattern(self.__thesaurus, locale)) for node in general_nodes[1:])
general_q = orb.Query()
searchable_columns = model.schema().columns(flags=orb.Column.Flags.Searchable).values()
# if there are no searchable columns, then there will be no search results
# so just return an empty collection
if not searchable_columns:
log.warning('{0} has no searchable columns'.format(model.schema().name()))
return orb.Collection()
for column in searchable_columns:
general_q |= orb.Query(column).asString().matches(pattern, caseSensitive=False)
q &= general_q
# build comparison nodes
if comparison_nodes:
schema = model.schema()
for node in comparison_nodes:
column = schema.column(node[0])
value_node = node[-1]
value = value_node[0]
q &= orb.Query(column) == value
if not q.isNull():
context['where'] = q & context.get('where')
return model.select(**context)
def thesaurus(self):
return self.__thesaurus
# register the global basic search engine that ORB uses
SearchEngine.registerAddon('basic', SearchEngine(thesaurus=SearchThesaurus()))
|
orb-framework/orb
|
orb/core/search.py
|
Python
|
mit
| 4,949
|
#Interface Import
from model.base.basequeue import basequeue
#Support Data-Structures Imports
from model.linkedlist import SimpleLinkedList as LinkedList
from collections import deque
class QueueLinkedList(LinkedList, basequeue):
def enqueue(self, element):
"""
Enqueues the element at the end of the queue.
enqueue(element) -> None
@type element: object
@param element: element to be enqueued at the end of the queue.
"""
self.add_as_last(element)
def dequeue(self):
"""
Dequeues and deletes the first element from the queue.
dequeue() -> first_element
@rtype: object
@return: first element of the queue.
"""
return self.pop_first()
class QueueDeque(basequeue):
def __init__(self):
self._q = deque()
def is_empty(self):
"""
Returns True if the queue is empty, otherwise False.
is_empty() -> True/False
@rtype: boolean
        @return: True if the queue is empty, otherwise False.
"""
return len(self._q) == 0
def enqueue(self, element):
"""
Enqueues the element at the end of the queue.
enqueue(element) -> None
@type element: object
@param element: element to be enqueued at the end of the queue.
"""
self._q.append(element)
def get_first(self):
"""
Returns the first element of the queue.
get_first() -> first_element
@rtype: object
@return: first element of the queue.
"""
return None if len(self._q) == 0 else self._q[0]
#Override
def dequeue(self):
"""
Dequeues and deletes the first element from the queue.
dequeue() -> first_element
@rtype: object
@return: first element of the queue.
"""
return None if len(self._q) == 0 else self._q.popleft()
def __repr__(self):
return str(self._q)[6 : -1]
def __str__(self):
return self.__repr__()
def __test(queue):
"""
Queue Test.
__test(queue) -> None
@type queue: basequeue
@param queue: queue instance.
"""
if not isinstance(queue, basequeue):
raise TypeError("Expected type was Queue.")
print "### iPATH TEST DATA STRUCTURE"
print "### Data Type: Queue ({})".format(str(queue.__class__.__bases__[0].__name__))
print "### Implementation: {}".format(str(queue.__class__.__name__))
print "\n*** ENQUEUE ***\n"
for i in range(10):
print "enqueue({})".format(str(i))
queue.enqueue(i)
print "\n{}\n".format(str(queue))
print "\n*** DEQUEUE ***\n"
for i in range(2):
print "dequeue(): {}\n".format(str(queue.dequeue()))
print "\n{}\n".format(str(queue))
print "\n*** GET FIRST ***\n"
print "get_first(): {}\n".format(str(queue.get_first()))
print "\n{}\n".format(str(queue))
print "\n*** EMPTYING ***\n"
while not queue.is_empty():
queue.dequeue()
print "{}".format(str(queue))
print "\n### END OF TEST ###\n"
if __name__ == "__main__":
queue = QueueLinkedList()
__test(queue)
queue = QueueDeque()
__test(queue)
|
gmarciani/ipath
|
model/queue.py
|
Python
|
mit
| 3,466
|
from BaseScouting.views.base_views import BaseAddTeamCommentsView
from Scouting2011.model.reusable_models import Team, TeamComments
class AddTeamCommentsView2011(BaseAddTeamCommentsView):
def __init__(self):
BaseAddTeamCommentsView.__init__(self, Team, TeamComments, 'Scouting2011:view_team')
|
ArcticWarriors/scouting-app
|
ScoutingWebsite/Scouting2011/view/submission/add_team_comments.py
|
Python
|
mit
| 307
|
#
#
#
# ***********************************************************************
# *** not used by scseq. Needed for compatibility with CGATPipelines. ***
# ***********************************************************************
#
#
#
# -*- coding: utf-8 -*-
#
# test documentation build configuration file, created by
# sphinx-quickstart on Fri Feb 7 11:33:27 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import CGATPipelines.Pipeline as P
import CGATPipelines
################################################################
# Options related to CGAT pipelines
# path were documentation source resides.
# Use environment variable SPHINX_DOCSDIR.
# If unset, take the location of CGATPipelines
docsdir = os.environ.get(
"SPHINX_DOCSDIR",
os.path.join(os.path.dirname(CGATPipelines.__file__),
'pipeline_docs'))
if not os.path.exists(docsdir):
raise ValueError("documentation directory '%s' not found" % docsdir)
themedir = os.path.join(os.path.dirname(CGATPipelines.__file__),
'pipeline_docs',
'themes')
logopath = os.path.join(themedir, "cgat_logo.png")
################################################################
# Import pipeline configuration from pipeline.ini in the current
# directory and the common one.
# PATH were code for pipelines is stored
pipelinesdir = os.path.dirname(CGATPipelines.__file__)
# The default configuration file - 'inifile' is read by
# sphinx-report.
inifile = os.path.join(os.path.dirname(CGATPipelines.__file__),
'configuration',
'pipeline.ini')
PARAMS = P.getParameters([inifile, "pipeline.ini"])
def setup(app):
app.add_config_value('PARAMS', {}, True)
################################################################
################################################################
################################################################
# The pipeline assumes that sphinxreport is called within the
# working directory. If the report is in a separate build directory,
# change the paths below.
#
# directory with export directory from pipeline
# This should be a directory in the build directory - you can
# link from here to a directory outside the build tree, though.
exportdir = os.path.abspath(PARAMS['exportdir'])
datadir = os.path.abspath(PARAMS['datadir'])
################################################################
################################################################
################################################################
# sphinx options
################################################################
# General information about the project.
project = PARAMS['projectname']
copyright = PARAMS['copyright']
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = PARAMS['version']
# The full version, including alpha/beta/rc tags.
release = PARAMS['release']
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path = [os.path.abspath('.'),
pipelinesdir,
os.path.abspath('%s/trackers' % docsdir)] + sys.path
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.ifconfig',
'sphinx.ext.intersphinx',
'CGATReport.only_directives',
'CGATReport.report_directive',
'sphinx.ext.inheritance_diagram',
'CGATReport.errors_directive',
'CGATReport.warnings_directive',
'CGATReport.roles']
if P.CONFIG.has_section('intersphinx'):
intersphinx_mapping = dict(
[(x, (os.path.abspath(y), None))
for x, y in P.CONFIG.items('intersphinx')])
# Included at the end of each rst file
rst_epilog = '''
.. _CGAT: http://www.cgat.org
.. _CGAT Training Programme: http://www.cgat.org
.. _CGAT Pipelines: https://www.cgat.org/downloads/public/cgat/documentation/Pipelines.html#pipelines
.. _CGAT Scripts: https://www.cgat.org/downloads/public/cgat/documentation/cgat.html#cgat
.. _pysam: http://code.google.com/p/pysam/
.. _samtools: http://samtools.sourceforge.net/
.. _tabix: http://samtools.sourceforge.net/tabix.shtml/
.. _Galaxy: https://main.g2.bx.psu.edu/
.. _cython: http://cython.org/
.. _python: http://python.org/
.. _pyximport: http://www.prescod.net/pyximport/
.. _sphinx: http://sphinx-doc.org/
.. _ruffus: http://www.ruffus.org.uk/
.. _sphinxreport: http://code.google.com/p/sphinx-report/
.. _sqlite: http://www.sqlite.org/
.. _make: http://www.gnu.org/software/make
.. _UCSC: http://genome.ucsc.edu
.. _ENSEMBL: http://www.ensembl.org
.. _GO: http://www.geneontology.org
.. _gwascatalog: http://www.genome.gov/gwastudies/
.. _distlid: http://distild.jensenlab.org/
.. _mysql: https://mariadb.org/
.. _postgres: http://www.postgresql.org/
.. _bedtools: http://bedtools.readthedocs.org/en/latest/
.. _UCSC Tools: http://genome.ucsc.edu/admin/git.html
.. _git: http://git-scm.com/
.. _sge: http://wikis.sun.com/display/GridEngine/Home
.. _alignlib: https://github.com/AndreasHeger/alignlib
'''
# Add any paths that contain templates here, relative to this directory.
templates_path = [os.path.relpath('%s/_templates' % docsdir)]
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'contents'
# General information about the project.
project = u'test'
copyright = u'2014, %CGAT%'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
exclude_patterns = ["**/.*.rst"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'cgat'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [themedir]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = logopath
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'testdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'test.tex', u'test Documentation',
u'\\%CGAT\\%', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'test', u'test Documentation',
[u'%CGAT%'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'test', u'test Documentation',
u'%CGAT%', 'test', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
|
snsansom/xcell
|
pipelines/configuration/conf.py
|
Python
|
mit
| 13,376
|
"""
Examples:
To smear an already smeared spectrum with a light yield of 200 to a
light yield of 190, the following lines are required::
>>> smearer = smear.EnergySmearLY()
>>> ly = smearer.calc_smear_ly(190., cur_ly=200.)
>>> smearer.set_resolution(ly)
>>> smeared_spec = smearer.weighted_smear(spectrum)
.. note:: Similar methods are available in all other smearing classes.
"""
import numpy
import itertools
import copy
class Smear(object):
""" The base class for smearing spectra.
Args:
name (string): The name of the smearing class.
Attributes:
_name (string): The name of the smearing class.
_num_sigma (float): The width of the window in terms of number of sigma
you wish to apply weights to.
"""
def __init__(self, name):
""" Initialise the Smear class by seeding the random number generator.
"""
numpy.random.seed()
self._name = name
self._num_sigma = 5.
def calc_gaussian(self, x, mean, sigma):
""" Calculates the value of a gaussian whose integral is equal to
one at position x with a given mean and sigma.
Args:
x : Position to calculate the gaussian
mean : Mean of the gaussian
sigma : Sigma of the gaussian
Returns:
float: Value of the gaussian at the given position
"""
return (numpy.exp(-(x - mean) ** 2 / (2 * sigma ** 2)) /
(sigma*numpy.sqrt(2 * numpy.pi)))
def get_bin_mean(self, low, bin, width):
""" Calculates the mean value of a bin.
Args:
low (float): The lower bound value of the parameter.
bin (int): The number of the bin you wish to calculate the mean of.
width (float): The width of the bin.
Returns:
float: The mean value of the bin.
"""
return low + (bin + 0.5)*width
def get_num_sigma(self):
""" Returns the width of the window in terms of number of sigma
you wish to apply weights to.
Returns:
float: The number of sigma.
"""
return self._num_sigma
def set_num_sigma(self, num_sigma):
""" Sets the width of the window in terms of number of sigma
you wish to apply weights to.
Args:
num_sigma (float): The number of sigma you wish to apply weights to.
Raises:
ValueError: If the number of sigma is zero or negative.
"""
if (num_sigma > 0.):
self._num_sigma = float(num_sigma)
else:
raise ValueError("%s is an invalid num_sigma. Value must be "
"greater than zero." % num_sigma)
def get_bounds(self, mean, sigma):
""" Calculates the boundaries you wish to apply the smearing
weights to.
Args:
mean (float): The mean value you are smearing.
sigma (float): The sigma of the gaussian you are using to smear.
Returns:
tuple: First value of the tuple is the lower bound. The second is
the upper bound.
"""
low = mean - self._num_sigma*sigma
high = mean + self._num_sigma*sigma
return low, high
class EnergySmearLY(Smear):
""" The class which smears energy. It accepts resolution in terms of light
yield (LY) in units of NHit per MeV.
Args:
poisson (bool): If True, use poisson smearing.
Attributes:
_light_yield (float): The light yield of the scintillator in NHits per
MeV.
_poisson_smear (Bool): True if poisson smearing is to be applied. False
if gaussian smearing is to be applied.
"""
def __init__(self, poisson=True):
""" Initialises the class.
"""
super(EnergySmearLY, self).__init__("energy_light_yield")
self._poisson_smear = poisson
self._light_yield = 200 # Nhit/MeV
self._log_factorial = {}
def calc_poisson_energy(self, x, lamb):
""" Calculates the value of a poisson whose integral is equal to
one at position x with a given lambda value.
Args:
x : Number of events
lamb : Lambda of the poisson
Returns:
float: The value of the poisson at the given position
"""
photons = int(x*self._light_yield)
expected = lamb*self._light_yield
if photons not in self._log_factorial:
self._log_factorial[photons] = (
numpy.sum(numpy.log(numpy.arange(1, (photons+1)))))
log_pois = (photons*numpy.log(expected) -
self._log_factorial[photons] -
expected)
return numpy.exp(log_pois)
def calc_smear_ly(self, new_ly, cur_ly=None):
"""Calculates the value of light yield (ly) required to smear a
data set which has already been smeared with a light yield of cur_ly
to achieve a smeared data set with a new light yield of new_ly.
Args:
new_ly (float): The value of light yield wanted for the smeared PDF.
cur_ly (float, optional): Current value of light yield the PDF
has been convolved with from the true value PDF.
Raises:
ValueError: If new_ly is larger than cur_ly. Can't smear to a
higher light yield (smaller sigma).
Returns:
float: The value of light yield needed to smear the current
PDF to obtain a new light yield: new_ly.
"""
if not cur_ly:
cur_ly = self.get_resolution()
if new_ly > cur_ly:
raise ValueError("New light yield must be smaller than the"
"current light yield. cur_ly: %s. new_ly: %s."
% (cur_ly, new_ly))
return new_ly*cur_ly/(cur_ly-new_ly)
def get_resolution(self):
""" Returns the light yield.
Returns:
float: The light yield.
"""
return self._light_yield
def get_sigma(self, energy):
""" Calculates sigma at a given energy.
Args:
energy (float): Energy value of data point(s)
Returns:
float: Sigma equivalent to sqrt(energy/_light_yield)
"""
return numpy.sqrt(energy/self._light_yield)
def set_resolution(self, light_yield):
""" Sets the light yield
Args:
light_yield (float): The value you wish to set the light yield to.
Raises:
ValueError: If the light yield is zero or negative.
"""
if light_yield > 0.:
self._light_yield = float(light_yield)
else:
raise ValueError("%s is an invalid light yield. Light yield "
"must be greater than zero.")
def weighted_smear(self, spectrum, par="energy_mc"):
""" Smears the energy of a :class:`spectra.Spectra` by
calculating a Gaussian PDF for each bin. Weights are then
applied to a window of width specified by the number of sigma
depending on the value of the Gaussian PDF at the mean of the
bin.
Args:
spectrum (:class:`spectra.Spectra`): Spectrum you wish to
smear.
par (string, optional): The name of the parameter you wish
to smear. The default is energy_mc.
Raises:
IndexError: If par is not in the spectra config.
Returns:
:class:`spectra.Spectra`: The smeared spectrum
"""
if par not in spectrum.get_config().get_pars():
raise IndexError("%s is not a parameter in the spectrum" % par)
idx = spectrum.get_config().get_index(par)
bins = []
lows = []
widths = []
par_names = []
for par_name in spectrum.get_config().get_pars():
bins.append(range(spectrum.get_config().get_par(par_name)._bins))
lows.append(spectrum.get_config().get_par(par_name)._low)
widths.append(spectrum.get_config().get_par(par_name).get_width())
par_names.append(par_name)
smeared_spec = copy.copy(spectrum)
smeared_spec._name = spectrum._name + "_ly" + str(self._light_yield)
smeared_spec._data = numpy.zeros(spectrum._data.shape)
for bin in itertools.product(*bins):
entries = spectrum._data[bin]
if entries:
data = {}
low = None
high = None
for i in range(len(bin)):
mean = self.get_bin_mean(lows[i], bin[i], widths[i])
if i == idx:
sigma = self.get_sigma(mean)
low, high = self.get_bounds(mean, sigma)
low = spectrum.get_config().get_par(
par).round(low - 0.5 * widths[i]) + 0.5 * widths[i]
high = spectrum.get_config().get_par(
par).round(high + 0.5 * widths[i]) + \
0.5 * widths[i]
if low < spectrum.get_config().get_par(par)._low:
low = spectrum.get_config().get_par(par)._low + \
0.5 * widths[i]
if high > spectrum.get_config().get_par(par)._high:
high = spectrum.get_config().get_par(par)._high - \
0.5 * widths[i]
weights = []
for energy in numpy.arange(low, high, widths[i]):
if self._poisson_smear is True:
weights.append(self.calc_poisson_energy(energy,
mean))
else:
weights.append(self.calc_gaussian(energy,
mean,
sigma))
else:
data[par_names[i]] = mean
total_weight = sum(weights)
i = 0
for energy in numpy.arange(low, high, widths[idx]):
data[par] = energy
smeared_spec.fill(weight=entries*weights[i]/total_weight,
**data)
i += 1
smeared_spec._raw_events = spectrum._raw_events
return smeared_spec
def random_smear(self, spectrum, par="energy_mc"):
""" Smears the energy of a :class:`spectra.Spectra` by
generating a number of random points from a Gaussian PDF generated
from that bin's mean value and the corresponding sigma. The number
of points generated is equivalent to the number of entries in that
bin.
Args:
spectrum (:class:`spectra.Spectra`): Spectrum you wish to
smear.
par (string, optional): The name of the parameter you wish to
smear. The default is energy_mc.
Raises:
IndexError: If par is not in the spectra config.
Returns:
:class:`spectra.Spectra`: The smeared spectrum
"""
if par not in spectrum.get_config().get_pars():
raise IndexError("%s is not a parameter in the spectrum" % par)
idx = spectrum.get_config().get_index(par)
bins = []
lows = []
widths = []
par_names = []
for par_name in spectrum.get_config().get_pars():
bins.append(range(spectrum.get_config().get_par(par_name)._bins))
lows.append(spectrum.get_config().get_par(par_name)._low)
widths.append(spectrum.get_config().get_par(par_name).get_width())
par_names.append(par_name)
smeared_spec = copy.copy(spectrum)
smeared_spec._name = spectrum._name + "_ly" + str(self._light_yield)
smeared_spec._data = numpy.zeros(spectrum._data.shape)
for bin in itertools.product(*bins):
entries = int(spectrum._data[bin])
if entries:
data = {}
for i in range(len(bin)):
mean = self.get_bin_mean(lows[i], bin[i], widths[i])
if i == idx:
mean_e = mean
sigma = self.get_sigma(mean)
else:
data[par_names[i]] = mean
for i in range(entries):
if self._poisson_smear is True:
photons = (numpy.fabs
(numpy.random.poisson(mean_e *
self._light_yield)))
data[par] = photons / float(self._light_yield)
else:
data[par] = (numpy.fabs
(numpy.random.normal(mean_e, sigma)))
try:
smeared_spec.fill(**data)
except ValueError:
print "WARNING: Smeared energy out of bounds. Skipping"
smeared_spec._raw_events = spectrum._raw_events
return smeared_spec
class EnergySmearRes(Smear):
""" Allows you to smear directly by supplied energy resolution
(in :math:`\sqrt{MeV}`).
Inherits from :class:`Smear`
Args:
poisson (bool): If True, use poisson smearing.
Attributes:
_resolution (float): Energy resolution in :math:`\sqrt{MeV}`
e.g. 0.05 for :math:`\sigma = 5\%/\sqrt{E[MeV]}`.
_poisson_smear (Bool): True if poisson smearing is to be applied.
False if gaussian smearing is to be applied.
"""
def __init__(self, poisson=True):
""" Initialise the class
"""
super(EnergySmearRes, self).__init__("energy_resolution")
self._poisson_smear = poisson
self._light_yield = 200 # Nhit/MeV
self._log_factorial = {}
def calc_poisson_energy(self, x, lamb):
""" Calculates the value of a poisson whose integral is equal to
one at position x with a given lambda value.
Args:
x : Number of events
lamb : Lambda of the poisson
Returns:
float: The value of the poisson at the given position
"""
photons = int(x*self._light_yield)
expected = lamb*self._light_yield
if photons not in self._log_factorial:
self._log_factorial[photons] = (
numpy.sum(numpy.log(numpy.arange(1, (photons+1)))))
log_pois = (photons*numpy.log(expected) -
self._log_factorial[photons] -
expected)
return numpy.exp(log_pois)
def calc_smear_resoluton(self, new_res, cur_res=None):
"""Calculates the value of resolution required to smear a data set
which has already been smeared with a resolution of cur_res to
achieve a new resolution of new_res.
Args:
new_res (float): The value of resolution wanted for the smeared PDF.
cur_res (float, optional): Current value of resolution the PDF
has been convolved with from the true value PDF.
Raises:
ValueError: If new_res is smaller than cur_res. Can't smear to a
better resolution (smaller sigma).
Returns:
float: The value of resolution needed to smear the current
PDF to obtain a new resolution with sigma value new_res.
"""
if not cur_res:
cur_res = self.get_resolution()
if new_res < cur_res:
raise ValueError("New resolution must be larger than the"
"current resolution. cur_res: %s. new_res: %s."
% (cur_res, new_res))
return numpy.fabs(numpy.sqrt(new_res**2 - cur_res**2))
def get_resolution(self):
""" Get the energy resolution
Returns:
float: Energy resolution in :math:`\sqrt{MeV}`
e.g. 0.05 for :math:`\sigma = 5\%/\sqrt{E[MeV]}`
"""
return self._resolution
def get_sigma(self, energy):
""" Calculates sigma at a given energy.
Args:
energy (float): Energy value of data point(s)
Returns:
float: Sigma (MeV) equivalent to energy_resolution *
:math:`\sqrt{energy}`
"""
return self._resolution * numpy.power(energy, (1. / 2.))
def set_resolution(self, resolution):
""" Set the energy resolution in :math:`\sqrt{MeV}`
e.g. 0.05 for :math:`\sigma = 5\%/\sqrt{E[MeV]}`.
Args:
resolution (float): Energy resolution in :math:`\sqrt{MeV}`
e.g. 0.05 for :math:`\sigma = 5\%/\sqrt{E[MeV]}`.
Raises:
ValueError: If the resolution is not between 0 and 1.
"""
if (resolution > 0. and resolution < 1.):
self._resolution = resolution
else:
raise ValueError("%s is an invalid energy resolution. Value "
"must be between 0. and 1." % resolution)
def weighted_smear(self, spectrum, par="energy_mc"):
""" Smears the energy of a :class:`spectra.Spectra` by
calculating a Gaussian PDF for each bin. Weights are then applied
to a window of width specified by the number of sigma depending on
the value of the Gaussian PDF at the mean of the bin.
Args:
spectrum (:class:`spectra.Spectra`): Spectrum you wish to
smear.
par (string, optional): The name of the parameter you wish to
smear. The default is energy_mc.
Raises:
IndexError: If par is not in the spectra config.
Returns:
:class:`spectra.Spectra`: The smeared spectrum
"""
if par not in spectrum.get_config().get_pars():
raise IndexError("%s is not a parameter in the spectrum" % par)
idx = spectrum.get_config().get_index(par)
bins = []
lows = []
widths = []
par_names = []
for par_name in spectrum.get_config().get_pars():
bins.append(range(spectrum.get_config().get_par(par_name)._bins))
lows.append(spectrum.get_config().get_par(par_name)._low)
widths.append(spectrum.get_config().get_par(par_name).get_width())
par_names.append(par_name)
smeared_spec = copy.copy(spectrum)
smeared_spec._name = (spectrum._name + "_" +
str(100. * self._resolution) + "%")
smeared_spec._data = numpy.zeros(spectrum._data.shape)
for bin in itertools.product(*bins):
entries = spectrum._data[bin]
if entries:
data = {}
low = None
high = None
for i in range(len(bin)):
mean = self.get_bin_mean(lows[i], bin[i], widths[i])
if i == idx:
sigma = self.get_sigma(mean)
low, high = self.get_bounds(mean, sigma)
low = spectrum.get_config().get_par(
par).round(low - 0.5 * widths[i]) + 0.5 * widths[i]
high = spectrum.get_config().get_par(
par).round(high + 0.5 * widths[i]) + \
0.5 * widths[i]
if low < spectrum.get_config().get_par(par)._low:
low = spectrum.get_config().get_par(par)._low + \
0.5 * widths[i]
if high > spectrum.get_config().get_par(par)._high:
high = spectrum.get_config().get_par(par)._high - \
0.5 * widths[i]
weights = []
for energy in numpy.arange(low, high, widths[i]):
if self._poisson_smear is True:
weights.append(self.calc_poisson_energy(energy,
mean))
else:
weights.append(self.calc_gaussian(energy,
mean,
sigma))
else:
data[par_names[i]] = mean
total_weight = sum(weights)
i = 0
for energy in numpy.arange(low, high, widths[idx]):
data[par] = energy
smeared_spec.fill(weight=entries*weights[i]/total_weight,
**data)
i += 1
smeared_spec._raw_events = spectrum._raw_events
return smeared_spec
def random_smear(self, spectrum, par="energy_mc"):
""" Smears the energy of a :class:`spectra.Spectra` by
generating a number of random points from a Gaussian PDF
generated from that bin's mean value and the corresponding
sigma. The number of points generated is equivalent to the
number of entries in that bin.
Args:
spectrum (:class:`spectra.Spectra`): Spectrum you wish to
smear.
par (string, optional): The name of the parameter you wish to
smear. The default is energy_mc.
Raises:
IndexError: If par is not in the spectra config.
Returns:
:class:`spectra.Spectra`: The smeared spectrum
"""
if par not in spectrum.get_config().get_pars():
raise IndexError("%s is not a parameter in the spectrum" % par)
idx = spectrum.get_config().get_index(par)
bins = []
lows = []
widths = []
par_names = []
for par_name in spectrum.get_config().get_pars():
bins.append(range(spectrum.get_config().get_par(par_name)._bins))
lows.append(spectrum.get_config().get_par(par_name)._low)
widths.append(spectrum.get_config().get_par(par_name).get_width())
par_names.append(par_name)
smeared_spec = copy.copy(spectrum)
smeared_spec._name = (spectrum._name + "_" +
str(100. * self._resolution) + "%")
smeared_spec._data = numpy.zeros(spectrum._data.shape)
for bin in itertools.product(*bins):
entries = int(spectrum._data[bin])
if entries:
data = {}
for i in range(len(bin)):
mean = self.get_bin_mean(lows[i], bin[i], widths[i])
if i == idx:
mean_e = mean
sigma = self.get_sigma(mean)
else:
data[par_names[i]] = mean
for i in range(entries):
if self._poisson_smear is True:
photons = (numpy.fabs
(numpy.random.poisson(mean_e *
self._light_yield)))
data[par] = photons / float(self._light_yield)
else:
data[par] = (numpy.fabs
(numpy.random.normal(mean_e, sigma)))
try:
smeared_spec.fill(**data)
except ValueError:
print "WARNING: Smeared energy out of bounds. Skipping"
smeared_spec._raw_events = spectrum._raw_events
return smeared_spec
class RadialSmear(Smear):
""" The class which smears the radius. It accepts resolution in terms of
sigma in units of mm.
Attributes:
_resolution (float): The position resolution (mm).
"""
def __init__(self):
""" Initialises the class.
"""
super(RadialSmear, self).__init__("radial")
self._resolution = 100. # mm
def calc_smear_resoluton(self, new_res, cur_res=None):
"""Calculates the value of resolution required to smear a data set
which has already been smeared with a resolution of cur_res to
achieve a new resolution of new_res.
Args:
new_res (float): The value of resolution wanted for the smeared PDF.
cur_res (float, optional): Current value of resolution the PDF
has been convolved with from the true value PDF.
Raises:
ValueError: If new_res is smaller than cur_res. Can't smear to a
better resolution (smaller sigma).
Returns:
float: The value of resolution needed to smear the current
PDF to obtain a new resolution: new_res.
"""
if not cur_res:
cur_res = self.get_resolution()
if new_res < cur_res:
raise ValueError("New resolution must be larger than the"
"current resolution. cur_res: %s. new_res: %s."
% (cur_res, new_res))
return numpy.fabs(numpy.sqrt(new_res**2 - cur_res**2))
def get_resolution(self):
"""Gets the position resolution.
Returns:
float: Position resolution.
"""
return self._resolution
def set_resolution(self, resolution):
"""Sets the position resolution:
Raises:
ValueError: If resolution is zero or less.
Args:
resolution (float): Position resolution in mm.
"""
if resolution > 0:
self._resolution = resolution
else:
raise ValueError("%s is an incorrect position resolutioin. Value "
"must be greater than zero." % resolution)
def get_sigma(self):
"""Sigma and resolution are equivalent for radial dimensions
currently. This function calls self.get_resolution()
Returns:
float: Sigma in mm equivalent to resolution
"""
return self.get_resolution()
def weighted_smear(self, spectrum, par="radial_mc"):
""" Smears the radius of a :class:`spectra.Spectra` by
calculating a Gaussian PDF for each bin. Weights are then
applied to a window of width specified by the number of sigma
depending on the value of the Gaussian PDF at the mean of the
bin.
Args:
spectrum (:class:`spectra.Spectra`): Spectrum you wish to
smear.
par (string, optional): The name of the parameter you wish to
smear. The default is radial_mc.
Raises:
IndexError: If par is not in the spectra config.
Returns:
:class:`spectra.Spectra`: The smeared spectrum
"""
if par not in spectrum.get_config().get_pars():
raise IndexError("%s is not a parameter in the spectrum" % par)
idx = spectrum.get_config().get_index(par)
bins = []
lows = []
widths = []
par_names = []
for par_name in spectrum.get_config().get_pars():
bins.append(range(spectrum.get_config().get_par(par_name)._bins))
lows.append(spectrum.get_config().get_par(par_name)._low)
widths.append(spectrum.get_config().get_par(par_name).get_width())
par_names.append(par_name)
smeared_spec = copy.copy(spectrum)
smeared_spec._name = (spectrum._name + "_" +
str(100. * self._resolution) + "mm")
smeared_spec._data = numpy.zeros(spectrum._data.shape)
for bin in itertools.product(*bins):
entries = spectrum._data[bin]
if entries:
data = {}
low = None
high = None
for i in range(len(bin)):
mean = self.get_bin_mean(lows[i], bin[i], widths[i])
if i == idx:
sigma = self.get_sigma()
low, high = self.get_bounds(mean, sigma)
low = spectrum.get_config().get_par(
par).round(low - 0.5 * widths[i]) + 0.5 * widths[i]
high = spectrum.get_config().get_par(
par).round(high + 0.5 * widths[i]) + \
0.5 * widths[i]
if low < spectrum.get_config().get_par(par)._low:
low = spectrum.get_config().get_par(par)._low + \
0.5 * widths[i]
if high > spectrum.get_config().get_par(par)._high:
high = spectrum.get_config().get_par(par)._high - \
0.5 * widths[i]
weights = []
for radius in numpy.arange(low, high, widths[i]):
weights.append(self.calc_gaussian(radius,
mean,
sigma))
else:
data[par_names[i]] = mean
total_weight = sum(weights)
i = 0
for radius in numpy.arange(low, high, widths[idx]):
data[par] = radius
smeared_spec.fill(weight=entries*weights[i]/total_weight,
**data)
i += 1
smeared_spec._raw_events = spectrum._raw_events
return smeared_spec
def random_smear(self, spectrum, par="radial_mc"):
""" Smears the radius of a :class:`spectra.Spectra` by
generating a number of random points from a Gaussian PDF
generated from that bin's mean value and the corresponding
sigma. The number of points generated is equivalent to the
number of entries in that bin.
Args:
spectrum (:class:`spectra.Spectra`): Spectrum you wish to
smear.
par (string, optional): The name of the parameter you wish to
smear. The default is radial_mc.
Raises:
IndexError: If par is not in the spectra config.
Returns:
:class:`spectra.Spectra`: The smeared spectrum
"""
if par not in spectrum.get_config().get_pars():
raise IndexError("%s is not a parameter in the spectrum" % par)
idx = spectrum.get_config().get_index(par)
bins = []
lows = []
widths = []
par_names = []
for par_name in spectrum.get_config().get_pars():
bins.append(range(spectrum.get_config().get_par(par_name)._bins))
lows.append(spectrum.get_config().get_par(par_name)._low)
widths.append(spectrum.get_config().get_par(par_name).get_width())
par_names.append(par_name)
smeared_spec = copy.copy(spectrum)
smeared_spec._name = (spectrum._name + "_" +
str(100. * self._resolution) + "mm")
smeared_spec._data = numpy.zeros(spectrum._data.shape)
for bin in itertools.product(*bins):
entries = spectrum._data[bin]
if entries:
data = {}
for i in range(len(bin)):
mean = self.get_bin_mean(lows[i], bin[i], widths[i])
if i == idx:
mean_r = mean
sigma = self.get_sigma()
else:
data[par_names[i]] = mean
for i in range(entries):
data[par] = numpy.fabs(numpy.random.normal(mean_r, sigma))
try:
smeared_spec.fill(**data)
except ValueError:
print "WARNING: Smeared radius out of bounds. Skipping"
smeared_spec._raw_events = spectrum._raw_events
return smeared_spec
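# --- Added usage sketch (illustrative only; not part of the original echidna module).
# The light-yield arithmetic in EnergySmearLY.calc_smear_ly follows from adding Gaussian
# widths in quadrature with sigma**2 = E/ly: smearing a PDF that already has light yield
# cur_ly with an extra light yield ly_extra gives 1/new_ly = 1/cur_ly + 1/ly_extra,
# i.e. ly_extra = new_ly*cur_ly/(cur_ly - new_ly).  Run directly to check the numbers
# from the module docstring example (no spectra.Spectra instance is needed for this):
if __name__ == "__main__":
    _smearer = EnergySmearLY(poisson=False)
    _extra_ly = _smearer.calc_smear_ly(190., cur_ly=200.)
    print "Light yield needed to smear 200 NHit/MeV down to 190 NHit/MeV:", _extra_ly  # 3800.0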
|
jwaterfield/echidna
|
echidna/core/smear.py
|
Python
|
mit
| 32,311
|
__author__ = 'tfg'
|
ShakMR/suibash
|
String/__init__.py
|
Python
|
mit
| 19
|
# encoding: utf-8
from .compat import py2, py26, py3, py33, itervalues, iteritems, iterkeys, odict, range, str, unicode, total_seconds, zip
from .dictconfig import dictConfig
|
deKross/task
|
marrow/task/compat/__init__.py
|
Python
|
mit
| 175
|
# trello_webhooks package
|
yunojuno/django-test
|
trello_webhooks/__init__.py
|
Python
|
mit
| 26
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.dalvik
~~~~~~~~~~~~~~~~~~~~~~
Pygments lexers for Dalvik VM-related languages.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from ..lexer import RegexLexer, include, bygroups
from ..token import Keyword, Text, Comment, Name, String, Number, \
Punctuation
__all__ = ['SmaliLexer']
class SmaliLexer(RegexLexer):
"""
For `Smali <http://code.google.com/p/smali/>`_ (Android/Dalvik) assembly
code.
.. versionadded:: 1.6
"""
name = 'Smali'
aliases = ['smali']
filenames = ['*.smali']
mimetypes = ['text/smali']
tokens = {
'root': [
include('comment'),
include('label'),
include('field'),
include('method'),
include('class'),
include('directive'),
include('access-modifier'),
include('instruction'),
include('literal'),
include('punctuation'),
include('type'),
include('whitespace')
],
'directive': [
(r'^[ \t]*\.(class|super|implements|field|subannotation|annotation|'
r'enum|method|registers|locals|array-data|packed-switch|'
r'sparse-switch|catchall|catch|line|parameter|local|prologue|'
r'epilogue|source)', Keyword),
(r'^[ \t]*\.end (field|subannotation|annotation|method|array-data|'
'packed-switch|sparse-switch|parameter|local)', Keyword),
(r'^[ \t]*\.restart local', Keyword),
],
'access-modifier': [
(r'(public|private|protected|static|final|synchronized|bridge|'
r'varargs|native|abstract|strictfp|synthetic|constructor|'
r'declared-synchronized|interface|enum|annotation|volatile|'
r'transient)', Keyword),
],
'whitespace': [
(r'\n', Text),
(r'\s+', Text),
],
'instruction': [
(r'\b[vp]\d+\b', Name.Builtin), # registers
(r'\b[a-z][A-Za-z0-9/-]+\s+', Text), # instructions
],
'literal': [
(r'".*"', String),
(r'0x[0-9A-Fa-f]+t?', Number.Hex),
(r'[0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'[0-9]+L?', Number.Integer),
],
'field': [
(r'(\$?\b)([\w$]*)(:)',
bygroups(Punctuation, Name.Variable, Punctuation)),
],
'method': [
(r'<(?:cl)?init>', Name.Function), # constructor
(r'(\$?\b)([\w$]*)(\()',
bygroups(Punctuation, Name.Function, Punctuation)),
],
'label': [
(r':\w+', Name.Label),
],
'class': [
# class names in the form Lcom/namespace/ClassName;
# I only want to color the ClassName part, so the namespace part is
# treated as 'Text'
(r'(L)((?:[\w$]+/)*)([\w$]+)(;)',
bygroups(Keyword.Type, Text, Name.Class, Text)),
],
'punctuation': [
(r'->', Punctuation),
(r'[{},():=.-]', Punctuation),
],
'type': [
(r'[ZBSCIJFDV\[]+', Keyword.Type),
],
'comment': [
(r'#.*?\n', Comment),
],
}
def analyse_text(text):
score = 0
if re.search(r'^\s*\.class\s', text, re.MULTILINE):
score += 0.5
if re.search(r'\b((check-cast|instance-of|throw-verification-error'
r')\b|(-to|add|[ais]get|[ais]put|and|cmpl|const|div|'
r'if|invoke|move|mul|neg|not|or|rem|return|rsub|shl|'
r'shr|sub|ushr)[-/])|{|}', text, re.MULTILINE):
score += 0.3
if re.search(r'(\.(catchall|epilogue|restart local|prologue)|'
r'\b(array-data|class-change-error|declared-synchronized|'
r'(field|inline|vtable)@0x[0-9a-fA-F]|generic-error|'
r'illegal-class-access|illegal-field-access|'
r'illegal-method-access|instantiation-error|no-error|'
r'no-such-class|no-such-field|no-such-method|'
r'packed-switch|sparse-switch))\b', text, re.MULTILINE):
score += 0.6
return score
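# --- Added usage sketch (illustrative only; assumes the standalone ``pygments`` package
# is installed, rather than importing this vendored copy directly) ---
if __name__ == '__main__':
    from pygments import highlight
    from pygments.lexers import SmaliLexer as _StandaloneSmaliLexer
    from pygments.formatters import TerminalFormatter
    _sample = '.class public Lcom/example/Foo;\n.super Ljava/lang/Object;\n'
    print(highlight(_sample, _StandaloneSmaliLexer(), TerminalFormatter()))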
|
facelessuser/sublime-markdown-popups
|
st3/mdpopups/pygments/lexers/dalvik.py
|
Python
|
mit
| 4,406
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/lair/merek/shared_lair_merek_swamp.iff"
result.attribute_template_id = -1
result.stfName("lair_n","merek_swamp")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
obi-two/Rebelion
|
data/scripts/templates/object/tangible/lair/merek/shared_lair_merek_swamp.py
|
Python
|
mit
| 449
|
from math import cos, sin, pi
def distance(p1, p2):
dx = p1[0] - p2[0]
dy = p1[1] - p2[1]
return (dx * dx + dy * dy)**0.5
def equals(p1, p2, epsilon=0.001):
return distance(p1, p2) <= epsilon
def add(p1, p2):
return p1[0] + p2[0], p1[1] + p2[1]
def subtract(p1, p2):
return p1[0] - p2[0], p1[1] - p2[1]
def scale(point, factor):
return point[0] * factor, point[1] * factor
memo = {}
def hash_key(currentLocation, phi, theta, arcsAvailable):
return '{0:0.5f},{1:0.5f},{2:0.5f},{3:0.5f},{4:d}'.format(currentLocation[0], currentLocation[1], phi, theta, arcsAvailable)
def memoize(currentLocation, phi, theta, arcsAvailable, value):
global memo
memo[hash_key(currentLocation, phi, theta, arcsAvailable)] = value
return value
###
# phi = current direction
###
def findPoint(point, currentLocation, phi, theta, arcsAvailable):
# Check if we've already calculated this value
global memo
key = hash_key(currentLocation, phi, theta, arcsAvailable)
if key in memo:
return memo[key]
# If we're out of moves, but we landed at the start, we've got a match
if arcsAvailable == 0 and equals(point, currentLocation):
return 1
# We're still out of moves but not close to the start. Not a match
elif arcsAvailable == 0:
return 0
# Do some "pruning" to stop chasing paths that are too far away: If we're further away than we have steps left
# it's impossible to finish, so we're wasting our time
if distance(point, currentLocation) > theta * arcsAvailable:
return 0
# try both a left and a right turn. These expressions are a closed form of a rotation
# matrix applied to the arc's chord; see the derivation note after this function.
leftDirection = cos(phi) * sin(theta) - sin(phi) * (1 - cos(theta)), sin(phi) * sin(theta) + cos(phi) * (1 - cos(theta))
leftLocation = add(currentLocation, leftDirection)
totalFromLeft = findPoint(point, leftLocation, phi + theta, theta, arcsAvailable - 1)
rightDirection = cos(phi) * sin(theta) - sin(phi) * (cos(theta) - 1), sin(phi) * sin(theta) + cos(phi) * (cos(theta) - 1)
rightLocation = add(currentLocation, rightDirection)
totalFromRight = findPoint(point, rightLocation, phi - theta, theta, arcsAvailable - 1)
#return totalFromLeft + totalFromRight
return memoize(currentLocation, phi, theta, arcsAvailable, totalFromLeft + totalFromRight)
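# --- Added derivation note (illustrative; assumes each arc is traced on a unit-radius
# circle, which matches the chord formulas used in findPoint above) ---
# In the body frame (heading along +x), turning left through an angle theta along a
# unit-radius arc displaces the point by the chord (sin(theta), 1 - cos(theta)); a right
# turn gives (sin(theta), cos(theta) - 1).  Rotating a body-frame vector (a, b) by the
# current heading phi into world coordinates yields
#     (a*cos(phi) - b*sin(phi), a*sin(phi) + b*cos(phi)),
# which expands to exactly the leftDirection / rightDirection expressions above.
# Sanity check: with phi = 0 and theta = pi/2, the left-turn displacement is (1.0, 1.0).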
# read the number of cases (at most 15)
N = int(raw_input().strip())
for i in range(N):
# read in input (at most 30 steps, with 7th of circle division)
stepCount, arcDistance = map(int, raw_input().strip().split())
# calculate the "rotation angle" for a single turn
theta = 2 * pi / arcDistance
print findPoint((0, 0), (0, 0), 0, theta, stepCount)
|
tylerburnham42/ProgrammingTeam
|
2016/slides/ComputationalGeometry/arc.py
|
Python
|
mit
| 2,650
|
# Copyright (C) 2016 Baofeng Dong
# This program is released under the "MIT License".
# Please see the file COPYING in the source
# distribution of this software for license terms.
import csv, os
from sqlalchemy import func, desc, distinct, cast, Integer
from flask import current_app, jsonify
from dashboard import Session as Session
from dashboard import debug, app, db
import simplejson as json
import pygal
app = current_app
DIRPATH = os.path.dirname(os.path.realpath(__file__))
class Helper(object):
@staticmethod
def get_routes():
ret_val = []
web_session = Session()
routes = web_session.execute("""
SELECT distinct rte, rte_desc
FROM odk.rte_lookup
ORDER BY rte;""")
RTE = 0
RTE_DESC = 1
ret_val = [ {'rte':str(route[RTE]), 'rte_desc':route[RTE_DESC]}
for route in routes ]
web_session.close()
debug(ret_val)
return ret_val
@staticmethod
def get_questions():
ret_val = []
web_session = Session()
questions = web_session.execute("""
SELECT num, questions
FROM odk.ques_lookup
ORDER BY num;""")
ret_val = [ [question[0], str(question[1])] for question in questions ]
web_session.close()
debug(ret_val)
return ret_val
@staticmethod
def get_directions():
ret_val = []
web_session = Session()
directions = web_session.execute("""
SELECT rte, rte_desc, dir, dir_desc
FROM odk.rte_lookup
ORDER BY rte, dir;""")
RTE = 0
RTE_DESC = 1
DIR = 2
DIR_DESC = 3
ret_val = [ {'rte':str(direction[RTE]), 'rte_desc':direction[RTE_DESC],
'dir':int(direction[DIR]), 'dir_desc':direction[DIR_DESC]}
for direction in directions ]
web_session.close()
return ret_val
@staticmethod
def query_sep_data(where):
ret_val = []
query_args = {}
where = where
region = " AND f.q5_orig_region='2' and f.q6_dest_region='2' "
validate = " AND f.loc_validated='1' "
not_null = " AND f.q3_orig_type is not null AND f.q4_dest_type is not null;"
query_string = """
WITH survey as (
select *
from odk.fall_survey_2016_data f
where
f.willing in ('1','2') and
f.origin_sep is not null {0}),
sepcount as (
select origin_sep,
count(*) as sep_count,
round(100*count(*)/(select count(*) from survey)::numeric,1) as pct
from survey
group by origin_sep
order by origin_sep)
select * from sepcount;""".format(where)
#query_string += where
#query_string += ;
debug(query_string)
web_session = Session()
query = web_session.execute(query_string)
SEP = 0
COUNT = 1
PER = 2
# each record will be converted as json
# and sent back to page
for record in query:
data = {}
data['sep'] = record[SEP]
data['count'] = record[COUNT]
data['percentage'] = float(record[PER])
ret_val.append(data)
web_session.close()
return ret_val
@staticmethod
def get_satisfaction(where, qnum):
ret_val = []
where = where
pie_chart = pygal.Pie(print_values=True)
pie_chart.title = 'Customer Satisfaction'
query_string = """
WITH survey as (
select *
from odk.fall_survey_2016_data f
where
f.willing in ('1','2') and
f.q1_satisfaction is not null {0}),
satisfactioncount as (
select
CASE
WHEN q1_satisfaction = '1' THEN 'Very satisfied'
WHEN q1_satisfaction = '3' THEN 'Somewhat satisfied'
WHEN q1_satisfaction = '4' THEN 'Neutral'
WHEN q1_satisfaction = '5' THEN 'Somewhat dissatisfied'
WHEN q1_satisfaction = '6' THEN 'Very dissatisfied'
WHEN q1_satisfaction = '7' THEN 'Do not know'
else ''
END as satisfaction,
count(*) as count,
round(100*count(*)/(select count(*) from survey)::numeric,1) as pct
from survey
group by q1_satisfaction
order by pct desc)
select * from satisfactioncount;""".format(where)
debug(query_string)
web_session = Session()
query = web_session.execute(query_string)
# each record will be converted as json
# and sent back to page
ret_val = [[record[0], int(record[1]), float(record[2])] for record in query]
debug(ret_val)
for row in ret_val:
pie_chart.add(str(row[0]), float(row[2]))
pie_chart.render_to_file(os.path.join(DIRPATH, "static/image/{0}{1}.svg".format('q', qnum)))
web_session.close()
return ret_val
@staticmethod
def get_origin(where, qnum):
ret_val = []
where = where
pie_chart = pygal.Pie(print_values=True)
pie_chart.title = 'Trip Origin Types'
query_string = """
WITH survey as (
select *
from odk.fall_survey_2016_data f
where
f.willing in ('1','2') and
f.q3_orig_type is not null {0}),
origincount as (
select
case
WHEN q3_orig_type = '1' THEN 'Home'
WHEN q3_orig_type = '2' THEN 'Work'
WHEN q3_orig_type = '3' THEN 'School'
WHEN q3_orig_type = '4' THEN 'Recreation'
WHEN q3_orig_type = '5' THEN 'Shopping'
WHEN q3_orig_type = '6' THEN 'Personal business'
WHEN q3_orig_type = '7' THEN 'Visit family or friends'
WHEN q3_orig_type = '8' THEN 'Medical appointment'
WHEN q3_orig_type = '9' THEN 'Other'
else ''
end as origin_type,
count(*) as count,
round(100*count(*)/(select count(*) from survey)::numeric,1) as pct
from survey
group by q3_orig_type
order by pct desc)
select * from origincount;""".format(where)
debug(query_string)
web_session = Session()
query = web_session.execute(query_string)
# each record will be converted as json
# and sent back to page
ret_val = [[record[0], int(record[1]), float(record[2])] for record in query]
debug(ret_val)
for row in ret_val:
pie_chart.add(str(row[0]), float(row[2]))
pie_chart.render_to_file(os.path.join(DIRPATH, "static/image/{0}{1}.svg".format('q', qnum)))
web_session.close()
return ret_val
@staticmethod
def get_destination(where, qnum):
ret_val = []
where = where
bar_chart = pygal.Bar(print_values=True)
bar_chart.title = 'Trip Destination Types'
query_string = """
WITH survey as (
select *
from odk.fall_survey_2016_data f
where
f.willing in ('1','2') and
f.q4_dest_type is not null {0}),
destcount as (
select
case
WHEN q4_dest_type = '1' THEN 'Home'
WHEN q4_dest_type = '2' THEN 'Work'
WHEN q4_dest_type = '3' THEN 'School'
WHEN q4_dest_type = '4' THEN 'Recreation'
WHEN q4_dest_type = '5' THEN 'Shopping'
WHEN q4_dest_type = '6' THEN 'Personal business'
WHEN q4_dest_type = '7' THEN 'Visit family or friends'
WHEN q4_dest_type = '8' THEN 'Medical appointment'
WHEN q4_dest_type = '9' THEN 'Other'
else ''
end as dest_type,
count(*) as count,
round(100*count(*)/(select count(*) from survey)::numeric,1) as pct
from survey
group by q4_dest_type
order by pct desc)
select * from destcount;""".format(where)
debug(query_string)
web_session = Session()
query = web_session.execute(query_string)
# each record will be converted as json
# and sent back to page
ret_val = [[record[0], int(record[1]), float(record[2])] for record in query]
debug(ret_val)
for row in ret_val:
bar_chart.add(str(row[0]), int(row[1]))
bar_chart.render_to_file(os.path.join(DIRPATH, "static/image/{0}{1}.svg".format('q', qnum)))
web_session.close()
return ret_val
@staticmethod
def get_travel_change(where, qnum):
ret_val = []
where = where
bar_chart = pygal.Bar(print_values=True)
bar_chart.title = 'Transit Usage Compared to A Year Ago'
query_string = """
WITH survey as (
select *
from odk.fall_survey_2016_data f
where
f.willing in ('1','2') and
f.q7_travel_change is not null {0}),
changecount as (
select
case
WHEN q7_travel_change = '1' THEN 'More'
WHEN q7_travel_change = '2' THEN 'Same'
WHEN q7_travel_change = '3' THEN 'Less'
WHEN q7_travel_change = '4' THEN 'Do not know'
else ''
end as ride_change,
count(*) as count,
round(100*count(*)/(select count(*) from survey)::numeric,1) as pct
from survey
group by q7_travel_change
order by pct desc)
select * from changecount;""".format(where)
debug(query_string)
web_session = Session()
query = web_session.execute(query_string)
# each record will be converted as json
# and sent back to page
ret_val = [[record[0], int(record[1]), float(record[2])] for record in query]
debug(ret_val)
for row in ret_val:
bar_chart.add(str(row[0]), int(row[1]))
bar_chart.render_to_file(os.path.join(DIRPATH, "static/image/{0}{1}.svg".format('q', qnum)))
web_session.close()
return ret_val
@staticmethod
def get_travel_less(where, qnum):
ret_val = []
where = where
bar_chart = pygal.Bar(print_values=True)
bar_chart.title = 'Reasons for Riding TriMet Less'
query_string = """
WITH survey as (
select *
from odk.fall_survey_2016_data f
where
f.willing in ('1','2') and
f.q8_ride_less is not null {0}),
unnest_element as (
select unnest(string_to_array(q8_ride_less, ' ')) as element
from survey),
ridechange as (
select
case
WHEN element = '1' THEN 'Gasoline prices low'
WHEN element = '2' THEN 'Home changed'
WHEN element = '3' THEN 'Work changed'
WHEN element = '4' THEN 'School changed'
WHEN element = '5' THEN 'Life changed'
WHEN element = '6' THEN 'Telecommute more'
WHEN element = '7' THEN 'Take ride hailing services'
WHEN element = '8' THEN 'On time issues'
WHEN element = '9' THEN 'Frequency not enough'
WHEN element = '10' THEN 'Crowding issues'
WHEN element = '11' THEN 'Span of service not enough'
WHEN element = '12' THEN 'Fare prices too high'
WHEN element = '13' THEN 'Drive instead'
WHEN element = '14' THEN 'Bicycle instead'
WHEN element = '15' THEN 'Walk instead'
WHEN element = '16' THEN 'Other'
WHEN element = '17' THEN 'Do not know'
else ''
end as ride_less,
count(*) as count,
round(100*count(*)/(select count(*) from unnest_element)::numeric,1) as pct
from unnest_element
group by element
order by pct desc)
select * from ridechange;""".format(where)
debug(query_string)
web_session = Session()
query = web_session.execute(query_string)
# each record will be converted as json
# and sent back to page
ret_val = [[record[0], int(record[1]), float(record[2])] for record in query]
debug(ret_val)
for row in ret_val:
bar_chart.add(str(row[0]), int(row[1]))
bar_chart.render_to_file(os.path.join(DIRPATH, "static/image/{0}{1}.svg".format('q', qnum)))
web_session.close()
return ret_val
@staticmethod
def query_zipcode_data(where):
ret_val = []
query_args = {}
where = where
query_string = """
WITH survey as (
select *
from odk.fall_survey_2016_data f
where
f.willing in ('1','2') and
f.origin_zip is not null {0}),
zipcount as (
select origin_zip,
count(*) as zip_count,
round(100*count(*)/(select count(*) from survey)::numeric,1) as pct
from survey
group by origin_zip
order by origin_zip)
select * from zipcount;""".format(where)
debug(query_string)
web_session = Session()
query = web_session.execute(query_string)
ZIPCODE = 0
COUNT = 1
PER = 2
# each record will be converted as json
# and sent back to page
for record in query:
data = {}
data['zipcode'] = record[ZIPCODE]
data['count'] = record[COUNT]
data['percentage'] = float(record[PER])
ret_val.append(data)
web_session.close()
return ret_val
@staticmethod
def query_cty_data(where):
ret_val = []
query_args = {}
where = where
query_string = """
WITH survey as (
select *
from odk.fall_survey_2016_data f
where
f.willing in ('1','2') and
f.origin_cty is not null {0}),
ctycount as (
select origin_cty,
count(*) as cty_count,
round(100*count(*)/(select count(*) from survey)::numeric,1) as pct
from survey
group by origin_cty
order by origin_cty)
select * from ctycount;""".format(where)
debug(query_string)
web_session = Session()
query = web_session.execute(query_string)
COUNTY = 0
COUNT = 1
PER = 2
# each record will be converted as json
# and sent back to page
for record in query:
data = {}
data['COUNTY'] = record[COUNTY]
data['count'] = record[COUNT]
data['percentage'] = float(record[PER])
ret_val.append(data)
web_session.close()
return ret_val
@staticmethod
def query_map_data(where):
ret_val = []
query_args = {}
where = where
region = " AND f.q5_orig_region='2' and f.q6_dest_region='2' "
validate = " AND f.loc_validated='1' "
not_null = " AND f.q3_orig_type is not null AND f.q4_dest_type is not null "
limit = "limit 2000;"
query_string = """
SELECT
f.rte,
r.rte_desc,
f.dir,
r.dir_desc,
CASE
WHEN q1_satisfaction = '1' THEN 'Very satisfied'
WHEN q1_satisfaction = '3' THEN 'Somewhat satisfied'
WHEN q1_satisfaction = '4' THEN 'Neutral'
WHEN q1_satisfaction = '5' THEN 'Somewhat dissatisfied'
WHEN q1_satisfaction = '6' THEN 'Very dissatisfied'
WHEN q1_satisfaction = '7' THEN 'Do not know'
else ''
end as satisfaction,
case
WHEN q3_orig_type = '1' THEN 'Home'
WHEN q3_orig_type = '2' THEN 'Work'
WHEN q3_orig_type = '3' THEN 'School'
WHEN q3_orig_type = '4' THEN 'Recreation'
WHEN q3_orig_type = '5' THEN 'Shopping'
WHEN q3_orig_type = '6' THEN 'Personal business'
WHEN q3_orig_type = '7' THEN 'Visit family or friends'
WHEN q3_orig_type = '8' THEN 'Medical appointment'
WHEN q3_orig_type = '9' THEN 'Other'
else ''
end as o_type,
case
WHEN q4_dest_type = '1' THEN 'Home'
WHEN q4_dest_type = '2' THEN 'Work'
WHEN q4_dest_type = '3' THEN 'School'
WHEN q4_dest_type = '4' THEN 'Recreation'
WHEN q4_dest_type = '5' THEN 'Shopping'
WHEN q4_dest_type = '6' THEN 'Personal business'
WHEN q4_dest_type = '7' THEN 'Visit family or friends'
WHEN q4_dest_type = '8' THEN 'Medical appointment'
WHEN q4_dest_type = '9' THEN 'Other'
else ''
end as d_type,
f.q5_orig_lat as o_lat,
f.q5_orig_lng as o_lng,
f.q6_dest_lat as d_lat,
f.q6_dest_lng as d_lng,
case
WHEN q7_travel_change = '1' THEN 'More'
WHEN q7_travel_change = '2' THEN 'Same'
WHEN q7_travel_change = '3' THEN 'Less'
WHEN q7_travel_change = '4' THEN 'Do not know'
else ''
end as ride_change,
case
WHEN q18_ridership = '1' THEN 'Frequent rider'
WHEN q18_ridership = '2' THEN 'Regular rider'
WHEN q18_ridership = '3' THEN 'Occasional rider'
WHEN q18_ridership = '4' THEN 'Infrequent rider'
WHEN q18_ridership = '5' THEN 'Do not know'
else ''
end as ridership,
case
WHEN q19_ride_years = '1' THEN 'Less than 1 year'
WHEN q19_ride_years = '2' THEN '1 to 2 years'
WHEN q19_ride_years = '3' THEN '3 to 5 years'
WHEN q19_ride_years = '4' THEN '6 to 10 years'
WHEN q19_ride_years = '5' THEN 'Over 10 years'
WHEN q19_ride_years = '6' THEN 'Do not know'
else ''
end as ride_years,
case
WHEN q20_approval = '1' THEN 'Strongly approve'
WHEN q20_approval = '2' THEN 'Somewhat approve'
WHEN q20_approval = '3' THEN 'Somewhat disapprove'
WHEN q20_approval = '4' THEN 'Strongly disapprove'
WHEN q20_approval = '5' THEN 'Do not know'
else ''
end as job_approval,
case
WHEN q21_one_change = '1' THEN 'Frequency improved'
WHEN q21_one_change = '2' THEN 'Reliability improved'
WHEN q21_one_change = '3' THEN 'Service expanded'
WHEN q21_one_change = '4' THEN 'Routes go to more places'
WHEN q21_one_change = '5' THEN 'Stops closer to my origin/destination'
WHEN q21_one_change = '6' THEN 'Crowding less'
WHEN q21_one_change = '7' THEN 'Faster trip'
WHEN q21_one_change = '8' THEN 'Transfer less'
WHEN q21_one_change = '9' THEN 'Safer trip'
WHEN q21_one_change = '10' THEN 'Fare less expensive'
WHEN q21_one_change = '11' THEN 'Other'
WHEN q21_one_change = '12' THEN 'Nothing'
WHEN q21_one_change = '13' THEN 'Do not know'
else ''
end as one_change,
coalesce(f.q24_zipcode::text, ''),
case
WHEN q25_age = '1' THEN 'Under 18'
WHEN q25_age = '2' THEN '18-24'
WHEN q25_age = '3' THEN '25-34'
WHEN q25_age = '4' THEN '35-44'
WHEN q25_age = '5' THEN '45-54'
WHEN q25_age = '6' THEN '55-64'
WHEN q25_age = '7' THEN '65 or more'
else ''
end as age,
case
WHEN q26_gender = '1' THEN 'Female'
WHEN q26_gender = '2' THEN 'Male'
WHEN q26_gender = '3' THEN 'Transgender'
WHEN q26_gender = '4' THEN 'Other'
else ''
end as gender,
case
WHEN q29_income = '1' THEN 'Under $10,000'
WHEN q29_income = '2' THEN '$10,000-$19,999'
WHEN q29_income = '3' THEN '$20,000-$29,999'
WHEN q29_income = '4' THEN '$30,000-$39,999'
WHEN q29_income = '5' THEN '$40,000-$49,999'
WHEN q29_income = '6' THEN '$50,000-$59,999'
WHEN q29_income = '7' THEN '$60,000-$69,999'
WHEN q29_income = '8' THEN '$70,000-$79,999'
WHEN q29_income = '9' THEN '$80,000-$89,999'
WHEN q29_income = '10' THEN '$90,000-$99,999'
WHEN q29_income = '11' THEN '$100,000-$124,999'
WHEN q29_income = '12' THEN '$125,000-$150,000'
WHEN q29_income = '13' THEN 'Over $150,000'
WHEN q29_income = '14' THEN 'Do not know'
else ''
end as income,
f.time_of_day,
to_char(f._date, 'Mon DD YYYY') as _date
from odk.fall_survey_2016_data f
join odk.rte_lookup r
on f.rte::integer = r.rte and f.dir::integer = r.dir """
query_string += where
query_string += region
query_string += validate
query_string += not_null
query_string += limit
#debug(query_string)
web_session = Session()
query = web_session.execute(query_string)
RTE = 0
RTE_DESC = 1
DIR = 2
DIR_DESC = 3
SATISFACTION = 4
OTYPE = 5
DTYPE = 6
OLAT = 7
OLNG = 8
DLAT = 9
DLNG = 10
TRAVEL_CHANGE = 11
RIDERSHIP = 12
RIDE_YEARS = 13
JOB_APPROVAL = 14
ONE_CHANGE = 15
ZIPCODE = 16
AGE = 17
GENDER = 18
INCOME = 19
TOD = 20
DATE = 21
# each record will be converted as json
# and sent back to page
for record in query:
data = {}
data['rte'] = record[RTE]
data['rte_desc'] = record[RTE_DESC]
data['dir'] = record[DIR]
data['dir_desc'] = record[DIR_DESC]
data['satisfaction'] = record[SATISFACTION]
data['o_type'] = record[OTYPE]
data['d_type'] = record[DTYPE]
data['o_lat'] = float(record[OLAT])
data['o_lng'] = float(record[OLNG])
data['d_lat'] = float(record[DLAT])
data['d_lng'] = float(record[DLNG])
data['travel_change'] = record[TRAVEL_CHANGE]
data['ridership'] = record[RIDERSHIP]
data['ride_years'] = record[RIDE_YEARS]
data['job_approval'] = record[JOB_APPROVAL]
data['one_change'] = record[ONE_CHANGE]
data['zipcode'] = record[ZIPCODE]
data['age'] = record[AGE]
data['gender'] = record[GENDER]
data['income'] = record[INCOME]
data['time_of_day'] = record[TOD]
data['date'] = record[DATE]
ret_val.append(data)
web_session.close()
return ret_val
@staticmethod
def buildconditions(args):
where = ""
lookupwd = {
"Weekday": "(1,2,3,4,5)",
"Weekend": "(0,6)",
"Saturday": "(6)",
"Sunday": "(0)"
}
lookupvehicle = {
"MAX": "IN ('90','100','190','200','290')",
"WES": "IN ('203')",
"Bus": "NOT IN ('90','100','190','200','290','203')"
}
lookuprtetype = {
"MAX": "1",
"Bus Crosstown": "2",
"Bus Eastside Feeder": "3",
"Bus Westside Feeder": "4",
"Bus Radial": "5",
"WES": "6"
}
lookuptod = {
"Weekday Early AM": "1",
"Weekday AM Peak": "2",
"Weekday Midday": "3",
"Weekday PM Peak": "4",
"Weekday Night": "5",
"Weekend Morning": "6",
"Weekend Midday": "7",
"Weekend Night": "8"
}
lookupaddress = {
"Home": "1",
"Work": "2",
"School": "3",
"Recreation": "4",
"Shopping": "5",
"Personal business": "6",
"Visit family or friends": "7",
"Medical appointment": "8",
"Other": "9"
}
lookuptravel = {
"More" : "1",
"Same" : "2",
"Less": "3",
"Do not know" : "4"
}
lookupsatisfaction = {
"Satisfied": ("1","3"),
"Neutral": "('4')",
"Not satisfied": ("5","6"),
"Do not know": "('7')"
}
for key, value in args.items():
# app.logger.debug(key,value)
if not value: continue
if key == "rte" and value.isnumeric():
where += " AND f.rte='{0}'".format(value)
if key == "dir" and value.isnumeric():
where += " AND f.dir='{0}'".format(value)
if key == "day" and value in lookupwd:
where += " AND extract(dow from f._date) in {0}".format(lookupwd[value])
if key == "tod":
#debug(isinstance(value, str))
where += " AND f.time_of_day='{0}'".format(value)
if key == "vehicle" and value in lookupvehicle:
where += " AND rte {0}".format(lookupvehicle[value])
if key == "dest_sep":
where += " AND f.dest_sep='{0}'".format(value)
if key == "dest_zip":
where += " AND f.dest_zip='{0}'".format(value)
if key == "dest_cty":
where += " AND f.dest_cty='{0}'".format(value)
if key == "orig" and value in lookupaddress:
where += " AND f.q3_orig_type='{0}'".format(lookupaddress[value])
if key == "dest" and value in lookupaddress:
where += " AND f.q4_dest_type='{0}'".format(lookupaddress[value])
if key == "travel" and value in lookuptravel:
where += " AND f.q7_travel_change='{0}'".format(lookuptravel[value])
if key == "satisfaction" and value in lookupsatisfaction:
where += " AND f.q1_satisfaction in {0}".format(lookupsatisfaction[value])
return where
@staticmethod
def query_route_data(user='', rte_desc='', dir_desc='', csv=False):
ret_val = []
query_args = {}
where = ""
#if user: user = "%" + user + "%"
user_filter = " s.name = :user"
rte_desc_filter = " r.rte_desc = :rte_desc "
dir_desc_filter = " r.dir_desc = :dir_desc "
def construct_where(string, param, filt_name):
if not param:
return string
if filt_name == "user": filt = user_filter
elif filt_name == "rte_desc": filt = rte_desc_filter
else: filt = dir_desc_filter
if string:
return string + " AND " + filt
else:
return string + filt
# build where clause
debug(where)
for param in [(user, 'user'),(rte_desc, 'rte_desc'),(dir_desc, 'dir_desc')]:
where = construct_where(where, param[0], param[1])
debug(where)
query_args[param[1]] = param[0]
if where:
where = " WHERE " + where
limit = "LIMIT 300;"
if csv:
# add headers to csv data
ret_val.append(
['date','time','user','rte_desc','dir_desc','satisfaction', 'comments'])
limit = ";"
query_string = """
SELECT
r.rte_desc,
r.dir_desc,
f._date,
date_trunc('second',f._end) as _time,
s.name as user,
case
WHEN q1_satisfaction = '1' THEN 'Very satisfied'
WHEN q1_satisfaction = '3' THEN 'Somewhat satisfied'
WHEN q1_satisfaction = '4' THEN 'Neutral'
WHEN q1_satisfaction = '5' THEN 'Somewhat dissatisfied'
WHEN q1_satisfaction = '6' THEN 'Very dissatisfied'
WHEN q1_satisfaction = '7' THEN 'Do not know'
end as satisfaction,
coalesce(f.q2_satis_comments,'')
from odk.fall_survey_2016_data f
join odk.rte_lookup r
on f.rte::integer = r.rte and f.dir::integer = r.dir
join odk.surveyors s
on f._surveyor = s.username """
query_string += where
query_string += " ORDER BY f._date DESC, f._end DESC "
query_string += limit
debug(query_string)
web_session = Session()
query = web_session.execute(query_string, query_args)
RTE_DESC = 0
DIR_DESC = 1
DATE = 2
TIME = 3
USER = 4
SATISFACTION = 5
COMMENTS = 6
# each record will be converted as json
# and sent back to page
for record in query:
if csv:
data = []
data.append(str(record[DATE]))
data.append(str(record[TIME]))
data.append(record[USER])
data.append(record[RTE_DESC])
data.append(record[DIR_DESC])
data.append(record[SATISFACTION])
data.append(record[COMMENTS])
else:
data = {}
data['date'] = str(record[DATE])
data['time'] = str(record[TIME])
data['user'] = record[USER]
data['rte_desc'] = record[RTE_DESC]
data['dir_desc'] = record[DIR_DESC]
data['satisfaction'] = record[SATISFACTION]
data['comments'] = record[COMMENTS]
ret_val.append(data)
web_session.close()
return ret_val
@staticmethod
def get_users():
users = []
session = Session()
results = session.execute("""
SELECT name
FROM odk.surveyors
ORDER BY name;""")
for result in results:
print((dict(result)))
print("Type:", type(dict(result)))
user_dict = dict(result)
print(user_dict)
user = user_dict.get('name')
users.append(str(user))
session.close()
return users
@staticmethod
def get_user_data(date):
surveyordata = []
bar_chart = pygal.HorizontalBar(print_values=True)
bar_chart.title = 'Number of Surveys by Surveyor on {0}'.format(date)
web_session = Session()
results = web_session.execute("""
select
name,
string_agg(distinct route, ' || ') as routes,
count(route) as count,
round(count(route)*100/(select count(*) from odk.users_tod where _date=:date)::numeric,2) as pct
from odk.users_tod
where _date=:date
group by name
order by count desc;""",{'date':date})
for result in results:
print(result[0],result[1],result[2],result[3])
surveyordata.append([result[0],result[1],int(result[2]),float(result[3])])
bar_chart.add(result[0],int(result[2]))
web_session.close()
debug(surveyordata)
bar_chart.render_to_file(os.path.join(DIRPATH, "static/image/{0}{1}.svg".format('surveyors-', date)))
return surveyordata
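# --- Hedged example (added for illustration; not part of the dashboard app). ---
# Helper.buildconditions only assembles a SQL WHERE fragment from request args, so it
# can be exercised without a database session (the imports at the top of this module
# still require the dashboard package and its dependencies to be importable):
if __name__ == '__main__':
    example_args = {'rte': '9', 'day': 'Weekday', 'satisfaction': 'Neutral'}
    print(Helper.buildconditions(example_args))
    # -> " AND f.rte='9' AND extract(dow from f._date) in (1,2,3,4,5) AND f.q1_satisfaction in ('4')"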
|
miketung168/survey-dashboard
|
dashboard/helper.py
|
Python
|
mit
| 35,348
|
from typing import Dict, List, Any
import urllib3
from CommonServerPython import *
from math import ceil
# Disable insecure warnings
urllib3.disable_warnings()
''' CONSTANTS '''
DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
''' CLIENT CLASS '''
class Client(BaseClient):
def get_domain_data(self, domain: str) -> Dict[str, Any]:
return self._http_request(
method='GET',
url_suffix=f'full/{domain}',
params={}
)
def get_search_data(self, field: str, value: str, limit: int, page: int) -> Dict[str, Any]:
return self._http_request(
method='GET',
url_suffix=f'domains/{field}/{value}',
params={'limit': limit,
'page': page}
)
def test_module(self):
return self._http_request(
method='GET',
url_suffix='domains/ip/8.8.8.8',
params={}
)
''' COMMAND FUNCTIONS '''
def test_module_command(client: Client) -> str:
"""Tests API connectivity and authentication'
Returning 'ok' indicates that the integration works like it is supposed to.
Connection to the service is successful.
Raises exceptions if something goes wrong.
:type client: ``Client``
:param client: the base client
:return: 'ok' if test passed, anything else will fail the test.
:rtype: ``str``
"""
try:
client.test_module()
except DemistoException as e:
if 'Forbidden' in str(e):
return 'Authorization Error: make sure API Key is correctly set'
else:
raise
return 'ok'
def domain_command(client: Client, args: Dict[str, Any]) -> List[CommandResults]:
"""domain command: Returns domain reputation for a list of domains
:type client: ``Client``
:param client: Hostio client to use
:type args: ``Dict[str, Any]``
:param args:
all command arguments, usually passed from ``demisto.args()``.
``args['domain']`` list of domains or a single domain
:return:
A ``CommandResults`` object that is then passed to ``return_results``,
that contains Domains
:rtype: ``CommandResults``
"""
domains = argToList(args.get('domain'))
command_results: List[CommandResults] = []
for domain in domains:
domain_data = client.get_domain_data(domain)
if domain_data.get('web', {}).get('date'):
domain_date_dt = dateparser.parse(domain_data['web']['date'])
if domain_date_dt:
domain_data['updated_date'] = domain_date_dt.strftime(DATE_FORMAT)
score = Common.DBotScore.NONE
dbot_score = Common.DBotScore(
indicator=domain,
integration_name='HostIo',
indicator_type=DBotScoreType.DOMAIN,
score=score,
)
domain_standard_context = Common.Domain(
domain=domain,
updated_date=domain_data.get('updated_date', None),
name_servers=domain_data['web'].get('server', None),
registrant_name=domain_data['web'].get('title', None),
registrant_country=domain_data['web'].get('country', None),
registrant_email=domain_data['web'].get('email', None),
registrant_phone=domain_data['web'].get('phone', None),
dns=domain_data.get('dns', None),
dbot_score=dbot_score
)
readable_output = tableToMarkdown('Domain', domain_data)
command_results.append(CommandResults(
readable_output=readable_output,
outputs_prefix='HostIo.Domain',
outputs_key_field='domain',
outputs=domain_data,
indicator=domain_standard_context
))
return command_results
def search_command(client: Client, args: Dict[str, Any]) -> CommandResults:
field = args.get('field', None)
value = args.get('value', None)
limit = int(args.get('limit', 25))
data = client.get_search_data(field, value, limit, 0)
domains = data.get('domains', [])
total: int = data.get('total', 0)
read = tableToMarkdown(f'Domains associated with {field}: {value}', data)
if total == 0:
read = f'No Domains associated with {field}'
elif total > limit:
        # use len(domains) as the page size, since the trial API always returns 5
pages = ceil((total - len(domains)) / len(domains))
page = 1
while page <= pages:
data = client.get_search_data(field, value, limit, page)
domains += data.get('domains', [])
page += 1
data['domains'] = domains
context = {
'Field': field,
'Value': value,
'Domains': domains,
'Total': total
}
return CommandResults(
readable_output=read,
outputs_prefix='HostIo.Search',
outputs_key_field=['Field', 'Value'],
outputs=context,
raw_response=data)
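# A minimal sketch of the paging arithmetic used in search_command above.
# The helper below is hypothetical (not part of the integration); it only
# illustrates how many extra pages are requested after the first response.
def _example_page_count(total: int, first_page_size: int) -> int:
    # the remaining results are fetched in pages the same size as the first one
    return ceil((total - first_page_size) / first_page_size)

# e.g. _example_page_count(23, 5) == 4, so pages 1..4 are fetched and the
# domains from every response are concatenated before building the context.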
''' MAIN FUNCTION '''
def main() -> None:
"""main function, parses params and runs command functions
:return:
:rtype:
"""
params = demisto.params()
api_key = params.get('token')
base_url = urljoin(params['url'], '/api')
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
headers = {
'Authorization': f'Bearer {api_key}'
}
try:
client = Client(
base_url=base_url,
verify=verify_certificate,
headers=headers,
proxy=proxy,
)
if demisto.command() == 'test-module':
return_results(test_module_command(client))
elif demisto.command() == 'domain':
return_results(domain_command(client, demisto.args()))
elif demisto.command() == 'hostio-domain-search':
return_results(search_command(client, demisto.args()))
except Exception as e:
demisto.error(traceback.format_exc())
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
''' ENTRY POINT '''
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
|
demisto/content
|
Packs/HostIo/Integrations/HostIo/HostIo.py
|
Python
|
mit
| 6,099
|
import unittest
from test.asserting.policy import PolicyAssertion, get_fixture_path
from vint.linting.level import Level
from vint.linting.policy.prohibit_implicit_scope_builtin_variable import (
ProhibitImplicitScopeBuiltinVariable,
)
PATH_VALID_VIM_SCRIPT = get_fixture_path(
'prohibit_implicit_scope_builtin_variable_valid.vim')
PATH_INVALID_VIM_SCRIPT = get_fixture_path(
'prohibit_implicit_scope_builtin_variable_invalid.vim')
class TestProhibitImplicitScopeBuiltinVariable(PolicyAssertion, unittest.TestCase):
def test_get_violation_if_found_when_file_is_valid(self):
self.assertFoundNoViolations(PATH_VALID_VIM_SCRIPT,
ProhibitImplicitScopeBuiltinVariable)
def create_violation(self, line, column):
return {
'name': 'ProhibitImplicitScopeBuiltinVariable',
'level': Level.WARNING,
'position': {
'line': line,
'column': column,
'path': PATH_INVALID_VIM_SCRIPT
}
}
def test_get_violation_if_found_when_file_is_invalid(self):
expected_violations = [
self.create_violation(4, 9),
self.create_violation(5, 10),
]
self.assertFoundViolationsEqual(PATH_INVALID_VIM_SCRIPT,
ProhibitImplicitScopeBuiltinVariable,
expected_violations)
if __name__ == '__main__':
unittest.main()
|
Kuniwak/vint
|
test/integration/vint/linting/policy/test_prohibit_implicit_scope_builtin_variable.py
|
Python
|
mit
| 1,496
|
# -*- coding: utf8 -*-
"""In the Spotlight analysis module."""
import time
import datetime
import itertools
import enki
from libcrowds_analyst.analysis import helpers
from libcrowds_analyst import object_loader
MERGE_RATIO = 0.5
def get_overlap_ratio(r1, r2):
"""Return the overlap ratio of two rectangles."""
r1x2 = r1['x'] + r1['w']
r2x2 = r2['x'] + r2['w']
r1y2 = r1['y'] + r1['h']
r2y2 = r2['y'] + r2['h']
x_overlap = max(0, min(r1x2, r2x2) - max(r1['x'], r2['x']))
y_overlap = max(0, min(r1y2, r2y2) - max(r1['y'], r2['y']))
intersection = x_overlap * y_overlap
r1_area = r1['w'] * r1['h']
r2_area = r2['w'] * r2['h']
union = r1_area + r2_area - intersection
overlap = float(intersection) / float(union)
return overlap
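# A worked illustration of the ratio above (hypothetical rectangles):
# r1 = {'x': 0, 'y': 0, 'w': 10, 'h': 10}, r2 = {'x': 5, 'y': 5, 'w': 10, 'h': 10}
# overlap 5 x 5 = 25, union = 100 + 100 - 25 = 175, so the ratio is 25/175 ~= 0.14,
# which is below MERGE_RATIO (0.5), and the two selections would not be merged.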
def get_rect_from_selection(anno):
"""Return a rectangle from a selection annotation."""
media_frag = anno['target']['selector']['value']
regions = media_frag.split('=')[1].split(',')
return {
'x': int(round(float(regions[0]))),
'y': int(round(float(regions[1]))),
'w': int(round(float(regions[2]))),
'h': int(round(float(regions[3])))
}
def merge_rects(r1, r2):
"""Merge two rectangles."""
return {
'x': min(r1['x'], r2['x']),
'y': min(r1['y'], r2['y']),
        # the span is measured from the left/top-most edge, not from r2's edge
        'w': max(r1['x'] + r1['w'], r2['x'] + r2['w']) - min(r1['x'], r2['x']),
        'h': max(r1['y'] + r1['h'], r2['y'] + r2['h']) - min(r1['y'], r2['y'])
}
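# Using the same hypothetical rectangles as in the overlap example above,
# merging them gives {'x': 0, 'y': 0, 'w': 15, 'h': 15}: the minimum corner
# plus a span from that corner to the furthest right/bottom edge.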
def update_selector(anno, rect):
"""Update amedia frag selector."""
frag = '?xywh={0},{1},{2},{3}'.format(rect['x'], rect['y'], rect['w'],
rect['h'])
anno['target']['selector']['value'] = frag
anno['modified'] = datetime.datetime.now().isoformat()
def analyse_selections(api_key, endpoint, project_id, result_id, path, doi,
project_short_name, throttle, **kwargs):
"""Analyse In the Spotlight selection results."""
e = enki.Enki(api_key, endpoint, project_short_name, all=1)
result = enki.pbclient.find_results(project_id, id=result_id, limit=1,
all=1)[0]
df = helpers.get_task_run_df(e, result.task_id)
# Flatten annotations into a single list
anno_list = df['info'].tolist()
anno_list = list(itertools.chain.from_iterable(anno_list))
defaults = {'annotations': []}
result.info = helpers.init_result_info(doi, path, defaults)
clusters = []
comments = []
# Cluster similar regions
for anno in anno_list:
if anno['motivation'] == 'commenting':
comments.append(anno)
continue
elif anno['motivation'] == 'tagging':
r1 = get_rect_from_selection(anno)
matched = False
for cluster in clusters:
r2 = get_rect_from_selection(cluster)
overlap_ratio = get_overlap_ratio(r1, r2)
if overlap_ratio > MERGE_RATIO:
matched = True
r3 = merge_rects(r1, r2)
update_selector(cluster, r3)
if not matched:
update_selector(anno, r1) # still update to round rect params
clusters.append(anno)
else: # pragma: no cover
raise ValueError('Unhandled motivation')
result.info['annotations'] = clusters + comments
enki.pbclient.update_result(result)
time.sleep(throttle)
def analyse_all_selections(**kwargs):
"""Analyse all In the Spotlight selection results."""
e = enki.Enki(kwargs['api_key'], kwargs['endpoint'],
kwargs['project_short_name'], all=1)
results = object_loader.load(enki.pbclient.find_results,
project_id=e.project.id, all=1)
for result in results:
kwargs['project_id'] = e.project.id
kwargs['result_id'] = result.id
analyse_selections(**kwargs.copy())
helpers.send_mail({
'recipients': kwargs['mail_recipients'],
'subject': 'Analysis complete',
'body': '''
All {0} results for {1} have been analysed.
'''.format(len(results), e.project.name)
})
|
LibCrowds/libcrowds-analyst
|
libcrowds_analyst/analysis/playbills.py
|
Python
|
mit
| 4,184
|
import ftplib
import os.path as op
import logging
import os
log = logging.getLogger(__name__)
def download_coding_sequences(patric_id, seqtype, outdir='', outfile='', force_rerun=False):
"""Download the entire set of DNA or protein sequences from protein-encoding genes in a genome from NCBI.
Saves a FASTA file in the optional directory specified.
Args:
        patric_id (str): PATRIC genome ID
seqtype (str): "dna" or "protein" - if you want the coding sequences in DNA or amino acid formats.
outdir (str): optional output directory (default is the current directory)
outfile (str): optional custom name for file
force_rerun (bool): if you want to redownload existing files
Returns:
Path to downloaded FASTA file.
"""
if seqtype == 'dna':
extension = 'ffn'
elif seqtype == 'protein':
extension = 'faa'
else:
raise ValueError('seqtype must be "dna" or "protein"')
# TODO: use utils functions here
# path and filename parsing
if outfile:
outfile = op.join(outdir, '{}.{}'.format(outfile, extension))
else:
# if no outfile is specified, default is "$GI.PATRIC.faa"
outfile = op.join(outdir, '{}.PATRIC.{}'.format(patric_id, extension))
if not force_rerun:
# return the path to the file if it was already downloaded
if op.exists(outfile) and os.stat(outfile).st_size != 0:
log.debug('FASTA file already exists at {}'.format(outfile))
return outfile
try:
ftp = ftplib.FTP('ftp.patricbrc.org')
ftp.login()
ftp.cwd("/patric2/patric3/genomes/{0}/".format(patric_id))
with open(outfile, "wb") as gFile:
ftp.retrbinary('RETR {0}.PATRIC.{1}'.format(patric_id, extension), gFile.write)
ftp.quit()
# TODO: check exceptions
    except Exception:
return None
return outfile
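# A minimal usage sketch (hypothetical genome ID and output directory; nothing
# is written unless the FTP request succeeds, and None is returned on error):
#
#     faa = download_coding_sequences('83332.12', 'protein', outdir='/tmp')
#     if faa:
#         log.info('protein FASTA saved to %s', faa)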
|
SBRG/ssbio
|
ssbio/databases/patric.py
|
Python
|
mit
| 1,919
|
#!/usr/bin/env python3
# vim: set fileencoding=utf8 :
import string
import random
import loremipsum
import hashlib
import json
import sqlite3
import socket
import tornado.web
import tornado.ioloop
import webtest
from tornado import gen
import tornado.testing
from tornado import netutil
from tornado.testing import AsyncHTTPTestCase
from tornado.wsgi import WSGIAdapter
from http.cookiejar import CookieJar
import tornado_jsonapi.handlers
import tornado_jsonapi.resource
posts_schema = json.loads("""
{
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "post",
"description": "Example blog post API for tornado_jsonapi",
"type": "object",
"properties": {
"text":
{
"type": "string",
"description": "post body"
},
"author":
{
"type": "string",
"description": "author name"
}
},
"required": [ "text", "author" ],
"additionalProperties": false
}
""")
class PostGenerator:
def generate_text(self):
return ''.join(loremipsum.get_sentences(1))
def generate_post(self):
author = ''.join([random.choice(string.ascii_lowercase)
for i in range(5)])
return {'author': author, 'text': self.generate_text()}
def generate_resource(self, post=None, additional_data=None):
if post is None:
post = self.generate_post()
data = {'type': 'post', 'attributes': post}
if additional_data is not None:
data.update(additional_data)
return {'data': data}
class BaseTestCase(AsyncHTTPTestCase):
''' Base class for all test cases.
    We need to derive from AsyncTestCase because it creates the tornado IOLoop
    implicitly.
'''
empty_id = 'da39a3ee5e6b4b0d3255bfef95601890afd80709'
def __init__(self, *args, **kwargs):
AsyncHTTPTestCase.__init__(self, *args, **kwargs)
self.maxDiff = None
self._app = None
def construct_app(self):
raise NotImplementedError
def get_app(self):
if self._app is None:
app = self.construct_app()
app.io_loop = self.io_loop
return app
else:
return self._app
def content_type(self):
return 'application/vnd.api+json'
def setUp(self):
AsyncHTTPTestCase.setUp(self)
self.app = webtest.TestApp(
WSGIAdapter(self.get_app()),
cookiejar=CookieJar())
class Posts(tornado_jsonapi.resource.Resource):
class ResourceObject:
def __init__(self, resource, data):
self.resource = resource
self.data = data
def id_(self):
hsh = hashlib.sha1()
values = [self.data[attr] for attr in sorted(self.data)]
for v in values:
hsh.update(bytes(str(v), 'utf-8'))
return hsh.hexdigest()
def type_(self):
return 'post'
def attributes(self):
return self.data
def __init__(self, data):
self.data = data
super().__init__(posts_schema)
def name(self):
return 'post'
def create(self, attributes):
self.data.append(attributes)
return Posts.ResourceObject(self, attributes)
def read(self, id_):
for p in self.data:
post = Posts.ResourceObject(self, p)
if post.id_() == id_:
return post
def update(self, id_, attributes):
for p in self.data:
post = Posts.ResourceObject(self, p)
if post.id_() == id_:
p.update(attributes)
return post # XXX ?
def delete(self, id_):
for p in self.data:
post = Posts.ResourceObject(self, p)
if post.id_() == id_:
self.data.remove(p)
return True
return False
def list_(self, limit=0, page=0):
return [Posts.ResourceObject(self, p) for p in self.data]
def list_count(self):
return len(self.data)
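# A hedged sketch of how the ids above are derived: sha1 over the attribute
# values taken in sorted-key order ('author' before 'text'). For a hypothetical
# post this amounts to:
#
#     h = hashlib.sha1()
#     for v in ('Andrew', "RAWR I'm a lion"):   # values for sorted keys
#         h.update(bytes(str(v), 'utf-8'))
#     post_id = h.hexdigest()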
class SimpleAppMixin:
def construct_app(self):
data = """
[
{
"text": "RAWR I'm a lion",
"author": "Andrew"
},
{
"text": "я - лѣвъ!",
"author": "Андрей"
}
]
"""
app = tornado.web.Application([
(
r"/api/posts/([^/]*)",
tornado_jsonapi.handlers.APIHandler,
dict(resource=Posts(json.loads(data)))
),
], **tornado_jsonapi.handlers.not_found_handling_settings())
return app
def get_first_post_id(self):
res = self.app.get('/api/posts/')
posts = json.loads(res.body.decode(encoding='utf-8'))
return posts['data'][0]['id']
class SQLAlchemyMixin:
def construct_app(self):
from sqlalchemy import create_engine, Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
engine = create_engine('sqlite:///:memory:', echo=True)
Base = declarative_base()
class Post(Base):
__tablename__ = 'posts'
id = Column(Integer, primary_key=True)
author = Column(String)
text = Column(String)
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
app = tornado.web.Application([
(
r"/api/posts/([^/]*)",
tornado_jsonapi.handlers.APIHandler,
dict(resource=tornado_jsonapi.resource.SQLAlchemyResource(
Post, Session))
),
], **tornado_jsonapi.handlers.not_found_handling_settings())
return app
class DBAPI2Mixin:
def construct_app(self):
resource = tornado_jsonapi.resource.DBAPI2Resource(
posts_schema, sqlite3, sqlite3.connect(':memory:'))
resource._create_table()
app = tornado.web.Application([
(
r"/api/posts/([^/]*)",
tornado_jsonapi.handlers.APIHandler,
dict(resource=resource)
),
], **tornado_jsonapi.handlers.not_found_handling_settings())
return app
class SlowpokePosts(tornado_jsonapi.resource.Resource):
class ResourceObject:
def __init__(self, resource, data):
self.resource = resource
self.data = data
def id_(self):
hsh = hashlib.sha1()
values = [self.data[attr] for attr in sorted(self.data)]
for v in values:
hsh.update(bytes(str(v), 'utf-8'))
return hsh.hexdigest()
def type_(self):
return 'post'
def attributes(self):
return self.data
def __init__(self, data):
self.data = data
super().__init__(posts_schema)
def name(self):
return 'post'
@gen.coroutine
def create(self, attributes):
yield gen.sleep(1)
self.data.append(attributes)
return Posts.ResourceObject(self, attributes)
@gen.coroutine
def read(self, id_):
yield gen.sleep(1)
for p in self.data:
post = Posts.ResourceObject(self, p)
if post.id_() == id_:
return post
@gen.coroutine
def update(self, id_, attributes):
yield gen.sleep(1)
for p in self.data:
post = Posts.ResourceObject(self, p)
if post.id_() == id_:
p.update(attributes)
return post # XXX ?
@gen.coroutine
def delete(self, id_):
yield gen.sleep(1)
for p in self.data:
post = Posts.ResourceObject(self, p)
if post.id_() == id_:
self.data.remove(p)
return True
return False
@gen.coroutine
def list_(self):
return [Posts.ResourceObject(self, p) for p in self.data]
class SlowAppMixin:
def construct_app(self):
data = """
[
{
"text": "",
"author": ""
},
{
"text": "RAWR I'm a lion",
"author": "Andrew"
},
{
"text": "я - лѣвъ!",
"author": "Андрей"
}
]
"""
app = tornado.web.Application([
(
r"/api/posts/([^/]*)",
tornado_jsonapi.handlers.APIHandler,
dict(resource=SlowpokePosts(json.loads(data)))
),
], **tornado_jsonapi.handlers.not_found_handling_settings())
return app
def get_first_post_id(self):
res = self.app.get('/api/posts/')
posts = json.loads(res.body.decode(encoding='utf-8'))
return posts['data'][0]['id']
def _bind_unused_port(reuse_port=False):
'''
See https://github.com/tornadoweb/tornado/pull/1574
'''
sock = netutil.bind_sockets(None, '127.0.0.1', family=socket.AF_INET,
reuse_port=reuse_port)[0]
port = sock.getsockname()[1]
return sock, port
tornado.testing.bind_unused_port = _bind_unused_port
|
lockie/tornado_jsonapi
|
test/__init__.py
|
Python
|
mit
| 9,383
|
#!/usr/bin/env python3
import socket
import selectors
baseSelector = selectors.DefaultSelector()
if __name__ == '__main__':
with open('../poll.py', 'r') as f:
baseSelector.register(f, selectors.EVENT_READ)
for k, v in baseSelector.select(0):
print(k, v)
'''
https://docs.python.org/3/library/select.html#module-select
Note that on Windows, it only works for sockets;
'''
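# A hedged sketch of the portable, socket-based case mentioned in the note
# above (hypothetical loopback listener; left commented so running the module
# still only exercises the file-object path):
#
#     server = socket.socket()
#     server.bind(('127.0.0.1', 0))
#     server.listen()
#     server.setblocking(False)
#     baseSelector.register(server, selectors.EVENT_READ)
#     for key, events in baseSelector.select(timeout=0):
#         print(key.fileobj, events)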
|
JShadowMan/package
|
python/IOMultiplexing/_selector/base.py
|
Python
|
mit
| 428
|
import pickle as p
class sysfile:
def __init__(self,name="program.log",ftype="r+"):
self.f=open(name,ftype)
try:
#self.rr=self.f.read()
self.prop={"name":name,"opentype":ftype,"data":self.f.readlines(),"datastr":self.f.read()}
except:
print("error:fileerror")
self.prop={"name":name,"opentype":ftype}
print(self.prop)
self.ft=ftype
#try:
# self.byt=p.Pickler(open(name,"rb+"),p.DEFAULT_PROTOCOL)
#except:
#
# raise Exception("Pickle could not be created warning")
def init(self,name="program.log",ftype="r+"):
self.f.close()
self.f=open(name,ftype)
self.prop={"name":name,"opentype":ftype,"data":self.f.readlines(),"datastr":self.f.read()}
self.ft=ftype
def addlines(self,lines=[]):
        if not self.ft == "r":  # self.ft holds the open mode set in __init__/init
self.f.writelines(lines)
def add(self,word="",glitch=""):
print("the glitch was",glitch)
self.f.write(word)
def close(self):
self.f.close()
self.f=None
def pack(files,filen):
packf=[]
for x in files:
print("packing file:",x)
s=sysfile(x)
s.close()
packf.append(s)
p.dump(packf,open(filen,"wb"))
def unpack(filen):
f=open(filen,"rb+")
dat=p.load(f)
for data in dat:
cf=sysfile(data.prop["name"],"w")
print("this is: ",data.prop["datastr"])
        cf.add(data.prop["datastr"])
cf.close()
def test():
pack(["read.txt"],"r.ppf")
|
javaarchive/PIDLE
|
systools.py
|
Python
|
mit
| 1,768
|
"""Add not null constraints everywhere
Revision ID: ba6fefa33e22
Revises: 2cb2db7089f4
Create Date: 2016-09-12 23:50:53.526022
"""
# revision identifiers, used by Alembic.
revision = 'ba6fefa33e22'
down_revision = '2cb2db7089f4'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.alter_column('game', 'black_id',
existing_type=sa.INTEGER(),
nullable=False)
op.alter_column('game', 'date_played',
existing_type=postgresql.TIMESTAMP(),
nullable=False)
op.alter_column('game', 'date_reported',
existing_type=postgresql.TIMESTAMP(),
nullable=False)
op.alter_column('game', 'handicap',
existing_type=sa.INTEGER(),
nullable=False)
op.alter_column('game', 'komi',
existing_type=postgresql.DOUBLE_PRECISION(precision=53),
nullable=False)
op.alter_column('game', 'rated',
existing_type=sa.BOOLEAN(),
nullable=False)
op.alter_column('game', 'result',
existing_type=sa.VARCHAR(length=10),
nullable=False)
op.alter_column('game', 'server_id',
existing_type=sa.INTEGER(),
nullable=False)
op.alter_column('game', 'white_id',
existing_type=sa.INTEGER(),
nullable=False)
op.alter_column('go_server', 'name',
existing_type=sa.VARCHAR(length=80),
nullable=False)
op.alter_column('go_server', 'token',
existing_type=sa.TEXT(),
nullable=False)
op.alter_column('go_server', 'url',
existing_type=sa.VARCHAR(length=180),
nullable=False)
op.alter_column('myuser', 'active',
existing_type=sa.BOOLEAN(),
nullable=False)
op.alter_column('myuser', 'claimed',
existing_type=sa.BOOLEAN(),
nullable=False)
op.alter_column('myuser', 'email',
existing_type=sa.VARCHAR(length=255),
nullable=False)
op.alter_column('myuser', 'login_count',
existing_type=sa.INTEGER(),
nullable=False)
op.alter_column('player', 'name',
existing_type=sa.VARCHAR(length=20),
nullable=False)
op.alter_column('player', 'server_id',
existing_type=sa.INTEGER(),
nullable=False)
op.alter_column('player', 'token',
existing_type=sa.TEXT(),
nullable=False)
op.alter_column('player', 'user_id',
existing_type=sa.INTEGER(),
nullable=False)
op.alter_column('rating', 'rating',
existing_type=postgresql.DOUBLE_PRECISION(precision=53),
nullable=False)
op.alter_column('rating', 'sigma',
existing_type=postgresql.DOUBLE_PRECISION(precision=53),
nullable=False)
op.alter_column('rating', 'user_id',
existing_type=sa.INTEGER(),
nullable=False)
op.alter_column('role', 'description',
existing_type=sa.VARCHAR(length=255),
nullable=False)
op.alter_column('role', 'name',
existing_type=sa.VARCHAR(length=80),
nullable=False)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.alter_column('role', 'name',
existing_type=sa.VARCHAR(length=80),
nullable=True)
op.alter_column('role', 'description',
existing_type=sa.VARCHAR(length=255),
nullable=True)
op.alter_column('rating', 'user_id',
existing_type=sa.INTEGER(),
nullable=True)
op.alter_column('rating', 'sigma',
existing_type=postgresql.DOUBLE_PRECISION(precision=53),
nullable=True)
op.alter_column('rating', 'rating',
existing_type=postgresql.DOUBLE_PRECISION(precision=53),
nullable=True)
op.alter_column('player', 'user_id',
existing_type=sa.INTEGER(),
nullable=True)
op.alter_column('player', 'token',
existing_type=sa.TEXT(),
nullable=True)
op.alter_column('player', 'server_id',
existing_type=sa.INTEGER(),
nullable=True)
op.alter_column('player', 'name',
existing_type=sa.VARCHAR(length=20),
nullable=True)
op.alter_column('myuser', 'login_count',
existing_type=sa.INTEGER(),
nullable=True)
op.alter_column('myuser', 'email',
existing_type=sa.VARCHAR(length=255),
nullable=True)
op.alter_column('myuser', 'claimed',
existing_type=sa.BOOLEAN(),
nullable=True)
op.alter_column('myuser', 'active',
existing_type=sa.BOOLEAN(),
nullable=True)
op.alter_column('go_server', 'url',
existing_type=sa.VARCHAR(length=180),
nullable=True)
op.alter_column('go_server', 'token',
existing_type=sa.TEXT(),
nullable=True)
op.alter_column('go_server', 'name',
existing_type=sa.VARCHAR(length=80),
nullable=True)
op.alter_column('game', 'white_id',
existing_type=sa.INTEGER(),
nullable=True)
op.alter_column('game', 'server_id',
existing_type=sa.INTEGER(),
nullable=True)
op.alter_column('game', 'result',
existing_type=sa.VARCHAR(length=10),
nullable=True)
op.alter_column('game', 'rated',
existing_type=sa.BOOLEAN(),
nullable=True)
op.alter_column('game', 'komi',
existing_type=postgresql.DOUBLE_PRECISION(precision=53),
nullable=True)
op.alter_column('game', 'handicap',
existing_type=sa.INTEGER(),
nullable=True)
op.alter_column('game', 'date_reported',
existing_type=postgresql.TIMESTAMP(),
nullable=True)
op.alter_column('game', 'date_played',
existing_type=postgresql.TIMESTAMP(),
nullable=True)
op.alter_column('game', 'black_id',
existing_type=sa.INTEGER(),
nullable=True)
### end Alembic commands ###
|
usgo/online-ratings
|
web/migrations/versions/ba6fefa33e22_add_not_null_constraints_everywhere.py
|
Python
|
mit
| 6,577
|
import json
from django.test import TestCase
from django.contrib.auth.models import User
def setUp():
User.objects.create_user(
first_name='brett', email='theiviaxx@gmail.com', password='top_secret')
class FrogTestCase(TestCase):
fixtures = ['test_data.json']
def test_filter(self):
res = self.client.get('/frog/gallery/1/filter')
data = json.loads(res.content)
self.assertEqual(res.status_code, 200)
self.assertEqual(data['isSuccess'], True)
res = self.client.get('/frog/gallery/1#{"filters":[[100000000]]}')
self.assertEqual(res.status_code, 200)
def test_get(self):
res = self.client.get('/frog/gallery/1')
self.assertEqual(res.status_code, 200)
def test_create_gallery(self):
res = self.client.post('/frog/gallery', {'title': 'test_gallery'})
print res.content
self.assertEqual(res.status_code, 200)
res = self.client.post('/frog/gallery')
self.assertEqual(res.status_code, 200)
res = self.client.get('/frog/gallery')
data = json.loads(res.content)
self.assertEqual(len(data['values']), 3)
|
theiviaxx/Frog
|
frog/tests.py
|
Python
|
mit
| 1,158
|
import os, sys, re, codecs
from setuptools import setup, find_packages
def read(*parts):
# intentionally *not* adding an encoding option to open
# see here: https://github.com/pypa/virtualenv/issues/201#issuecomment-3145690
return codecs.open(os.path.join(os.path.abspath(os.path.dirname(__file__)), *parts), 'r').read()
long_description = "\n" + "\n".join([read('README.rst'), read('PROJECT.txt')])
setup(name="op",
version="0.1",
description="Fast file finder and opener.",
long_description=long_description,
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
# 'Programming Language :: Python :: 3',
# 'Programming Language :: Python :: 3.1',
# 'Programming Language :: Python :: 3.2',
# 'Programming Language :: Python :: 3.3',
],
keywords='development environment tool git find files editor',
author='Colm O\'Connor',
author_email='colm.oconnor.github@gmail.com',
url='https://github.com/crdoconnor/op',
license='MIT',
install_requires=['pyyaml',],
packages=find_packages(exclude=[]),
package_data={},
entry_points=dict(console_scripts=['op=op:cli',]),
zip_safe=False,
)
|
crdoconnor/op
|
setup.py
|
Python
|
mit
| 1,473
|
from __future__ import unicode_literals
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.core.exceptions import ValidationError as DjangoValidationError
from django.core.validators import RegexValidator
from django.forms import ImageField as DjangoImageField
from django.utils import six, timezone
from django.utils.dateparse import parse_date, parse_datetime, parse_time
from django.utils.encoding import is_protected_type, smart_text
from django.utils.translation import ugettext_lazy as _
from rest_framework import ISO_8601
from rest_framework.compat import (
EmailValidator, MinValueValidator, MaxValueValidator,
MinLengthValidator, MaxLengthValidator, URLValidator, OrderedDict,
unicode_repr, unicode_to_repr
)
from rest_framework.exceptions import ValidationError
from rest_framework.settings import api_settings
from rest_framework.utils import html, representation, humanize_datetime
import collections
import copy
import datetime
import decimal
import inspect
import re
class empty:
"""
This class is used to represent no data being provided for a given input
or output value.
It is required because `None` may be a valid input or output value.
"""
pass
def is_simple_callable(obj):
"""
True if the object is a callable that takes no arguments.
"""
function = inspect.isfunction(obj)
method = inspect.ismethod(obj)
if not (function or method):
return False
args, _, _, defaults = inspect.getargspec(obj)
len_args = len(args) if function else len(args) - 1
len_defaults = len(defaults) if defaults else 0
return len_args <= len_defaults
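# A hedged illustration of the rule above: a callable is "simple" when every
# positional argument has a default, so it can be invoked with no arguments
# (for bound methods, `self` is excluded from the count).
#
#     def f(): ...          # simple
#     def g(x=1): ...       # simple
#     def h(x): ...         # not simple (one argument without a default)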
def get_attribute(instance, attrs):
"""
Similar to Python's built in `getattr(instance, attr)`,
but takes a list of nested attributes, instead of a single attribute.
Also accepts either attribute lookup on objects or dictionary lookups.
"""
for attr in attrs:
if instance is None:
# Break out early if we get `None` at any point in a nested lookup.
return None
try:
if isinstance(instance, collections.Mapping):
instance = instance[attr]
else:
instance = getattr(instance, attr)
except ObjectDoesNotExist:
return None
if is_simple_callable(instance):
instance = instance()
return instance
def set_value(dictionary, keys, value):
"""
Similar to Python's built in `dictionary[key] = value`,
but takes a list of nested keys instead of a single key.
set_value({'a': 1}, [], {'b': 2}) -> {'a': 1, 'b': 2}
set_value({'a': 1}, ['x'], 2) -> {'a': 1, 'x': 2}
set_value({'a': 1}, ['x', 'y'], 2) -> {'a': 1, 'x': {'y': 2}}
"""
if not keys:
dictionary.update(value)
return
for key in keys[:-1]:
if key not in dictionary:
dictionary[key] = {}
dictionary = dictionary[key]
dictionary[keys[-1]] = value
class CreateOnlyDefault:
"""
This class may be used to provide default values that are only used
for create operations, but that do not return any value for update
operations.
"""
def __init__(self, default):
self.default = default
def set_context(self, serializer_field):
self.is_update = serializer_field.parent.instance is not None
def __call__(self):
if self.is_update:
raise SkipField()
if callable(self.default):
return self.default()
return self.default
def __repr__(self):
return unicode_to_repr(
'%s(%s)' % (self.__class__.__name__, unicode_repr(self.default))
)
class CurrentUserDefault:
def set_context(self, serializer_field):
self.user = serializer_field.context['request'].user
def __call__(self):
return self.user
def __repr__(self):
return unicode_to_repr('%s()' % self.__class__.__name__)
class SkipField(Exception):
pass
NOT_READ_ONLY_WRITE_ONLY = 'May not set both `read_only` and `write_only`'
NOT_READ_ONLY_REQUIRED = 'May not set both `read_only` and `required`'
NOT_REQUIRED_DEFAULT = 'May not set both `required` and `default`'
USE_READONLYFIELD = 'Field(read_only=True) should be ReadOnlyField'
MISSING_ERROR_MESSAGE = (
'ValidationError raised by `{class_name}`, but error key `{key}` does '
'not exist in the `error_messages` dictionary.'
)
class Field(object):
_creation_counter = 0
default_error_messages = {
'required': _('This field is required.'),
'null': _('This field may not be null.')
}
default_validators = []
default_empty_html = empty
initial = None
def __init__(self, read_only=False, write_only=False,
required=None, default=empty, initial=empty, source=None,
label=None, help_text=None, style=None,
error_messages=None, validators=None, allow_null=False):
self._creation_counter = Field._creation_counter
Field._creation_counter += 1
# If `required` is unset, then use `True` unless a default is provided.
if required is None:
required = default is empty and not read_only
# Some combinations of keyword arguments do not make sense.
assert not (read_only and write_only), NOT_READ_ONLY_WRITE_ONLY
assert not (read_only and required), NOT_READ_ONLY_REQUIRED
assert not (required and default is not empty), NOT_REQUIRED_DEFAULT
assert not (read_only and self.__class__ == Field), USE_READONLYFIELD
self.read_only = read_only
self.write_only = write_only
self.required = required
self.default = default
self.source = source
self.initial = self.initial if (initial is empty) else initial
self.label = label
self.help_text = help_text
self.style = {} if style is None else style
self.allow_null = allow_null
if self.default_empty_html is not empty:
if not required:
self.default_empty_html = empty
elif default is not empty:
self.default_empty_html = default
if validators is not None:
self.validators = validators[:]
# These are set up by `.bind()` when the field is added to a serializer.
self.field_name = None
self.parent = None
# Collect default error message from self and parent classes
messages = {}
for cls in reversed(self.__class__.__mro__):
messages.update(getattr(cls, 'default_error_messages', {}))
messages.update(error_messages or {})
self.error_messages = messages
def bind(self, field_name, parent):
"""
Initializes the field name and parent for the field instance.
Called when a field is added to the parent serializer instance.
"""
# In order to enforce a consistent style, we error if a redundant
# 'source' argument has been used. For example:
# my_field = serializer.CharField(source='my_field')
assert self.source != field_name, (
"It is redundant to specify `source='%s'` on field '%s' in "
"serializer '%s', because it is the same as the field name. "
"Remove the `source` keyword argument." %
(field_name, self.__class__.__name__, parent.__class__.__name__)
)
self.field_name = field_name
self.parent = parent
# `self.label` should default to being based on the field name.
if self.label is None:
self.label = field_name.replace('_', ' ').capitalize()
# self.source should default to being the same as the field name.
if self.source is None:
self.source = field_name
# self.source_attrs is a list of attributes that need to be looked up
# when serializing the instance, or populating the validated data.
if self.source == '*':
self.source_attrs = []
else:
self.source_attrs = self.source.split('.')
# .validators is a lazily loaded property, that gets its default
# value from `get_validators`.
@property
def validators(self):
if not hasattr(self, '_validators'):
self._validators = self.get_validators()
return self._validators
@validators.setter
def validators(self, validators):
self._validators = validators
def get_validators(self):
return self.default_validators[:]
def get_initial(self):
"""
Return a value to use when the field is being returned as a primitive
value, without any object instance.
"""
return self.initial
def get_value(self, dictionary):
"""
Given the *incoming* primitive data, return the value for this field
that should be validated and transformed to a native value.
"""
if html.is_html_input(dictionary):
# HTML forms will represent empty fields as '', and cannot
# represent None or False values directly.
if self.field_name not in dictionary:
if getattr(self.root, 'partial', False):
return empty
return self.default_empty_html
ret = dictionary[self.field_name]
if ret == '' and self.allow_null:
# If the field is blank, and null is a valid value then
# determine if we should use null instead.
return '' if getattr(self, 'allow_blank', False) else None
return ret
return dictionary.get(self.field_name, empty)
def get_attribute(self, instance):
"""
Given the *outgoing* object instance, return the primitive value
that should be used for this field.
"""
try:
return get_attribute(instance, self.source_attrs)
except (KeyError, AttributeError) as exc:
if not self.required and self.default is empty:
raise SkipField()
msg = (
'Got {exc_type} when attempting to get a value for field '
'`{field}` on serializer `{serializer}`.\nThe serializer '
'field might be named incorrectly and not match '
'any attribute or key on the `{instance}` instance.\n'
'Original exception text was: {exc}.'.format(
exc_type=type(exc).__name__,
field=self.field_name,
serializer=self.parent.__class__.__name__,
instance=instance.__class__.__name__,
exc=exc
)
)
raise type(exc)(msg)
def get_default(self):
"""
Return the default value to use when validating data if no input
is provided for this field.
If a default has not been set for this field then this will simply
return `empty`, indicating that no value should be set in the
validated data for this field.
"""
if self.default is empty:
raise SkipField()
if callable(self.default):
if hasattr(self.default, 'set_context'):
self.default.set_context(self)
return self.default()
return self.default
def validate_empty_values(self, data):
"""
Validate empty values, and either:
* Raise `ValidationError`, indicating invalid data.
* Raise `SkipField`, indicating that the field should be ignored.
* Return (True, data), indicating an empty value that should be
          returned without any further validation being applied.
* Return (False, data), indicating a non-empty value, that should
have validation applied as normal.
"""
if self.read_only:
return (True, self.get_default())
if data is empty:
if getattr(self.root, 'partial', False):
raise SkipField()
if self.required:
self.fail('required')
return (True, self.get_default())
if data is None:
if not self.allow_null:
self.fail('null')
return (True, None)
return (False, data)
def run_validation(self, data=empty):
"""
Validate a simple representation and return the internal value.
The provided data may be `empty` if no representation was included
in the input.
May raise `SkipField` if the field should not be included in the
validated data.
"""
(is_empty_value, data) = self.validate_empty_values(data)
if is_empty_value:
return data
value = self.to_internal_value(data)
self.run_validators(value)
return value
def run_validators(self, value):
"""
Test the given value against all the validators on the field,
and either raise a `ValidationError` or simply return.
"""
errors = []
for validator in self.validators:
if hasattr(validator, 'set_context'):
validator.set_context(self)
try:
validator(value)
except ValidationError as exc:
# If the validation error contains a mapping of fields to
# errors then simply raise it immediately rather than
# attempting to accumulate a list of errors.
if isinstance(exc.detail, dict):
raise
errors.extend(exc.detail)
except DjangoValidationError as exc:
errors.extend(exc.messages)
if errors:
raise ValidationError(errors)
def to_internal_value(self, data):
"""
Transform the *incoming* primitive data into a native value.
"""
raise NotImplementedError(
'{cls}.to_internal_value() must be implemented.'.format(
cls=self.__class__.__name__
)
)
def to_representation(self, value):
"""
Transform the *outgoing* native value into primitive data.
"""
raise NotImplementedError(
'{cls}.to_representation() must be implemented.\n'
'If you are upgrading from REST framework version 2 '
'you might want `ReadOnlyField`.'.format(
cls=self.__class__.__name__
)
)
def fail(self, key, **kwargs):
"""
A helper method that simply raises a validation error.
"""
try:
msg = self.error_messages[key]
except KeyError:
class_name = self.__class__.__name__
msg = MISSING_ERROR_MESSAGE.format(class_name=class_name, key=key)
raise AssertionError(msg)
message_string = msg.format(**kwargs)
raise ValidationError(message_string)
@property
def root(self):
"""
Returns the top-level serializer for this field.
"""
root = self
while root.parent is not None:
root = root.parent
return root
@property
def context(self):
"""
Returns the context as passed to the root serializer on initialization.
"""
return getattr(self.root, '_context', {})
def __new__(cls, *args, **kwargs):
"""
When a field is instantiated, we store the arguments that were used,
so that we can present a helpful representation of the object.
"""
instance = super(Field, cls).__new__(cls)
instance._args = args
instance._kwargs = kwargs
return instance
def __deepcopy__(self, memo):
"""
When cloning fields we instantiate using the arguments it was
originally created with, rather than copying the complete state.
"""
args = copy.deepcopy(self._args)
kwargs = dict(self._kwargs)
# Bit ugly, but we need to special case 'validators' as Django's
# RegexValidator does not support deepcopy.
# We treat validator callables as immutable objects.
# See https://github.com/tomchristie/django-rest-framework/issues/1954
validators = kwargs.pop('validators', None)
kwargs = copy.deepcopy(kwargs)
if validators is not None:
kwargs['validators'] = validators
return self.__class__(*args, **kwargs)
def __repr__(self):
"""
Fields are represented using their initial calling arguments.
This allows us to create descriptive representations for serializer
instances that show all the declared fields on the serializer.
"""
return unicode_to_repr(representation.field_repr(self))
# Boolean types...
class BooleanField(Field):
default_error_messages = {
'invalid': _('`{input}` is not a valid boolean.')
}
default_empty_html = False
initial = False
TRUE_VALUES = set(('t', 'T', 'true', 'True', 'TRUE', '1', 1, True))
FALSE_VALUES = set(('f', 'F', 'false', 'False', 'FALSE', '0', 0, 0.0, False))
def __init__(self, **kwargs):
assert 'allow_null' not in kwargs, '`allow_null` is not a valid option. Use `NullBooleanField` instead.'
super(BooleanField, self).__init__(**kwargs)
def to_internal_value(self, data):
if data in self.TRUE_VALUES:
return True
elif data in self.FALSE_VALUES:
return False
self.fail('invalid', input=data)
def to_representation(self, value):
if value in self.TRUE_VALUES:
return True
elif value in self.FALSE_VALUES:
return False
return bool(value)
class NullBooleanField(Field):
default_error_messages = {
'invalid': _('`{input}` is not a valid boolean.')
}
initial = None
TRUE_VALUES = set(('t', 'T', 'true', 'True', 'TRUE', '1', 1, True))
FALSE_VALUES = set(('f', 'F', 'false', 'False', 'FALSE', '0', 0, 0.0, False))
NULL_VALUES = set(('n', 'N', 'null', 'Null', 'NULL', '', None))
def __init__(self, **kwargs):
assert 'allow_null' not in kwargs, '`allow_null` is not a valid option.'
kwargs['allow_null'] = True
super(NullBooleanField, self).__init__(**kwargs)
def to_internal_value(self, data):
if data in self.TRUE_VALUES:
return True
elif data in self.FALSE_VALUES:
return False
elif data in self.NULL_VALUES:
return None
self.fail('invalid', input=data)
def to_representation(self, value):
if value in self.NULL_VALUES:
return None
if value in self.TRUE_VALUES:
return True
elif value in self.FALSE_VALUES:
return False
return bool(value)
# String types...
class CharField(Field):
default_error_messages = {
'blank': _('This field may not be blank.'),
'max_length': _('Ensure this field has no more than {max_length} characters.'),
'min_length': _('Ensure this field has at least {min_length} characters.')
}
initial = ''
def __init__(self, **kwargs):
self.allow_blank = kwargs.pop('allow_blank', False)
max_length = kwargs.pop('max_length', None)
min_length = kwargs.pop('min_length', None)
super(CharField, self).__init__(**kwargs)
if max_length is not None:
message = self.error_messages['max_length'].format(max_length=max_length)
self.validators.append(MaxLengthValidator(max_length, message=message))
if min_length is not None:
message = self.error_messages['min_length'].format(min_length=min_length)
self.validators.append(MinLengthValidator(min_length, message=message))
def run_validation(self, data=empty):
# Test for the empty string here so that it does not get validated,
# and so that subclasses do not need to handle it explicitly
# inside the `to_internal_value()` method.
if data == '':
if not self.allow_blank:
self.fail('blank')
return ''
return super(CharField, self).run_validation(data)
def to_internal_value(self, data):
return six.text_type(data)
def to_representation(self, value):
return six.text_type(value)
class EmailField(CharField):
default_error_messages = {
'invalid': _('Enter a valid email address.')
}
def __init__(self, **kwargs):
super(EmailField, self).__init__(**kwargs)
validator = EmailValidator(message=self.error_messages['invalid'])
self.validators.append(validator)
def to_internal_value(self, data):
return six.text_type(data).strip()
def to_representation(self, value):
return six.text_type(value).strip()
class RegexField(CharField):
default_error_messages = {
'invalid': _('This value does not match the required pattern.')
}
def __init__(self, regex, **kwargs):
super(RegexField, self).__init__(**kwargs)
validator = RegexValidator(regex, message=self.error_messages['invalid'])
self.validators.append(validator)
class SlugField(CharField):
default_error_messages = {
'invalid': _("Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens.")
}
def __init__(self, **kwargs):
super(SlugField, self).__init__(**kwargs)
slug_regex = re.compile(r'^[-a-zA-Z0-9_]+$')
validator = RegexValidator(slug_regex, message=self.error_messages['invalid'])
self.validators.append(validator)
class URLField(CharField):
default_error_messages = {
'invalid': _("Enter a valid URL.")
}
def __init__(self, **kwargs):
super(URLField, self).__init__(**kwargs)
validator = URLValidator(message=self.error_messages['invalid'])
self.validators.append(validator)
# Number types...
class IntegerField(Field):
default_error_messages = {
'invalid': _('A valid integer is required.'),
'max_value': _('Ensure this value is less than or equal to {max_value}.'),
'min_value': _('Ensure this value is greater than or equal to {min_value}.'),
'max_string_length': _('String value too large')
}
MAX_STRING_LENGTH = 1000 # Guard against malicious string inputs.
def __init__(self, **kwargs):
max_value = kwargs.pop('max_value', None)
min_value = kwargs.pop('min_value', None)
super(IntegerField, self).__init__(**kwargs)
if max_value is not None:
message = self.error_messages['max_value'].format(max_value=max_value)
self.validators.append(MaxValueValidator(max_value, message=message))
if min_value is not None:
message = self.error_messages['min_value'].format(min_value=min_value)
self.validators.append(MinValueValidator(min_value, message=message))
def to_internal_value(self, data):
if isinstance(data, six.text_type) and len(data) > self.MAX_STRING_LENGTH:
self.fail('max_string_length')
try:
data = int(data)
except (ValueError, TypeError):
self.fail('invalid')
return data
def to_representation(self, value):
return int(value)
class FloatField(Field):
default_error_messages = {
'invalid': _("A valid number is required."),
'max_value': _('Ensure this value is less than or equal to {max_value}.'),
'min_value': _('Ensure this value is greater than or equal to {min_value}.'),
'max_string_length': _('String value too large')
}
MAX_STRING_LENGTH = 1000 # Guard against malicious string inputs.
def __init__(self, **kwargs):
max_value = kwargs.pop('max_value', None)
min_value = kwargs.pop('min_value', None)
super(FloatField, self).__init__(**kwargs)
if max_value is not None:
message = self.error_messages['max_value'].format(max_value=max_value)
self.validators.append(MaxValueValidator(max_value, message=message))
if min_value is not None:
message = self.error_messages['min_value'].format(min_value=min_value)
self.validators.append(MinValueValidator(min_value, message=message))
def to_internal_value(self, data):
if isinstance(data, six.text_type) and len(data) > self.MAX_STRING_LENGTH:
self.fail('max_string_length')
try:
return float(data)
except (TypeError, ValueError):
self.fail('invalid')
def to_representation(self, value):
return float(value)
class DecimalField(Field):
default_error_messages = {
'invalid': _('A valid number is required.'),
'max_value': _('Ensure this value is less than or equal to {max_value}.'),
'min_value': _('Ensure this value is greater than or equal to {min_value}.'),
'max_digits': _('Ensure that there are no more than {max_digits} digits in total.'),
'max_decimal_places': _('Ensure that there are no more than {max_decimal_places} decimal places.'),
'max_whole_digits': _('Ensure that there are no more than {max_whole_digits} digits before the decimal point.'),
'max_string_length': _('String value too large')
}
MAX_STRING_LENGTH = 1000 # Guard against malicious string inputs.
coerce_to_string = api_settings.COERCE_DECIMAL_TO_STRING
def __init__(self, max_digits, decimal_places, coerce_to_string=None, max_value=None, min_value=None, **kwargs):
self.max_digits = max_digits
self.decimal_places = decimal_places
self.coerce_to_string = coerce_to_string if (coerce_to_string is not None) else self.coerce_to_string
super(DecimalField, self).__init__(**kwargs)
if max_value is not None:
message = self.error_messages['max_value'].format(max_value=max_value)
self.validators.append(MaxValueValidator(max_value, message=message))
if min_value is not None:
message = self.error_messages['min_value'].format(min_value=min_value)
self.validators.append(MinValueValidator(min_value, message=message))
def to_internal_value(self, data):
"""
Validates that the input is a decimal number. Returns a Decimal
instance. Returns None for empty values. Ensures that there are no more
than max_digits in the number, and no more than decimal_places digits
after the decimal point.
"""
data = smart_text(data).strip()
if len(data) > self.MAX_STRING_LENGTH:
self.fail('max_string_length')
try:
value = decimal.Decimal(data)
except decimal.DecimalException:
self.fail('invalid')
# Check for NaN. It is the only value that isn't equal to itself,
# so we can use this to identify NaN values.
if value != value:
self.fail('invalid')
# Check for infinity and negative infinity.
if value in (decimal.Decimal('Inf'), decimal.Decimal('-Inf')):
self.fail('invalid')
sign, digittuple, exponent = value.as_tuple()
decimals = abs(exponent)
# digittuple doesn't include any leading zeros.
digits = len(digittuple)
if decimals > digits:
# We have leading zeros up to or past the decimal point. Count
# everything past the decimal point as a digit. We do not count
# 0 before the decimal point as a digit since that would mean
# we would not allow max_digits = decimal_places.
digits = decimals
whole_digits = digits - decimals
if self.max_digits is not None and digits > self.max_digits:
self.fail('max_digits', max_digits=self.max_digits)
if self.decimal_places is not None and decimals > self.decimal_places:
self.fail('max_decimal_places', max_decimal_places=self.decimal_places)
if self.max_digits is not None and self.decimal_places is not None and whole_digits > (self.max_digits - self.decimal_places):
self.fail('max_whole_digits', max_whole_digits=self.max_digits - self.decimal_places)
return value
def to_representation(self, value):
if not isinstance(value, decimal.Decimal):
value = decimal.Decimal(six.text_type(value).strip())
context = decimal.getcontext().copy()
context.prec = self.max_digits
quantized = value.quantize(
decimal.Decimal('.1') ** self.decimal_places,
context=context
)
if not self.coerce_to_string:
return quantized
return '{0:f}'.format(quantized)
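# A worked illustration of the digit-count checks above, assuming a field
# declared with max_digits=5 and decimal_places=2 (hypothetical values):
#
#     Decimal('123.45')  -> 5 digits, 2 decimals, 3 whole digits: accepted
#     Decimal('0.01234') -> digittuple has 4 digits but the exponent is -5, so
#                           decimals (5) > digits and digits is bumped to 5;
#                           5 decimal places > 2 fails 'max_decimal_places'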
# Date & time fields...
class DateTimeField(Field):
default_error_messages = {
'invalid': _('Datetime has wrong format. Use one of these formats instead: {format}'),
'date': _('Expected a datetime but got a date.'),
}
format = api_settings.DATETIME_FORMAT
input_formats = api_settings.DATETIME_INPUT_FORMATS
default_timezone = timezone.get_default_timezone() if settings.USE_TZ else None
def __init__(self, format=empty, input_formats=None, default_timezone=None, *args, **kwargs):
self.format = format if format is not empty else self.format
self.input_formats = input_formats if input_formats is not None else self.input_formats
self.default_timezone = default_timezone if default_timezone is not None else self.default_timezone
super(DateTimeField, self).__init__(*args, **kwargs)
def enforce_timezone(self, value):
"""
When `self.default_timezone` is `None`, always return naive datetimes.
When `self.default_timezone` is not `None`, always return aware datetimes.
"""
if (self.default_timezone is not None) and not timezone.is_aware(value):
return timezone.make_aware(value, self.default_timezone)
elif (self.default_timezone is None) and timezone.is_aware(value):
return timezone.make_naive(value, timezone.UTC())
return value
def to_internal_value(self, value):
if isinstance(value, datetime.date) and not isinstance(value, datetime.datetime):
self.fail('date')
if isinstance(value, datetime.datetime):
return self.enforce_timezone(value)
for format in self.input_formats:
if format.lower() == ISO_8601:
try:
parsed = parse_datetime(value)
except (ValueError, TypeError):
pass
else:
if parsed is not None:
return self.enforce_timezone(parsed)
else:
try:
parsed = datetime.datetime.strptime(value, format)
except (ValueError, TypeError):
pass
else:
return self.enforce_timezone(parsed)
humanized_format = humanize_datetime.datetime_formats(self.input_formats)
self.fail('invalid', format=humanized_format)
def to_representation(self, value):
if self.format is None:
return value
if self.format.lower() == ISO_8601:
value = value.isoformat()
if value.endswith('+00:00'):
value = value[:-6] + 'Z'
return value
return value.strftime(self.format)
class DateField(Field):
default_error_messages = {
'invalid': _('Date has wrong format. Use one of these formats instead: {format}'),
'datetime': _('Expected a date but got a datetime.'),
}
format = api_settings.DATE_FORMAT
input_formats = api_settings.DATE_INPUT_FORMATS
def __init__(self, format=empty, input_formats=None, *args, **kwargs):
self.format = format if format is not empty else self.format
self.input_formats = input_formats if input_formats is not None else self.input_formats
super(DateField, self).__init__(*args, **kwargs)
def to_internal_value(self, value):
if isinstance(value, datetime.datetime):
self.fail('datetime')
if isinstance(value, datetime.date):
return value
for format in self.input_formats:
if format.lower() == ISO_8601:
try:
parsed = parse_date(value)
except (ValueError, TypeError):
pass
else:
if parsed is not None:
return parsed
else:
try:
parsed = datetime.datetime.strptime(value, format)
except (ValueError, TypeError):
pass
else:
return parsed.date()
humanized_format = humanize_datetime.date_formats(self.input_formats)
self.fail('invalid', format=humanized_format)
def to_representation(self, value):
if self.format is None:
return value
# Applying a `DateField` to a datetime value is almost always
# not a sensible thing to do, as it means naively dropping
# any explicit or implicit timezone info.
assert not isinstance(value, datetime.datetime), (
'Expected a `date`, but got a `datetime`. Refusing to coerce, '
'as this may mean losing timezone information. Use a custom '
'read-only field and deal with timezone issues explicitly.'
)
if self.format.lower() == ISO_8601:
return value.isoformat()
return value.strftime(self.format)
class TimeField(Field):
default_error_messages = {
'invalid': _('Time has wrong format. Use one of these formats instead: {format}'),
}
format = api_settings.TIME_FORMAT
input_formats = api_settings.TIME_INPUT_FORMATS
def __init__(self, format=empty, input_formats=None, *args, **kwargs):
self.format = format if format is not empty else self.format
self.input_formats = input_formats if input_formats is not None else self.input_formats
super(TimeField, self).__init__(*args, **kwargs)
def to_internal_value(self, value):
if isinstance(value, datetime.time):
return value
for format in self.input_formats:
if format.lower() == ISO_8601:
try:
parsed = parse_time(value)
except (ValueError, TypeError):
pass
else:
if parsed is not None:
return parsed
else:
try:
parsed = datetime.datetime.strptime(value, format)
except (ValueError, TypeError):
pass
else:
return parsed.time()
humanized_format = humanize_datetime.time_formats(self.input_formats)
self.fail('invalid', format=humanized_format)
def to_representation(self, value):
if self.format is None:
return value
# Applying a `TimeField` to a datetime value is almost always
# not a sensible thing to do, as it means naively dropping
# any explicit or implicit timezone info.
assert not isinstance(value, datetime.datetime), (
'Expected a `time`, but got a `datetime`. Refusing to coerce, '
'as this may mean losing timezone information. Use a custom '
'read-only field and deal with timezone issues explicitly.'
)
if self.format.lower() == ISO_8601:
return value.isoformat()
return value.strftime(self.format)
# Choice types...
class ChoiceField(Field):
default_error_messages = {
'invalid_choice': _('`{input}` is not a valid choice.')
}
def __init__(self, choices, **kwargs):
# Allow either single or paired choices style:
# choices = [1, 2, 3]
# choices = [(1, 'First'), (2, 'Second'), (3, 'Third')]
pairs = [
isinstance(item, (list, tuple)) and len(item) == 2
for item in choices
]
if all(pairs):
self.choices = OrderedDict([(key, display_value) for key, display_value in choices])
else:
self.choices = OrderedDict([(item, item) for item in choices])
# Map the string representation of choices to the underlying value.
# Allows us to deal with eg. integer choices while supporting either
# integer or string input, but still get the correct datatype out.
self.choice_strings_to_values = dict([
(six.text_type(key), key) for key in self.choices.keys()
])
self.allow_blank = kwargs.pop('allow_blank', False)
super(ChoiceField, self).__init__(**kwargs)
def to_internal_value(self, data):
if data == '' and self.allow_blank:
return ''
try:
return self.choice_strings_to_values[six.text_type(data)]
except KeyError:
self.fail('invalid_choice', input=data)
def to_representation(self, value):
if value in ('', None):
return value
return self.choice_strings_to_values[six.text_type(value)]
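# A hedged illustration of the string<->value mapping built above, assuming
# integer choices: ChoiceField(choices=[1, 2, 3]) stores {'1': 1, '2': 2, '3': 3},
# so to_internal_value('2') and to_internal_value(2) both return the int 2,
# while to_internal_value(4) fails with 'invalid_choice'.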
class MultipleChoiceField(ChoiceField):
default_error_messages = {
'invalid_choice': _('`{input}` is not a valid choice.'),
'not_a_list': _('Expected a list of items but got type `{input_type}`.')
}
default_empty_html = []
def get_value(self, dictionary):
# We override the default field access in order to support
# lists in HTML forms.
if html.is_html_input(dictionary):
return dictionary.getlist(self.field_name)
return dictionary.get(self.field_name, empty)
def to_internal_value(self, data):
if isinstance(data, type('')) or not hasattr(data, '__iter__'):
self.fail('not_a_list', input_type=type(data).__name__)
return set([
super(MultipleChoiceField, self).to_internal_value(item)
for item in data
])
def to_representation(self, value):
return set([
self.choice_strings_to_values[six.text_type(item)] for item in value
])
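# Illustrative usage sketch (not part of the original module): MultipleChoiceField
# accepts any iterable of valid choices and normalises the result to a set.
def _example_multiple_choice_usage():
    field = MultipleChoiceField(choices=['red', 'green', 'blue'])
    return field.to_internal_value(['red', 'blue'])   # -> {'red', 'blue'}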
# File types...
class FileField(Field):
default_error_messages = {
'required': _("No file was submitted."),
'invalid': _("The submitted data was not a file. Check the encoding type on the form."),
'no_name': _("No filename could be determined."),
'empty': _("The submitted file is empty."),
'max_length': _('Ensure this filename has at most {max_length} characters (it has {length}).'),
}
use_url = api_settings.UPLOADED_FILES_USE_URL
def __init__(self, *args, **kwargs):
self.max_length = kwargs.pop('max_length', None)
self.allow_empty_file = kwargs.pop('allow_empty_file', False)
self.use_url = kwargs.pop('use_url', self.use_url)
super(FileField, self).__init__(*args, **kwargs)
def to_internal_value(self, data):
try:
# `UploadedFile` objects should have name and size attributes.
file_name = data.name
file_size = data.size
except AttributeError:
self.fail('invalid')
if not file_name:
self.fail('no_name')
if not self.allow_empty_file and not file_size:
self.fail('empty')
if self.max_length and len(file_name) > self.max_length:
self.fail('max_length', max_length=self.max_length, length=len(file_name))
return data
def to_representation(self, value):
if self.use_url:
if not value:
return None
url = value.url
request = self.context.get('request', None)
if request is not None:
return request.build_absolute_uri(url)
return url
return value.name
class ImageField(FileField):
default_error_messages = {
'invalid_image': _(
'Upload a valid image. The file you uploaded was either not an '
'image or a corrupted image.'
),
}
def __init__(self, *args, **kwargs):
self._DjangoImageField = kwargs.pop('_DjangoImageField', DjangoImageField)
super(ImageField, self).__init__(*args, **kwargs)
def to_internal_value(self, data):
# Image validation is a bit grungy, so we'll just outright
# defer to Django's implementation so we don't need to
# consider it, or treat PIL as a test dependency.
file_object = super(ImageField, self).to_internal_value(data)
django_field = self._DjangoImageField()
django_field.error_messages = self.error_messages
django_field.to_python(file_object)
return file_object
# Composite field types...
class ListField(Field):
child = None
initial = []
default_error_messages = {
'not_a_list': _('Expected a list of items but got type `{input_type}`')
}
def __init__(self, *args, **kwargs):
self.child = kwargs.pop('child', copy.deepcopy(self.child))
assert self.child is not None, '`child` is a required argument.'
assert not inspect.isclass(self.child), '`child` has not been instantiated.'
super(ListField, self).__init__(*args, **kwargs)
self.child.bind(field_name='', parent=self)
def get_value(self, dictionary):
# We override the default field access in order to support
# lists in HTML forms.
if html.is_html_input(dictionary):
return html.parse_html_list(dictionary, prefix=self.field_name)
return dictionary.get(self.field_name, empty)
def to_internal_value(self, data):
"""
List of dicts of native values <- List of dicts of primitive datatypes.
"""
if html.is_html_input(data):
data = html.parse_html_list(data)
if isinstance(data, type('')) or not hasattr(data, '__iter__'):
self.fail('not_a_list', input_type=type(data).__name__)
return [self.child.run_validation(item) for item in data]
def to_representation(self, data):
"""
List of object instances -> List of dicts of primitive datatypes.
"""
return [self.child.to_representation(item) for item in data]
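# Illustrative usage sketch (assumes the IntegerField defined earlier in this
# module): ListField runs every item through its child field, so a list of
# numeric strings comes back as a list of ints.
def _example_list_field_usage():
    field = ListField(child=IntegerField())
    return field.to_internal_value(['1', '2', '3'])   # -> [1, 2, 3]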
# Miscellaneous field types...
class ReadOnlyField(Field):
"""
A read-only field that simply returns the field value.
If the field is a method with no parameters, the method will be called
and its return value used as the representation.
For example, the following would call `get_expiry_date()` on the object:
class ExampleSerializer(self):
expiry_date = ReadOnlyField(source='get_expiry_date')
"""
def __init__(self, **kwargs):
kwargs['read_only'] = True
super(ReadOnlyField, self).__init__(**kwargs)
def to_representation(self, value):
return value
class HiddenField(Field):
"""
A hidden field does not take input from the user, or present any output,
but it does populate a field in `validated_data`, based on its default
value. This is particularly useful when we have a `unique_for_date`
constraint on a pair of fields, as we need some way to include the date in
the validated data.
"""
def __init__(self, **kwargs):
assert 'default' in kwargs, 'default is a required argument.'
kwargs['write_only'] = True
super(HiddenField, self).__init__(**kwargs)
def get_value(self, dictionary):
# We always use the default value for `HiddenField`.
# User input is never provided or accepted.
return empty
def to_internal_value(self, data):
return data
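# Illustrative usage sketch (not part of the original module): a HiddenField never
# reads user input -- get_value() always returns `empty` -- so validation falls back
# to the declared default, which is what ends up in validated_data.
def _example_hidden_field_usage():
    field = HiddenField(default='value-set-by-server')
    assert field.get_value({'anything': 'ignored'}) is empty
    return field.run_validation()   # expected to return 'value-set-by-server'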
class SerializerMethodField(Field):
"""
A read-only field that gets its representation from calling a method on the
parent serializer class. The method called will be of the form
"get_{field_name}", and should take a single argument, which is the
object being serialized.
For example:
class ExampleSerializer(self):
extra_info = SerializerMethodField()
def get_extra_info(self, obj):
return ... # Calculate some data to return.
"""
def __init__(self, method_name=None, **kwargs):
self.method_name = method_name
kwargs['source'] = '*'
kwargs['read_only'] = True
super(SerializerMethodField, self).__init__(**kwargs)
def bind(self, field_name, parent):
# In order to enforce a consistent style, we error if a redundant
# 'method_name' argument has been used. For example:
# my_field = serializer.CharField(source='my_field')
default_method_name = 'get_{field_name}'.format(field_name=field_name)
assert self.method_name != default_method_name, (
"It is redundant to specify `%s` on SerializerMethodField '%s' in "
"serializer '%s', because it is the same as the default method name. "
"Remove the `method_name` argument." %
(self.method_name, field_name, parent.__class__.__name__)
)
# The method name should default to `get_{field_name}`.
if self.method_name is None:
self.method_name = default_method_name
super(SerializerMethodField, self).bind(field_name, parent)
def to_representation(self, value):
method = getattr(self.parent, self.method_name)
return method(value)
class ModelField(Field):
"""
A generic field that can be used against an arbitrary model field.
This is used by `ModelSerializer` when dealing with custom model fields,
that do not have a serializer field to be mapped to.
"""
default_error_messages = {
'max_length': _('Ensure this field has no more than {max_length} characters.'),
}
def __init__(self, model_field, **kwargs):
self.model_field = model_field
# The `max_length` option is supported by Django's base `Field` class,
# so we'd better support it here.
max_length = kwargs.pop('max_length', None)
super(ModelField, self).__init__(**kwargs)
if max_length is not None:
message = self.error_messages['max_length'].format(max_length=max_length)
self.validators.append(MaxLengthValidator(max_length, message=message))
def to_internal_value(self, data):
rel = getattr(self.model_field, 'rel', None)
if rel is not None:
return rel.to._meta.get_field(rel.field_name).to_python(data)
return self.model_field.to_python(data)
def get_attribute(self, obj):
# We pass the object instance onto `to_representation`,
# not just the field attribute.
return obj
def to_representation(self, obj):
value = self.model_field._get_val_from_obj(obj)
if is_protected_type(value):
return value
return self.model_field.value_to_string(obj)
|
paulormart/gae-project-skeleton-100
|
gae/lib/rest_framework/fields.py
|
Python
|
mit
| 47,325
|
"""autogenerated by genpy from aidu_gui/Solenoid.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class Solenoid(genpy.Message):
_md5sum = "cb57accc540fd18e2aa6911a9b7363e5"
_type = "aidu_gui/Solenoid"
_has_header = False #flag to mark the presence of a Header object
_full_text = """int8 solenoid_number
int8 lade_1=1
int8 lade_2=2
int8 lade_3=3
"""
# Pseudo-constants
lade_1 = 1
lade_2 = 2
lade_3 = 3
__slots__ = ['solenoid_number']
_slot_types = ['int8']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommended
use is keyword arguments, as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
solenoid_number
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(Solenoid, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.solenoid_number is None:
self.solenoid_number = 0
else:
self.solenoid_number = 0
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
buff.write(_struct_b.pack(self.solenoid_number))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
start = end
end += 1
(self.solenoid_number,) = _struct_b.unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
buff.write(_struct_b.pack(self.solenoid_number))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
start = end
end += 1
(self.solenoid_number,) = _struct_b.unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
_struct_b = struct.Struct("<b")
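# Illustrative sketch (not part of the generated message code): a Solenoid message
# is a single signed byte on the wire, so serialize()/deserialize() round-trips
# solenoid_number unchanged.
def _example_solenoid_roundtrip():
    from io import BytesIO
    buff = BytesIO()
    msg = Solenoid(solenoid_number=Solenoid.lade_2)
    msg.serialize(buff)
    copy = Solenoid()
    copy.deserialize(buff.getvalue())
    return copy.solenoid_number   # -> 2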
|
MartienLagerweij/aidu
|
aidu_gui/src/aidu_gui/msg/_Solenoid.py
|
Python
|
mit
| 3,338
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/creature/npc/base/shared_bothan_base_female.iff"
result.attribute_template_id = 9
result.stfName("npc_name","bothan_base_female")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
anhstudios/swganh
|
data/scripts/templates/object/creature/npc/base/shared_bothan_base_female.py
|
Python
|
mit
| 457
|
from .delete_nth import *
from .flatten import *
from .garage import *
from .josephus import *
from .longest_non_repeat import *
from .max_ones_index import *
from .merge_intervals import *
from .missing_ranges import *
from .move_zeros import *
from .plus_one import *
from .rotate import *
from .summarize_ranges import *
from .three_sum import *
from .trimmean import *
from .top_1 import *
from .two_sum import *
from .limit import *
from .n_sum import *
|
keon/algorithms
|
algorithms/arrays/__init__.py
|
Python
|
mit
| 459
|
import demistomock as demisto
from CommonServerPython import * # noqa # pylint: disable=unused-wildcard-import
from CommonServerUserPython import * # noqa
"""MatchIPinCIDRIndicators
"""
from typing import Dict, Any
import ipaddress
import traceback
''' STANDALONE FUNCTION '''
''' COMMAND FUNCTION '''
def match_ip_in_cidr_indicators(args: Dict[str, Any]) -> CommandResults:
"""
match_ip_in_cidr_indicators
Given an IP address in the args dictionary, returns the indicator of type CIDR with the
longest prefix matching the IP.
:type args: ``Dict[str, Any]``
:param args: Dictionary of arguments. Should contain the "ip" address, and optionally
a "tags" argument with a list of tags to filter indicators.
:return: Result of the search.
:rtype: ``CommandResults``
"""
ip = args.get('ip', None)
if not ip:
raise ValueError('ip not provided')
tags = argToList(args.get('tags', []))
keys = ['id', 'value', 'CustomFields', 'type', 'score', 'firstSeen', 'lastSeen',
'expiration', 'expirationStatus', 'sourceBrands', 'sourceInstances']
tagquery = f' and tags:({" OR ".join(tags)})' if tags else ''
ranges = []
for r in range(32, 7, -1):
ranges.append(str(ipaddress.ip_network(f'{ip}/{r}', strict=False)))
joinexpr = '\" or value:\"'.join(ranges)
query = f'type:CIDR{tagquery} and ( value:"{joinexpr}")'
indicators = demisto.executeCommand("findIndicators", {"query": query, 'size': 32})
outputs = list()
if not isinstance(indicators, list) or len(indicators) < 1 or 'Contents' not in indicators[0]:
raise ValueError('No content')
longest_match = 0
found_ind: Dict = {}
for i in indicators[0]['Contents']:
if 'value' not in i:
continue
pfx = ipaddress.ip_network(i['value']).prefixlen
if pfx > longest_match:
longest_match = pfx
found_ind = i
oi = dict()
for k in found_ind.keys():
if k in keys:
oi[k] = found_ind[k]
outputs.append(oi)
return CommandResults(
outputs_prefix='MatchingCIDRIndicator',
outputs_key_field='value',
outputs=outputs,
ignore_auto_extract=True
)
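# Illustrative sketch (not part of the original script): the query above is built
# from every enclosing network of the input address, from /32 down to /8, so the
# longest-prefix match can be picked from whatever indicators come back.
def _example_candidate_networks(ip='10.1.2.3'):
    return [str(ipaddress.ip_network(f'{ip}/{prefix}', strict=False))
            for prefix in range(32, 7, -1)]
    # -> ['10.1.2.3/32', '10.1.2.2/31', ..., '10.0.0.0/8'] (25 candidates)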
''' MAIN FUNCTION '''
def main():
try:
return_results(match_ip_in_cidr_indicators(demisto.args()))
except Exception as ex:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute MatchIPinCIDRIndicators. Error: {str(ex)}')
''' ENTRY POINT '''
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
|
demisto/content
|
Packs/ExpanseV2/Scripts/MatchIPinCIDRIndicators/MatchIPinCIDRIndicators.py
|
Python
|
mit
| 2,632
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/deed/event_perk/shared_yavin_flag_deed.iff"
result.attribute_template_id = 2
result.stfName("event_perk","yavin_flag_deed_name")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
anhstudios/swganh
|
data/scripts/templates/object/tangible/deed/event_perk/shared_yavin_flag_deed.py
|
Python
|
mit
| 465
|
import requests
from allauth.socialaccount.providers.oauth2.views import (
OAuth2Adapter,
OAuth2CallbackView,
OAuth2LoginView,
)
from .provider import AmazonProvider
class AmazonOAuth2Adapter(OAuth2Adapter):
provider_id = AmazonProvider.id
access_token_url = "https://api.amazon.com/auth/o2/token"
authorize_url = "https://www.amazon.com/ap/oa"
profile_url = "https://api.amazon.com/user/profile"
supports_state = False
redirect_uri_protocol = "https"
def complete_login(self, request, app, token, **kwargs):
response = requests.get(self.profile_url, params={"access_token": token})
extra_data = response.json()
if "Profile" in extra_data:
extra_data = {
"user_id": extra_data["Profile"]["CustomerId"],
"name": extra_data["Profile"]["Name"],
"email": extra_data["Profile"]["PrimaryEmail"],
}
return self.get_provider().sociallogin_from_response(request, extra_data)
oauth2_login = OAuth2LoginView.adapter_view(AmazonOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(AmazonOAuth2Adapter)
|
pennersr/django-allauth
|
allauth/socialaccount/providers/amazon/views.py
|
Python
|
mit
| 1,152
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/clothing/shared_clothing_pants_formal_38.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
anhstudios/swganh
|
data/scripts/templates/object/draft_schematic/clothing/shared_clothing_pants_formal_38.py
|
Python
|
mit
| 462
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/intangible/pet/shared_swirl_prong_hue.iff"
result.attribute_template_id = -1
result.stfName("","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
anhstudios/swganh
|
data/scripts/templates/object/intangible/pet/shared_swirl_prong_hue.py
|
Python
|
mit
| 428
|
"""
A place to Get Stuff Out Of Views.
As this fills up, move into different places...this is just a handy bucket for
now, until we understand better what organization we need.
"""
import requests
import re
def remove_script_tags(str):
inside = re.match("<script[^>]+>(.+)</script>", str)
if inside:
return inside.group(1)
else:
return ""
def bust_caches(resp):
headers = {
"Cache-Control": "no-cache, no-store, must-revalidate",
"Pragma": "no-cache",
"Expires": 0
}
for k, v in headers.iteritems():
resp.headers[k] = v
return resp
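# Illustrative usage sketch (not part of the original module): remove_script_tags()
# returns only the body of a leading <script ...> element, or "" when nothing
# matches (note the regex requires at least one attribute after "script").
def _example_remove_script_tags():
    return remove_script_tags('<script type="text/javascript">var x = 1;</script>')
    # -> 'var x = 1;'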
|
total-impact/total-impact-webapp
|
totalimpactwebapp/views_helpers.py
|
Python
|
mit
| 624
|
# -*- coding: utf-8 -*-
# Copyright (C) 2019 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Pure AC3 file information.
"""
__all__ = ["AC3", "Open"]
from mutagen import StreamInfo
from mutagen._file import FileType
from mutagen._util import (
BitReader,
BitReaderError,
MutagenError,
convert_error,
enum,
loadfile,
endswith,
)
@enum
class ChannelMode(object):
DUALMONO = 0
MONO = 1
STEREO = 2
C3F = 3
C2F1R = 4
C3F1R = 5
C2F2R = 6
C3F2R = 7
AC3_CHANNELS = {
ChannelMode.DUALMONO: 2,
ChannelMode.MONO: 1,
ChannelMode.STEREO: 2,
ChannelMode.C3F: 3,
ChannelMode.C2F1R: 3,
ChannelMode.C3F1R: 4,
ChannelMode.C2F2R: 4,
ChannelMode.C3F2R: 5
}
AC3_HEADER_SIZE = 7
AC3_SAMPLE_RATES = [48000, 44100, 32000]
AC3_BITRATES = [
32, 40, 48, 56, 64, 80, 96, 112, 128,
160, 192, 224, 256, 320, 384, 448, 512, 576, 640
]
@enum
class EAC3FrameType(object):
INDEPENDENT = 0
DEPENDENT = 1
AC3_CONVERT = 2
RESERVED = 3
EAC3_BLOCKS = [1, 2, 3, 6]
class AC3Error(MutagenError):
pass
class AC3Info(StreamInfo):
"""AC3 stream information.
The length of the stream is just a guess and might not be correct.
Attributes:
channels (`int`): number of audio channels
length (`float`): file length in seconds, as a float
sample_rate (`int`): audio sampling rate in Hz
bitrate (`int`): audio bitrate, in bits per second
codec (`str`): ac-3 or ec-3 (Enhanced AC-3)
"""
channels = 0
length = 0
sample_rate = 0
bitrate = 0
codec = 'ac-3'
@convert_error(IOError, AC3Error)
def __init__(self, fileobj):
"""Raises AC3Error"""
header = bytearray(fileobj.read(6))
if len(header) < 6:
raise AC3Error("not enough data")
if not header.startswith(b"\x0b\x77"):
raise AC3Error("not a AC3 file")
bitstream_id = header[5] >> 3
if bitstream_id > 16:
raise AC3Error("invalid bitstream_id %i" % bitstream_id)
fileobj.seek(2)
self._read_header(fileobj, bitstream_id)
def _read_header(self, fileobj, bitstream_id):
bitreader = BitReader(fileobj)
try:
# This is partially based on code from
# https://github.com/FFmpeg/FFmpeg/blob/master/libavcodec/ac3_parser.c
if bitstream_id <= 10: # Normal AC-3
self._read_header_normal(bitreader, bitstream_id)
else: # Enhanced AC-3
self._read_header_enhanced(bitreader)
except BitReaderError as e:
raise AC3Error(e)
self.length = self._guess_length(fileobj)
def _read_header_normal(self, bitreader, bitstream_id):
r = bitreader
r.skip(16) # 16 bit CRC
sr_code = r.bits(2)
if sr_code == 3:
raise AC3Error("invalid sample rate code %i" % sr_code)
frame_size_code = r.bits(6)
if frame_size_code > 37:
raise AC3Error("invalid frame size code %i" % frame_size_code)
r.skip(5) # bitstream ID, already read
r.skip(3) # bitstream mode, not needed
channel_mode = ChannelMode(r.bits(3))
r.skip(2) # dolby surround mode or surround mix level
lfe_on = r.bits(1)
sr_shift = max(bitstream_id, 8) - 8
try:
self.sample_rate = AC3_SAMPLE_RATES[sr_code] >> sr_shift
self.bitrate = (AC3_BITRATES[frame_size_code >> 1] * 1000
) >> sr_shift
except KeyError as e:
raise AC3Error(e)
self.channels = self._get_channels(channel_mode, lfe_on)
self._skip_unused_header_bits_normal(r, channel_mode)
def _read_header_enhanced(self, bitreader):
r = bitreader
self.codec = "ec-3"
frame_type = r.bits(2)
if frame_type == EAC3FrameType.RESERVED:
raise AC3Error("invalid frame type %i" % frame_type)
r.skip(3) # substream ID, not needed
frame_size = (r.bits(11) + 1) << 1
if frame_size < AC3_HEADER_SIZE:
raise AC3Error("invalid frame size %i" % frame_size)
sr_code = r.bits(2)
try:
if sr_code == 3:
sr_code2 = r.bits(2)
if sr_code2 == 3:
raise AC3Error("invalid sample rate code %i" % sr_code2)
numblocks_code = 3
self.sample_rate = AC3_SAMPLE_RATES[sr_code2] // 2
else:
numblocks_code = r.bits(2)
self.sample_rate = AC3_SAMPLE_RATES[sr_code]
channel_mode = ChannelMode(r.bits(3))
lfe_on = r.bits(1)
self.bitrate = 8 * frame_size * self.sample_rate // (
EAC3_BLOCKS[numblocks_code] * 256)
except KeyError as e:
raise AC3Error(e)
r.skip(5) # bitstream ID, already read
self.channels = self._get_channels(channel_mode, lfe_on)
self._skip_unused_header_bits_enhanced(
r, frame_type, channel_mode, sr_code, numblocks_code)
@staticmethod
def _skip_unused_header_bits_normal(bitreader, channel_mode):
r = bitreader
r.skip(5) # Dialogue Normalization
if r.bits(1): # Compression Gain Word Exists
r.skip(8) # Compression Gain Word
if r.bits(1): # Language Code Exists
r.skip(8) # Language Code
if r.bits(1): # Audio Production Information Exists
# Mixing Level, 5 Bits
# Room Type, 2 Bits
r.skip(7)
if channel_mode == ChannelMode.DUALMONO:
r.skip(5) # Dialogue Normalization, ch2
if r.bits(1): # Compression Gain Word Exists, ch2
r.skip(8) # Compression Gain Word, ch2
if r.bits(1): # Language Code Exists, ch2
r.skip(8) # Language Code, ch2
if r.bits(1): # Audio Production Information Exists, ch2
# Mixing Level, ch2, 5 Bits
# Room Type, ch2, 2 Bits
r.skip(7)
# Copyright Bit, 1 Bit
# Original Bit Stream, 1 Bit
r.skip(2)
timecod1e = r.bits(1) # Time Code First Half Exists
timecod2e = r.bits(1) # Time Code Second Half Exists
if timecod1e:
r.skip(14) # Time Code First Half
if timecod2e:
r.skip(14) # Time Code Second Half
if r.bits(1): # Additional Bit Stream Information Exists
addbsil = r.bits(6) # Additional Bit Stream Information Length
r.skip((addbsil + 1) * 8)
@staticmethod
def _skip_unused_header_bits_enhanced(bitreader, frame_type, channel_mode,
sr_code, numblocks_code):
r = bitreader
r.skip(5) # Dialogue Normalization
if r.bits(1): # Compression Gain Word Exists
r.skip(8) # Compression Gain Word
if channel_mode == ChannelMode.DUALMONO:
r.skip(5) # Dialogue Normalization, ch2
if r.bits(1): # Compression Gain Word Exists, ch2
r.skip(8) # Compression Gain Word, ch2
if frame_type == EAC3FrameType.DEPENDENT:
if r.bits(1): # chanmap exists
r.skip(16) # chanmap
if r.bits(1): # mixmdate, 1 Bit
# FIXME: Handle channel dependent fields
return
if r.bits(1): # Informational Metadata Exists
# bsmod, 3 Bits
# Copyright Bit, 1 Bit
# Original Bit Stream, 1 Bit
r.skip(5)
if channel_mode == ChannelMode.STEREO:
# dsurmod. 2 Bits
# dheadphonmod, 2 Bits
r.skip(4)
elif channel_mode >= ChannelMode.C2F2R:
r.skip(2) # dsurexmod
if r.bits(1): # Audio Production Information Exists
# Mixing Level, 5 Bits
# Room Type, 2 Bits
# adconvtyp, 1 Bit
r.skip(8)
if channel_mode == ChannelMode.DUALMONO:
if r.bits(1): # Audio Production Information Exists, ch2
# Mixing Level, ch2, 5 Bits
# Room Type, ch2, 2 Bits
# adconvtyp, ch2, 1 Bit
r.skip(8)
if sr_code < 3: # if not half sample rate
r.skip(1) # sourcefscod
if frame_type == EAC3FrameType.INDEPENDENT and numblocks_code == 3:
r.skip(1) # convsync
if frame_type == EAC3FrameType.AC3_CONVERT:
if numblocks_code != 3:
if r.bits(1): # blkid
r.skip(6) # frmsizecod
if r.bits(1): # Additional Bit Stream Information Exists
addbsil = r.bits(6) # Additional Bit Stream Information Length
r.skip((addbsil + 1) * 8)
@staticmethod
def _get_channels(channel_mode, lfe_on):
try:
return AC3_CHANNELS[channel_mode] + lfe_on
except KeyError as e:
raise AC3Error(e)
def _guess_length(self, fileobj):
# use bitrate + data size to guess length
if self.bitrate == 0:
return
start = fileobj.tell()
fileobj.seek(0, 2)
length = fileobj.tell() - start
return 8.0 * length / self.bitrate
def pprint(self):
return u"%s, %d Hz, %.2f seconds, %d channel(s), %d bps" % (
self.codec, self.sample_rate, self.length, self.channels,
self.bitrate)
class AC3(FileType):
"""AC3(filething)
Arguments:
filething (filething)
Load AC3 or EAC3 files.
Tagging is not supported.
Use the ID3/APEv2 classes directly instead.
Attributes:
info (`AC3Info`)
"""
_mimes = ["audio/ac3"]
@loadfile()
def load(self, filething):
self.info = AC3Info(filething.fileobj)
def add_tags(self):
raise AC3Error("doesn't support tags")
@staticmethod
def score(filename, fileobj, header):
return header.startswith(b"\x0b\x77") * 2 \
+ (endswith(filename, ".ac3") or endswith(filename, ".eac3"))
Open = AC3
error = AC3Error
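# Illustrative sketch (not part of mutagen): AC3Info._guess_length() estimates the
# duration as payload-bits divided by bitrate, so roughly 10 MiB of AC-3 frames at
# 448 kbps comes out at about 187 seconds.
def _example_length_guess(data_bytes=10485760, bitrate=448000):
    return 8.0 * data_bytes / bitrate   # ~187.2 seconds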
|
lucienimmink/scanner.py
|
mutagen/ac3.py
|
Python
|
mit
| 10,451
|
#!/usr/bin/env python
"""
If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9. The sum of these
multiples is 23.
Find the sum of all the multiples of 3 or 5 below 1000.
"""
def run_calc():
num = 1000
results = []
for x in range(1, num):
if x % 3 == 0:
results.append(x)
elif x % 5 == 0:
results.append(x)
return sum(results)
def test_function():
assert run_calc() == 233168
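# Illustrative alternative (not part of the original solution): the same sum as a
# single generator expression over the multiples of 3 or 5.
def run_calc_oneliner(limit=1000):
    return sum(x for x in range(1, limit) if x % 3 == 0 or x % 5 == 0)   # 233168 for limit=1000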
if __name__ == '__main__':
test_function()
|
marshallhumble/Euler_Groovy
|
Project-Euler/python/1.py
|
Python
|
mit
| 535
|
################################################################################
#
# This program is part of the HPMon Zenpack for Zenoss.
# Copyright (C) 2008, 2009, 2010 Egor Puzanov.
#
# This program can be used under the GNU General Public License version 2
# You can find full information here: http://www.zenoss.com/oss
#
################################################################################
__doc__="""cpqScsiCntlr
cpqScsiCntlr is an abstraction of a HP SCSI Controller.
$Id: cpqScsiCntlr.py,v 1.1 2010/06/30 16:27:54 egor Exp $"""
__version__ = "$Revision: 1.1 $"[11:-2]
from HPExpansionCard import *
class cpqScsiCntlr(HPExpansionCard):
"""SCSI Cntrl object"""
model = ""
FWRev = ""
scsiwidth = ""
# we monitor RAID Controllers
monitor = True
_properties = HPExpansionCard._properties + (
{'id':'model', 'type':'string', 'mode':'w'},
{'id':'FWRev', 'type':'string', 'mode':'w'},
{'id':'scsiwidth', 'type':'string', 'mode':'w'},
)
factory_type_information = (
{
'id' : 'cpqScsiCntlr',
'meta_type' : 'cpqScsiCntlr',
'description' : """Arbitrary device grouping class""",
'icon' : 'ExpansionCard_icon.gif',
'product' : 'ZenModel',
'factory' : 'manage_addCpqScsiCntlr',
'immediate_view' : 'viewCpqScsiCntlr',
'actions' :
(
{ 'id' : 'status'
, 'name' : 'Status'
, 'action' : 'viewCpqScsiCntlr'
, 'permissions' : (ZEN_VIEW,)
},
{ 'id' : 'perfConf'
, 'name' : 'Template'
, 'action' : 'objTemplates'
, 'permissions' : (ZEN_CHANGE_DEVICE, )
},
{ 'id' : 'viewHistory'
, 'name' : 'Modifications'
, 'action' : 'viewHistory'
, 'permissions' : (ZEN_VIEW_MODIFICATIONS,)
},
)
},
)
InitializeClass(cpqScsiCntlr)
|
anksp21/Community-Zenpacks
|
ZenPacks.community.HPMon/ZenPacks/community/HPMon/cpqScsiCntlr.py
|
Python
|
gpl-2.0
| 2,211
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
"""
vault based providers
"""
from twisted.internet import defer
from buildbot import config
from buildbot.secrets.providers.base import SecretProviderBase
from buildbot.util import httpclientservice
from buildbot.warnings import warn_deprecated
class HashiCorpVaultSecretProvider(SecretProviderBase):
"""
basic provider where each secret is stored in Vault KV secret engine
"""
name = 'SecretInVault'
def checkConfig(self, vaultServer=None, vaultToken=None, secretsmount=None,
apiVersion=1):
warn_deprecated("3.4.0", "Use of HashiCorpVaultSecretProvider is deprecated and will be "
"removed in future releases. Use HashiCorpVaultKvSecretProvider instead")
if not isinstance(vaultServer, str):
config.error(f"vaultServer must be a string while it is {type(vaultServer)}")
if not isinstance(vaultToken, str):
config.error(f"vaultToken must be a string while it is {type(vaultToken)}")
if apiVersion not in [1, 2]:
config.error(f"apiVersion {apiVersion} is not supported")
@defer.inlineCallbacks
def reconfigService(self, vaultServer=None, vaultToken=None, secretsmount=None,
apiVersion=1):
if secretsmount is None:
self.secretsmount = "secret"
else:
self.secretsmount = secretsmount
if vaultServer.endswith('/'):
vaultServer = vaultServer[:-1]
self.vaultServer = vaultServer
self.vaultToken = vaultToken
self.apiVersion = apiVersion
self._http = yield httpclientservice.HTTPClientService.getService(
self.master, self.vaultServer, headers={'X-Vault-Token': self.vaultToken})
@defer.inlineCallbacks
def get(self, entry):
"""
get the value from vault secret backend
"""
parts = entry.rsplit('/', maxsplit=1)
name = parts[0]
if len(parts) > 1:
key = parts[1]
else:
key = 'value'
if self.apiVersion == 1:
path = self.secretsmount + '/' + name
else:
path = self.secretsmount + '/data/' + name
# note that the HTTP path contains v1 for both versions of the key-value
# secret engine. Different versions of the key-value engine are
# effectively separate secret engines in vault, with the same base HTTP
# API, but with different paths within it.
proj = yield self._http.get(f"/v1/{path}")
code = yield proj.code
if code != 200:
raise KeyError(f"The secret {entry} does not exist in Vault provider: request"
f" return code: {code}.")
json = yield proj.json()
if self.apiVersion == 1:
secrets = json.get('data', {})
else:
secrets = json.get('data', {}).get('data', {})
try:
return secrets[key]
except KeyError as e:
raise KeyError(
f"The secret {entry} does not exist in Vault provider: {e}") from e
|
pmisik/buildbot
|
master/buildbot/secrets/providers/vault.py
|
Python
|
gpl-2.0
| 3,801
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
EditScriptDialog.py
---------------------
Date : December 2012
Copyright : (C) 2012 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'December 2012'
__copyright__ = '(C) 2012, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import codecs
import inspect
import traceback
import warnings
from qgis.PyQt import uic
from qgis.PyQt.QtCore import Qt
from qgis.PyQt.QtGui import QCursor
from qgis.PyQt.QtWidgets import (QMessageBox,
QFileDialog)
from qgis.gui import QgsGui, QgsErrorDialog
from qgis.core import (QgsApplication,
QgsSettings,
QgsError,
QgsProcessingAlgorithm,
QgsProcessingFeatureBasedAlgorithm)
from qgis.utils import iface, OverrideCursor
from processing.gui.AlgorithmDialog import AlgorithmDialog
from processing.script import ScriptUtils
pluginPath = os.path.split(os.path.dirname(__file__))[0]
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
WIDGET, BASE = uic.loadUiType(
os.path.join(pluginPath, "ui", "DlgScriptEditor.ui"))
class ScriptEditorDialog(BASE, WIDGET):
hasChanged = False
def __init__(self, filePath=None, parent=None):
super(ScriptEditorDialog, self).__init__(parent)
self.setupUi(self)
QgsGui.instance().enableAutoGeometryRestore(self)
self.editor.initLexer()
self.searchWidget.setVisible(False)
if iface is not None:
self.toolBar.setIconSize(iface.iconSize())
self.setStyleSheet(iface.mainWindow().styleSheet())
self.actionOpenScript.setIcon(
QgsApplication.getThemeIcon('/mActionScriptOpen.svg'))
self.actionSaveScript.setIcon(
QgsApplication.getThemeIcon('/mActionFileSave.svg'))
self.actionSaveScriptAs.setIcon(
QgsApplication.getThemeIcon('/mActionFileSaveAs.svg'))
self.actionRunScript.setIcon(
QgsApplication.getThemeIcon('/mActionStart.svg'))
self.actionCut.setIcon(
QgsApplication.getThemeIcon('/mActionEditCut.svg'))
self.actionCopy.setIcon(
QgsApplication.getThemeIcon('/mActionEditCopy.svg'))
self.actionPaste.setIcon(
QgsApplication.getThemeIcon('/mActionEditPaste.svg'))
self.actionUndo.setIcon(
QgsApplication.getThemeIcon('/mActionUndo.svg'))
self.actionRedo.setIcon(
QgsApplication.getThemeIcon('/mActionRedo.svg'))
self.actionFindReplace.setIcon(
QgsApplication.getThemeIcon('/mActionFindReplace.svg'))
self.actionIncreaseFontSize.setIcon(
QgsApplication.getThemeIcon('/mActionIncreaseFont.svg'))
self.actionDecreaseFontSize.setIcon(
QgsApplication.getThemeIcon('/mActionDecreaseFont.svg'))
# Connect signals and slots
self.actionOpenScript.triggered.connect(self.openScript)
self.actionSaveScript.triggered.connect(self.save)
self.actionSaveScriptAs.triggered.connect(self.saveAs)
self.actionRunScript.triggered.connect(self.runAlgorithm)
self.actionCut.triggered.connect(self.editor.cut)
self.actionCopy.triggered.connect(self.editor.copy)
self.actionPaste.triggered.connect(self.editor.paste)
self.actionUndo.triggered.connect(self.editor.undo)
self.actionRedo.triggered.connect(self.editor.redo)
self.actionFindReplace.toggled.connect(self.toggleSearchBox)
self.actionIncreaseFontSize.triggered.connect(self.editor.zoomIn)
self.actionDecreaseFontSize.triggered.connect(self.editor.zoomOut)
self.editor.textChanged.connect(lambda: self.setHasChanged(True))
self.leFindText.returnPressed.connect(self.find)
self.btnFind.clicked.connect(self.find)
self.btnReplace.clicked.connect(self.replace)
self.lastSearch = None
self.filePath = None
if filePath is not None:
self._loadFile(filePath)
self.needUpdate = False
self.setHasChanged(False)
def update_dialog_title(self):
"""
Updates the script editor dialog title
"""
if self.filePath:
path, file_name = os.path.split(self.filePath)
else:
file_name = self.tr('Untitled Script')
if self.hasChanged:
file_name = '*' + file_name
self.setWindowTitle(self.tr('{} - Processing Script Editor').format(file_name))
def closeEvent(self, event):
settings = QgsSettings()
settings.setValue("/Processing/stateScriptEditor", self.saveState())
settings.setValue("/Processing/geometryScriptEditor", self.saveGeometry())
if self.hasChanged:
ret = QMessageBox.question(
self, self.tr('Save Script?'),
self.tr('There are unsaved changes in this script. Do you want to keep those?'),
QMessageBox.Save | QMessageBox.Cancel | QMessageBox.Discard, QMessageBox.Cancel)
if ret == QMessageBox.Save:
self.updateProvider()
self.saveScript(False)
event.accept()
elif ret == QMessageBox.Discard:
self.updateProvider()
event.accept()
else:
event.ignore()
else:
event.accept()
def updateProvider(self):
if self.needUpdate:
QgsApplication.processingRegistry().providerById("script").refreshAlgorithms()
def openScript(self):
if self.hasChanged:
ret = QMessageBox.warning(self,
self.tr("Unsaved changes"),
self.tr("There are unsaved changes in the script. Continue?"),
QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
if ret == QMessageBox.No:
return
scriptDir = ScriptUtils.scriptsFolders()[0]
fileName, _ = QFileDialog.getOpenFileName(self,
self.tr("Open script"),
scriptDir,
self.tr("Processing scripts (*.py *.PY)"))
if fileName == "":
return
with OverrideCursor(Qt.WaitCursor):
self._loadFile(fileName)
def save(self):
self.saveScript(False)
def saveAs(self):
self.saveScript(True)
def saveScript(self, saveAs):
newPath = None
if self.filePath is None or saveAs:
scriptDir = ScriptUtils.scriptsFolders()[0]
newPath, _ = QFileDialog.getSaveFileName(self,
self.tr("Save script"),
scriptDir,
self.tr("Processing scripts (*.py *.PY)"))
if newPath:
if not newPath.lower().endswith(".py"):
newPath += ".py"
self.filePath = newPath
if self.filePath:
text = self.editor.text()
try:
with codecs.open(self.filePath, "w", encoding="utf-8") as f:
f.write(text)
except IOError as e:
QMessageBox.warning(self,
self.tr("I/O error"),
self.tr("Unable to save edits:\n{}").format(str(e))
)
return
self.needUpdate = True
self.setHasChanged(False)
def setHasChanged(self, hasChanged):
self.hasChanged = hasChanged
self.actionSaveScript.setEnabled(hasChanged)
self.update_dialog_title()
def runAlgorithm(self):
d = {}
try:
exec(self.editor.text(), d)
except Exception as e:
error = QgsError(traceback.format_exc(), "Processing")
QgsErrorDialog.show(error,
self.tr("Execution error")
)
return
alg = None
for k, v in d.items():
if inspect.isclass(v) and issubclass(v, (QgsProcessingAlgorithm, QgsProcessingFeatureBasedAlgorithm)) and v.__name__ not in ("QgsProcessingAlgorithm", "QgsProcessingFeatureBasedAlgorithm"):
alg = v()
break
if alg is None:
QMessageBox.warning(self,
self.tr("No script found"),
self.tr("Seems there is no valid script in the file.")
)
return
alg.setProvider(QgsApplication.processingRegistry().providerById("script"))
alg.initAlgorithm()
dlg = alg.createCustomParametersWidget(self)
if not dlg:
dlg = AlgorithmDialog(alg)
canvas = iface.mapCanvas()
prevMapTool = canvas.mapTool()
dlg.show()
if canvas.mapTool() != prevMapTool:
try:
canvas.mapTool().reset()
except:
pass
canvas.setMapTool(prevMapTool)
def find(self):
textToFind = self.leFindText.text()
caseSensitive = self.chkCaseSensitive.isChecked()
wholeWord = self.chkWholeWord.isChecked()
if self.lastSearch is None or textToFind != self.lastSearch:
self.editor.findFirst(textToFind, False, caseSensitive, wholeWord, True)
else:
self.editor.findNext()
def replace(self):
textToReplace = self.leReplaceText.text()
self.editor.replaceSelectedText(textToReplace)
def toggleSearchBox(self, checked):
self.searchWidget.setVisible(checked)
if (checked):
self.leFindText.setFocus()
def _loadFile(self, filePath):
with codecs.open(filePath, "r", encoding="utf-8") as f:
txt = f.read()
self.editor.setText(txt)
self.hasChanged = False
self.editor.setModified(False)
self.editor.recolor()
self.filePath = filePath
self.update_dialog_title()
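# Illustrative sketch (not part of the QGIS plugin): how runAlgorithm() above locates
# the algorithm -- exec the script text, then pick the first class in the resulting
# namespace that subclasses the processing algorithm base types.
def _example_discover_algorithm(script_text,
                                base_types=(QgsProcessingAlgorithm, QgsProcessingFeatureBasedAlgorithm)):
    namespace = {}
    exec(script_text, namespace)
    base_names = tuple(t.__name__ for t in base_types)
    for value in namespace.values():
        if inspect.isclass(value) and issubclass(value, base_types) and value.__name__ not in base_names:
            return value()
    return None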
|
raymondnijssen/QGIS
|
python/plugins/processing/script/ScriptEditorDialog.py
|
Python
|
gpl-2.0
| 11,181
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Storage testcase using xfstests.
"""
__copyright__ = \
"""
Copyright (C) 2012-2015 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Id: tdStorageStress1.py $"
# Standard Python imports.
import os;
import sys;
# Only the main script needs to modify the path.
try: __file__
except: __file__ = sys.argv[0];
g_ksValidationKitDir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))));
sys.path.append(g_ksValidationKitDir);
# Validation Kit imports.
from testdriver import reporter;
from testdriver import base;
from testdriver import vbox;
from testdriver import vboxcon;
def _ControllerTypeToName(eControllerType):
""" Translate a controller type to a name. """
if eControllerType == vboxcon.StorageControllerType_PIIX3 or eControllerType == vboxcon.StorageControllerType_PIIX4:
sType = "IDE Controller";
elif eControllerType == vboxcon.StorageControllerType_IntelAhci:
sType = "SATA Controller";
elif eControllerType == vboxcon.StorageControllerType_LsiLogicSas:
sType = "SAS Controller";
elif eControllerType == vboxcon.StorageControllerType_LsiLogic or eControllerType == vboxcon.StorageControllerType_BusLogic:
sType = "SCSI Controller";
else:
sType = "Storage Controller";
return sType;
class tdStorageStress(vbox.TestDriver): # pylint: disable=R0902
"""
Storage testcase.
"""
def __init__(self):
vbox.TestDriver.__init__(self);
self.asRsrcs = None;
self.oGuestToGuestVM = None;
self.oGuestToGuestSess = None;
self.oGuestToGuestTxs = None;
self.asTestVMsDef = ['tst-debian'];
self.asTestVMs = self.asTestVMsDef;
self.asSkipVMs = [];
self.asVirtModesDef = ['hwvirt', 'hwvirt-np', 'raw',]
self.asVirtModes = self.asVirtModesDef
self.acCpusDef = [1, 2,]
self.acCpus = self.acCpusDef;
self.asStorageCtrlsDef = ['AHCI', 'IDE', 'LsiLogicSAS', 'LsiLogic', 'BusLogic'];
self.asStorageCtrls = self.asStorageCtrlsDef;
self.asDiskFormatsDef = ['VDI', 'VMDK', 'VHD', 'QED', 'Parallels', 'QCOW'];
self.asDiskFormats = self.asDiskFormatsDef;
self.asTestsDef = ['xfstests'];
self.asTests = self.asTestsDef;
self.asGuestFs = ['xfs', 'ext4', 'btrfs'];
self.asGuestFsDef = self.asGuestFs;
self.asIscsiTargetsDef = ['aurora|iqn.2011-03.home.aurora:aurora.storagebench|1'];
self.asIscsiTargets = self.asIscsiTargetsDef;
self.asDirsDef = ['/run/media/alexander/OWCSSD/alexander', \
'/run/media/alexander/CrucialSSD/alexander', \
'/run/media/alexander/HardDisk/alexander', \
'/home/alexander'];
self.asDirs = self.asDirsDef;
#
# Overridden methods.
#
def showUsage(self):
rc = vbox.TestDriver.showUsage(self);
reporter.log('');
reporter.log('tdStorageBenchmark1 Options:');
reporter.log(' --virt-modes <m1[:m2[:]]');
reporter.log(' Default: %s' % (':'.join(self.asVirtModesDef)));
reporter.log(' --cpu-counts <c1[:c2[:]]');
reporter.log(' Default: %s' % (':'.join(str(c) for c in self.acCpusDef)));
reporter.log(' --storage-ctrls <type1[:type2[:...]]>');
reporter.log(' Default: %s' % (':'.join(self.asStorageCtrls)));
reporter.log(' --disk-formats <type1[:type2[:...]]>');
reporter.log(' Default: %s' % (':'.join(self.asDiskFormats)));
reporter.log(' --disk-dirs <path1[:path2[:...]]>');
reporter.log(' Default: %s' % (':'.join(self.asDirs)));
reporter.log(' --iscsi-targets <target1[:target2[:...]]>');
reporter.log(' Default: %s' % (':'.join(self.asIscsiTargets)));
reporter.log(' --tests <test1[:test2[:...]]>');
reporter.log(' Default: %s' % (':'.join(self.asTests)));
reporter.log(' --guest-fs <fs1[:fs2[:...]]>');
reporter.log(' Default: %s' % (':'.join(self.asGuestFs)));
reporter.log(' --test-vms <vm1[:vm2[:...]]>');
reporter.log(' Test the specified VMs in the given order. Use this to change');
reporter.log(' the execution order or limit the choice of VMs');
reporter.log(' Default: %s (all)' % (':'.join(self.asTestVMsDef)));
reporter.log(' --skip-vms <vm1[:vm2[:...]]>');
reporter.log(' Skip the specified VMs when testing.');
return rc;
def parseOption(self, asArgs, iArg): # pylint: disable=R0912,R0915
if asArgs[iArg] == '--virt-modes':
iArg += 1;
if iArg >= len(asArgs): raise base.InvalidOption('The "--virt-modes" takes a colon separated list of modes');
self.asVirtModes = asArgs[iArg].split(':');
for s in self.asVirtModes:
if s not in self.asVirtModesDef:
raise base.InvalidOption('The "--virt-modes" value "%s" is not valid; valid values are: %s' \
% (s, ' '.join(self.asVirtModesDef)));
elif asArgs[iArg] == '--cpu-counts':
iArg += 1;
if iArg >= len(asArgs): raise base.InvalidOption('The "--cpu-counts" takes a colon separated list of cpu counts');
self.acCpus = [];
for s in asArgs[iArg].split(':'):
try: c = int(s);
except: raise base.InvalidOption('The "--cpu-counts" value "%s" is not an integer' % (s,));
if c <= 0: raise base.InvalidOption('The "--cpu-counts" value "%s" is zero or negative' % (s,));
self.acCpus.append(c);
elif asArgs[iArg] == '--storage-ctrls':
iArg += 1;
if iArg >= len(asArgs):
raise base.InvalidOption('The "--storage-ctrls" takes a colon separated list of Storage controller types');
self.asStorageCtrls = asArgs[iArg].split(':');
elif asArgs[iArg] == '--disk-formats':
iArg += 1;
if iArg >= len(asArgs): raise base.InvalidOption('The "--disk-formats" takes a colon separated list of disk formats');
self.asDiskFormats = asArgs[iArg].split(':');
elif asArgs[iArg] == '--disk-dirs':
iArg += 1;
if iArg >= len(asArgs): raise base.InvalidOption('The "--disk-dirs" takes a colon separated list of directories');
self.asDirs = asArgs[iArg].split(':');
elif asArgs[iArg] == '--iscsi-targets':
iArg += 1;
if iArg >= len(asArgs):
raise base.InvalidOption('The "--iscsi-targets" takes a colon separated list of iscsi targets');
self.asIscsiTargets = asArgs[iArg].split(':');
elif asArgs[iArg] == '--tests':
iArg += 1;
if iArg >= len(asArgs): raise base.InvalidOption('The "--tests" takes a colon separated list of disk formats');
self.asTests = asArgs[iArg].split(':');
elif asArgs[iArg] == '--guest-fs':
iArg += 1;
if iArg >= len(asArgs):
raise base.InvalidOption('The "--guest-fs" takes a colon separated list of filesystem identifiers');
self.asGuestFs = asArgs[iArg].split(':');
elif asArgs[iArg] == '--test-vms':
iArg += 1;
if iArg >= len(asArgs): raise base.InvalidOption('The "--test-vms" takes colon separated list');
self.asTestVMs = asArgs[iArg].split(':');
for s in self.asTestVMs:
if s not in self.asTestVMsDef:
raise base.InvalidOption('The "--test-vms" value "%s" is not valid; valid values are: %s' \
% (s, ' '.join(self.asTestVMsDef)));
elif asArgs[iArg] == '--skip-vms':
iArg += 1;
if iArg >= len(asArgs): raise base.InvalidOption('The "--skip-vms" takes colon separated list');
self.asSkipVMs = asArgs[iArg].split(':');
for s in self.asSkipVMs:
if s not in self.asTestVMsDef:
reporter.log('warning: The "--test-vms" value "%s" does not specify any of our test VMs.' % (s));
else:
return vbox.TestDriver.parseOption(self, asArgs, iArg);
return iArg + 1;
def completeOptions(self):
# Remove skipped VMs from the test list.
for sVM in self.asSkipVMs:
try: self.asTestVMs.remove(sVM);
except: pass;
return vbox.TestDriver.completeOptions(self);
def getResourceSet(self):
# Construct the resource list the first time it's queried.
if self.asRsrcs is None:
self.asRsrcs = [];
if 'tst-debian' in self.asTestVMs:
self.asRsrcs.append('4.2/storage/debian.vdi');
return self.asRsrcs;
def actionConfig(self):
# Some stupid trickery to guess the location of the iso. ## fixme - testsuite unzip ++
sVBoxValidationKit_iso = os.path.abspath(os.path.join(os.path.dirname(__file__),
'../../VBoxValidationKitStorIo.iso'));
if not os.path.isfile(sVBoxValidationKit_iso):
sVBoxValidationKit_iso = os.path.abspath(os.path.join(os.path.dirname(__file__),
'../../VBoxValidationKitStorIo.iso'));
if not os.path.isfile(sVBoxValidationKit_iso):
sVBoxValidationKit_iso = '/mnt/ramdisk/vbox/svn/trunk/validationkit/VBoxValidationKitStorIo.iso';
if not os.path.isfile(sVBoxValidationKit_iso):
sVBoxValidationKit_iso = '/mnt/ramdisk/vbox/svn/trunk/testsuite/VBoxTestSuiteStorIo.iso';
if not os.path.isfile(sVBoxValidationKit_iso):
sCur = os.getcwd();
for i in range(0, 10):
sVBoxValidationKit_iso = os.path.join(sCur, 'validationkit/VBoxValidationKitStorIo.iso');
if os.path.isfile(sVBoxValidationKit_iso):
break;
sVBoxValidationKit_iso = os.path.join(sCur, 'testsuite/VBoxTestSuiteStorIo.iso');
if os.path.isfile(sVBoxValidationKit_iso):
break;
sCur = os.path.abspath(os.path.join(sCur, '..'));
if i is None: pass; # shut up pychecker/pylint.
if not os.path.isfile(sVBoxValidationKit_iso):
sVBoxValidationKit_iso = '/mnt/VirtualBox/VBoxValidationKitStorIo.iso';
if not os.path.isfile(sVBoxValidationKit_iso):
sVBoxValidationKit_iso = '/mnt/VirtualBox/VBoxTestSuiteStorIo.iso';
# Make sure vboxapi has been imported so we can use the constants.
if not self.importVBoxApi():
return False;
#
# Configure the VMs we're going to use.
#
# Linux VMs
if 'tst-debian' in self.asTestVMs:
oVM = self.createTestVM('tst-debian', 1, '4.2/storage/debian.vdi', sKind = 'Debian_64', fIoApic = True, \
eNic0AttachType = vboxcon.NetworkAttachmentType_NAT, \
eNic0Type = vboxcon.NetworkAdapterType_Am79C973, \
sDvdImage = sVBoxValidationKit_iso);
if oVM is None:
return False;
return True;
def actionExecute(self):
"""
Execute the testcase.
"""
fRc = self.test1();
return fRc;
#
# Test execution helpers.
#
def test1RunTestProgs(self, oSession, oTxsSession, fRc, sTestName, sGuestFs):
"""
Runs all the test programs on the test machine.
"""
_ = oSession;
reporter.testStart(sTestName);
sMkfsCmd = 'mkfs.' + sGuestFs;
# Prepare test disks, just create filesystem without partition
reporter.testStart('Preparation');
fRc = fRc and self.txsRunTest(oTxsSession, 'Create FS 1', 60000, \
'/sbin/' + sMkfsCmd,
(sMkfsCmd, '/dev/sdb'));
fRc = fRc and self.txsRunTest(oTxsSession, 'Create FS 2', 60000, \
'/sbin/' + sMkfsCmd,
(sMkfsCmd, '/dev/sdc'));
# Create test and scratch directory
fRc = fRc and self.txsRunTest(oTxsSession, 'Create /mnt/test', 10000, \
'/bin/mkdir',
('mkdir', '/mnt/test'));
fRc = fRc and self.txsRunTest(oTxsSession, 'Create /mnt/scratch', 10000, \
'/bin/mkdir',
('mkdir', '/mnt/scratch'));
# Mount test and scratch directory.
fRc = fRc and self.txsRunTest(oTxsSession, 'Mount /mnt/test', 10000, \
'/bin/mount',
('mount', '/dev/sdb','/mnt/test'));
fRc = fRc and self.txsRunTest(oTxsSession, 'Mount /mnt/scratch', 10000, \
'/bin/mount',
('mount', '/dev/sdc','/mnt/scratch'));
fRc = fRc and self.txsRunTest(oTxsSession, 'Copying xfstests', 10000, \
'/bin/cp',
('cp', '-r','${CDROM}/${OS.ARCH}/xfstests', '/tmp'));
reporter.testDone();
# Run xfstests (this sh + cd crap is required because the cwd for the script must be in the root
# of the xfstests directory...)
reporter.testStart('xfstests');
if fRc and 'xfstests' in self.asTests:
fRc = self.txsRunTest(oTxsSession, 'xfstests', 3600000, \
'/bin/sh',
('sh', '-c', '(cd /tmp/xfstests && ./check -g auto)'), \
('TEST_DIR=/mnt/test', 'TEST_DEV=/dev/sdb', 'SCRATCH_MNT=/mnt/scratch', 'SCRATCH_DEV=/dev/sdc', \
'FSTYP=' + sGuestFs));
reporter.testDone();
else:
reporter.testDone(fSkipped = True);
reporter.testDone(not fRc);
return fRc;
# pylint: disable=R0913
def test1OneCfg(self, sVmName, eStorageController, sDiskFormat, sDiskPath1, sDiskPath2, \
sGuestFs, cCpus, fHwVirt, fNestedPaging):
"""
Runs the specified VM thru test #1.
Returns a success indicator on the general test execution. This is not
the actual test result.
"""
oVM = self.getVmByName(sVmName);
# Reconfigure the VM
fRc = True;
oSession = self.openSession(oVM);
if oSession is not None:
# Attach HD
fRc = oSession.ensureControllerAttached(_ControllerTypeToName(eStorageController));
fRc = fRc and oSession.setStorageControllerType(eStorageController, _ControllerTypeToName(eStorageController));
if sDiskFormat == "iSCSI":
listNames = [];
listValues = [];
listValues = sDiskPath1.split('|');
listNames.append('TargetAddress');
listNames.append('TargetName');
listNames.append('LUN');
if self.fpApiVer >= 5.0:
oHd = oSession.oVBox.createMedium(sDiskFormat, sDiskPath1, vboxcon.AccessMode_ReadWrite, \
vboxcon.DeviceType_HardDisk);
else:
oHd = oSession.oVBox.createHardDisk(sDiskFormat, sDiskPath1);
oHd.type = vboxcon.MediumType_Normal;
oHd.setProperties(listNames, listValues);
# Attach it.
if fRc is True:
try:
if oSession.fpApiVer >= 4.0:
oSession.o.machine.attachDevice(_ControllerTypeToName(eStorageController), \
1, 0, vboxcon.DeviceType_HardDisk, oHd);
else:
oSession.o.machine.attachDevice(_ControllerTypeToName(eStorageController), \
1, 0, vboxcon.DeviceType_HardDisk, oHd.id);
except:
reporter.errorXcpt('attachDevice("%s",%s,%s,HardDisk,"%s") failed on "%s"' \
% (_ControllerTypeToName(eStorageController), 1, 0, oHd.id, oSession.sName) );
fRc = False;
else:
reporter.log('attached "%s" to %s' % (sDiskPath1, oSession.sName));
else:
fRc = fRc and oSession.createAndAttachHd(sDiskPath1, sDiskFormat, _ControllerTypeToName(eStorageController), \
cb = 10*1024*1024*1024, iPort = 1, fImmutable = False);
fRc = fRc and oSession.createAndAttachHd(sDiskPath2, sDiskFormat, _ControllerTypeToName(eStorageController), \
cb = 10*1024*1024*1024, iPort = 2, fImmutable = False);
fRc = fRc and oSession.enableVirtEx(fHwVirt);
fRc = fRc and oSession.enableNestedPaging(fNestedPaging);
fRc = fRc and oSession.setCpuCount(cCpus);
fRc = fRc and oSession.saveSettings();
fRc = oSession.close() and fRc and True; # pychecker hack.
oSession = None;
else:
fRc = False;
# Start up.
if fRc is True:
self.logVmInfo(oVM);
oSession, oTxsSession = self.startVmAndConnectToTxsViaTcp(sVmName, fCdWait = False, fNatForwardingForTxs = True);
if oSession is not None:
self.addTask(oSession);
# Fudge factor - Allow the guest to finish starting up.
self.sleep(5);
fRc = self.test1RunTestProgs(oSession, oTxsSession, fRc, 'stress testing', sGuestFs);
# cleanup.
self.removeTask(oTxsSession);
self.terminateVmBySession(oSession)
# Remove disk
oSession = self.openSession(oVM);
if oSession is not None:
try:
oSession.o.machine.detachDevice(_ControllerTypeToName(eStorageController), 1, 0);
oSession.o.machine.detachDevice(_ControllerTypeToName(eStorageController), 2, 0);
# Remove storage controller if it is not an IDE controller.
if eStorageController is not vboxcon.StorageControllerType_PIIX3 \
and eStorageController is not vboxcon.StorageControllerType_PIIX4:
oSession.o.machine.removeStorageController(_ControllerTypeToName(eStorageController));
oSession.saveSettings();
oSession.oVBox.deleteHdByLocation(sDiskPath1);
oSession.oVBox.deleteHdByLocation(sDiskPath2);
oSession.saveSettings();
oSession.close();
oSession = None;
except:
reporter.errorXcpt('failed to detach/delete disks %s and %s from storage controller' % \
(sDiskPath1, sDiskPath2));
else:
fRc = False;
else:
fRc = False;
return fRc;
def test1OneVM(self, sVmName):
"""
Runs one VM thru the various configurations.
"""
reporter.testStart(sVmName);
fRc = True;
for sStorageCtrl in self.asStorageCtrls:
reporter.testStart(sStorageCtrl);
if sStorageCtrl == 'AHCI':
eStorageCtrl = vboxcon.StorageControllerType_IntelAhci;
elif sStorageCtrl == 'IDE':
eStorageCtrl = vboxcon.StorageControllerType_PIIX4;
elif sStorageCtrl == 'LsiLogicSAS':
eStorageCtrl = vboxcon.StorageControllerType_LsiLogicSas;
elif sStorageCtrl == 'LsiLogic':
eStorageCtrl = vboxcon.StorageControllerType_LsiLogic;
elif sStorageCtrl == 'BusLogic':
eStorageCtrl = vboxcon.StorageControllerType_BusLogic;
else:
eStorageCtrl = None;
for sDiskFormat in self.asDiskFormats:
reporter.testStart('%s' % (sDiskFormat,));
asPaths = self.asDirs;
for sDir in asPaths:
reporter.testStart('%s' % (sDir,));
sPathDisk1 = sDir + "/disk1.disk";
sPathDisk2 = sDir + "/disk2.disk";
for sGuestFs in self.asGuestFs:
reporter.testStart('%s' % (sGuestFs,));
for cCpus in self.acCpus:
if cCpus == 1: reporter.testStart('1 cpu');
else: reporter.testStart('%u cpus' % (cCpus,));
for sVirtMode in self.asVirtModes:
if sVirtMode == 'raw' and cCpus > 1:
continue;
hsVirtModeDesc = {};
hsVirtModeDesc['raw'] = 'Raw-mode';
hsVirtModeDesc['hwvirt'] = 'HwVirt';
hsVirtModeDesc['hwvirt-np'] = 'NestedPaging';
reporter.testStart(hsVirtModeDesc[sVirtMode]);
fHwVirt = sVirtMode != 'raw';
fNestedPaging = sVirtMode == 'hwvirt-np';
fRc = self.test1OneCfg(sVmName, eStorageCtrl, sDiskFormat, sPathDisk1, sPathDisk2, \
sGuestFs, cCpus, fHwVirt, fNestedPaging) and fRc and True;
reporter.testDone();
reporter.testDone();
reporter.testDone();
reporter.testDone();
reporter.testDone();
reporter.testDone();
reporter.testDone();
return fRc;
def test1(self):
"""
Executes test #1.
"""
        # Loop thru the test VMs, keeping any failure sticky so the last VM
        # cannot overwrite earlier results (and fRc is defined even if the
        # list is empty).
        fRc = True;
        for sVM in self.asTestVMs:
            # run test on the VM.
            if not self.test1OneVM(sVM):
                fRc = False;
        return fRc;
if __name__ == '__main__':
sys.exit(tdStorageStress().main(sys.argv));
|
sobomax/virtualbox_64bit_edd
|
src/VBox/ValidationKit/tests/storage/tdStorageStress1.py
|
Python
|
gpl-2.0
| 23,926
|
#!/usr/bin/env python
import os
import re
from repositoryhandler.backends import create_repository,\
create_repository_from_path, RepositoryUnknownError
from repositoryhandler.backends.watchers import *
from tests import Test, register_test, remove_directory
class SVNTest(Test):
def checkout(self):
# checkout
self.repo = create_repository('svn',
'http://svn.gnome.org/svn/gnome-common')
self.repo.checkout('gnome-common', '/tmp/', branch="trunk", rev="3910")
if not os.path.exists('/tmp/gnome-common/.svn') or \
self.repo.get_last_revision('/tmp/gnome-common') != "3910":
print "SVN checkout: FAILED"
return
self.repo.checkout('gnome-common', '/tmp/', newdir='gnome-common-2.16',
branch='gnome-2-16')
if os.path.exists('/tmp/gnome-common-2.16/.svn'):
print "SVN checkout: PASSED"
try:
repo2 = create_repository_from_path('/tmp/gnome-common-2.16')
except:
print "SVN create_repository_from_path: FAILED"
return
try:
repo2 = create_repository_from_path('/tmp/')
except RepositoryUnknownError:
print "SVN create_repository_from_path: PASSED"
except:
print "SVN create_repository_from_path: FAILED"
else:
print "SVN checkout: FAILED"
return
try:
# Repository without trunk dir
repo2 = create_repository(
'svn',
'https://svn.forge.morfeo-project.org/svn/libresoft-tools')
repo2.checkout('octopus/trunk', '/tmp/', newdir='octopus')
if not os.path.exists('/tmp/octopus/.svn'):
print "SVN checkout repo without /trunk: FAILED"
return
except:
print "SVN checkout repo without /trunk: FAILED"
return
print "SVN checkout repo without /trunk: PASSED"
try:
            # Unconditionally download the whole repo
repo3 = create_repository('svn',
'http://svn.gnome.org/svn/asyncworker')
repo3.checkout('.', '/tmp/')
if not os.path.exists('/tmp/asyncworker/.svn'):
print "SVN checkout the whole repo: FAILED"
return
except:
print "SVN checkout the whole repo: FAILED"
return
print "SVN checkout the whole repo: PASSED"
def update(self):
# update(other branch)
try:
self.repo.update('/tmp/gnome-common', rev='3900')
except:
print "SVN update: FAILED"
return
print "SVN update: PASSED"
def cat(self):
def cat_output(line, user_data):
user_data[0] += 1
n_lines = [0]
self.repo.add_watch(CAT, cat_output, n_lines)
# cat a file using a local path
try:
self.repo.cat('/tmp/gnome-common/ChangeLog')
except:
print "SVN cat: FAILED"
return
if n_lines[0] != 795:
print "SVN cat: FAILED"
return
n_lines[0] = 0
# cat a file using a remote path
try:
self.repo.cat(
"http://svn.gnome.org/svn/gnome-common/trunk/ChangeLog",
rev="3900")
except:
print "SVN cat: FAILED"
return
if n_lines[0] != 795:
print "SVN cat: FAILED"
else:
print "SVN cat: PASSED"
def log(self):
# log(current branch)
def log_cb(data, user_data=None):
self.log_data += data
self.repo.add_watch(LOG, log_cb)
try:
# Using a local path
self.log_data = ""
self.repo.log('/tmp/gnome-common', files=['ChangeLog'])
except:
print "SVN log: FAILED"
return
if len(self.log_data) <= 0:
print "SVN log: FAILED"
return
try:
            # Using a URI
self.log_data = ""
self.repo.log('http://svn.gnome.org/svn/gnome-common/trunk',
files=['ChangeLog'])
except:
print "SVN log: FAILED"
return
if len(self.log_data) <= 0:
print "SVN log: FAILED"
# Repository without trunk dir
repo2 = create_repository(
'svn',
'https://svn.forge.morfeo-project.org/svn/libresoft-tools')
repo2.add_watch(LOG, log_cb)
try:
self.log_data = ""
repo2.rlog('octopus/trunk')
except:
print "SVN rlog: FAILED"
return
if len(self.log_data) <= 0:
print "SVN rlog: FAILED"
else:
print "SVN rlog: PASSED"
def diff(self):
try:
# Using a local path
self.repo.diff('/tmp/gnome-common', files=['ChangeLog'],
revs=['3900', '3901'])
except:
print "SVN diff: FAILED"
return
try:
            # Using a URI
self.repo.diff('http://svn.gnome.org/svn/gnome-common',
branch='gnome-2-16',
files=['macros/autogen.sh'],
revs=['3875', '2834'])
print "SVN diff: PASSED"
except:
print "SVN diff: FAILED"
def blame(self):
try:
# Local path with single file
self.repo.blame('/tmp/gnome-common/ChangeLog', rev='3900')
except:
print "SVN blame: FAILED"
return
try:
# Local path several files
self.repo.blame('/tmp/gnome-common/',
files=['autogen.sh', 'ChangeLog'])
except:
print "SVN blame: FAILED"
return
try:
# Remote uri
self.repo.blame(
'http://svn.gnome.org/svn/gnome-common/trunk/ChangeLog',
rev='3901')
except:
print "SVN blame: FAILED"
return
print "SVN blame: PASSED"
def get_modules(self):
try:
# First layout
repo = create_repository('svn',
'http://svn.gnome.org/svn/gnome-common')
module = repo.get_modules()
if module[0] != 'gnome-common':
print "SVN get_modules: FAILED"
return
except:
print "SVN get_modules: FAILED"
return
try:
# Second layout
repo = create_repository(
'svn',
'https://svn.forge.morfeo-project.org/svn/libresoft-tools')
modules = repo.get_modules()
if len(modules) <= 0:
print "SVN get_modules: FAILED"
return
except:
print "SVN get_modules: FAILED"
return
print "SVN get_modules: PASSED"
def get_last_revision(self):
try:
rev = self.repo.get_last_revision('/tmp/gnome-common')
if rev is not None:
print "SVN get_last_revision(%s): PASSED" % (rev)
return
except:
pass
print "SVN get_last_revision: FAILED"
def clean(self):
remove_directory('/tmp/gnome-common')
remove_directory('/tmp/gnome-common-2.16')
remove_directory('/tmp/octopus/')
remove_directory('/tmp/atunes')
register_test('svn', SVNTest)
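# --- Added illustrative sketch (not part of the original test) ---
# cat() and log() above share state with their watcher callbacks through a
# mutable object (a one-element list, or self.log_data) because Python 2 has
# no 'nonlocal': rebinding a plain local inside the nested callback would not
# be visible to the enclosing method. The same idiom in isolation:
def _count_lines_example(lines):
    n_lines = [0]  # mutable container shared with the callback
    def on_line(line, user_data=None):
        user_data[0] += 1  # mutate in place instead of rebinding
    for line in lines:
        on_line(line, n_lines)
    return n_lines[0]  # _count_lines_example(['a', 'b']) == 2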
|
pombredanne/RepositoryHandler
|
tests/svn.py
|
Python
|
gpl-2.0
| 7,720
|
nums = [11,22,33,44,55]
# Traversal using a while loop
#nums_lenght = len(nums)
#i = 0
#while i<nums_lenght:
# print(nums[i])
# i+=1
# Traversal using a for loop (simpler, because you do not have to manage the element count or the index yourself)
for num in nums:
print(num)
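# --- Added illustrative sketch (not part of the original file) ---
# The for loop above is the simplest way to visit every element. When the
# index is also needed, enumerate() provides both at once, so there is still
# no manual counter or length bookkeeping to maintain:
for i, num in enumerate(nums):
    print('%d: %d' % (i, num))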
|
jameswatt2008/jameswatt2008.github.io
|
python/Python基础/截图和代码/元组、函数-上/01-遍历列表的方式.py
|
Python
|
gpl-2.0
| 280
|
"""
Kodi urlresolver plugin
Copyright (C) 2016 script.module.urlresolver
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from lib import helpers
from urlresolver.resolver import UrlResolver, ResolverError
class VideocloudResolver(UrlResolver):
name = 'videocloud.co'
domains = ['videocloud.co']
pattern = '(?://|\.)(videocloud\.co)/(?:embed-)?([0-9a-zA-Z]+)'
def get_media_url(self, host, media_id):
return helpers.get_media_url(self.get_url(host, media_id))
def get_url(self, host, media_id):
return self._default_get_url(host, media_id)
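# --- Added illustrative sketch (not part of the original plugin) ---
# Shows how the class-level `pattern` above splits a URL into the
# (host, media_id) pair that get_url()/get_media_url() work with.
# The sample URL below is hypothetical and only demonstrates the regex.
if __name__ == '__main__':
    import re
    match = re.search(VideocloudResolver.pattern,
                      'http://videocloud.co/embed-abc123xyz.html')
    if match:
        host, media_id = match.groups()
        print('host=%s media_id=%s' % (host, media_id))  # videocloud.co abc123xyz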
|
kreatorkodi/repository.torrentbr
|
script.module.urlresolver/lib/urlresolver/plugins/videocloud.py
|
Python
|
gpl-2.0
| 1,197
|
"""
This page is in the table of contents.
Gcode_small is an export plugin to remove the comments and the redundant z and feed rate parameters from a gcode file.
An export plugin is a script in the export_plugins folder which has the getOutput function, the globalIsReplaceable variable and, if its output is not replaceable, the writeOutput function. It is meant to be run from the export tool. To ensure that the plugin works on platforms which do not handle file capitalization properly, give the plugin a lower case name.
The getOutput function of this script takes a gcode text and returns that text without comments and redundant z and feed rate parameters. The writeOutput function of this script takes a gcode text and writes that text without comments and redundant z and feed rate parameters to a file.
Many of the functions in this script are copied from gcodec in skeinforge_utilities. They are copied rather than imported so developers making new plugins do not have to learn about gcodec; the code here is all they need to learn.
"""
from __future__ import absolute_import
import cStringIO
import os
__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GPL 3.0'
# This is true if the output is text and false if it is binary.
globalIsReplaceable = True
def getOutput(gcodeText):
'Get the exported version of a gcode file.'
return GcodeSmallSkein().getCraftedGcode(gcodeText)
def getSplitLineBeforeBracketSemicolon(line):
"Get the split line before a bracket or semicolon."
bracketSemicolonIndex = min( line.find(';'), line.find('(') )
if bracketSemicolonIndex < 0:
return line.split()
return line[ : bracketSemicolonIndex ].split()
def getStringFromCharacterSplitLine(character, splitLine):
"Get the string after the first occurence of the character in the split line."
indexOfCharacter = indexOfStartingWithSecond(character, splitLine)
if indexOfCharacter < 0:
return None
return splitLine[indexOfCharacter][1 :]
def getSummarizedFileName(fileName):
"Get the fileName basename if the file is in the current working directory, otherwise return the original full name."
if os.getcwd() == os.path.dirname(fileName):
return os.path.basename(fileName)
return fileName
def getTextLines(text):
"Get the all the lines of text of a text."
return text.replace('\r', '\n').split('\n')
def indexOfStartingWithSecond(letter, splitLine):
"Get index of the first occurence of the given letter in the split line, starting with the second word. Return - 1 if letter is not found"
for wordIndex in xrange( 1, len(splitLine) ):
word = splitLine[ wordIndex ]
firstLetter = word[0]
if firstLetter == letter:
return wordIndex
return - 1
class GcodeSmallSkein:
"A class to remove redundant z and feed rate parameters from a skein of extrusions."
def __init__(self):
self.lastFeedRateString = None
self.lastZString = None
self.output = cStringIO.StringIO()
def getCraftedGcode( self, gcodeText ):
"Parse gcode text and store the gcode."
lines = getTextLines(gcodeText)
for line in lines:
self.parseLine(line)
return self.output.getvalue()
def parseLine(self, line):
"Parse a gcode line."
splitLine = getSplitLineBeforeBracketSemicolon(line)
if len(splitLine) < 1:
return
firstWord = splitLine[0]
if len(firstWord) < 1:
return
if firstWord[0] == '(':
return
if firstWord != 'G1':
self.output.write(line + '\n')
return
eString = getStringFromCharacterSplitLine('E', splitLine )
xString = getStringFromCharacterSplitLine('X', splitLine )
yString = getStringFromCharacterSplitLine('Y', splitLine )
zString = getStringFromCharacterSplitLine('Z', splitLine )
feedRateString = getStringFromCharacterSplitLine('F', splitLine )
self.output.write('G1')
if xString != None:
self.output.write(' X' + xString )
if yString != None:
self.output.write(' Y' + yString )
if zString != None and zString != self.lastZString:
self.output.write(' Z' + zString )
if feedRateString != None and feedRateString != self.lastFeedRateString:
self.output.write(' F' + feedRateString )
if eString != None:
self.output.write(' E' + eString )
self.lastFeedRateString = feedRateString
self.lastZString = zString
self.output.write('\n')
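# --- Added illustrative usage sketch (not part of the original plugin) ---
# A tiny demonstration of getOutput(): comments are dropped, and the Z and F
# parameters are only written when they change from the previous G1 line.
# The sample gcode below is made up for illustration.
if __name__ == '__main__':
	sampleGcode = '( a comment )\nG1 X1.0 Y1.0 Z0.3 F1800.0\nG1 X2.0 Y2.0 Z0.3 F1800.0 E0.5\nM103\n'
	# Expected output:
	#   G1 X1.0 Y1.0 Z0.3 F1800.0
	#   G1 X2.0 Y2.0 E0.5
	#   M103
	print(getOutput(sampleGcode))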
|
natetrue/ReplicatorG
|
skein_engines/skeinforge-31/skeinforge_application/skeinforge_plugins/craft_plugins/export_plugins/static_plugins/gcode_small.py
|
Python
|
gpl-2.0
| 4,295
|