| repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars, may be null) |
|---|---|---|---|---|
| pitch-sands/i-MPI | refs/heads/master | flask/Lib/site-packages/pip-1.5.6-py2.7.egg/pip/_vendor/requests/packages/urllib3/contrib/ntlmpool.py | 714 |

# urllib3/contrib/ntlmpool.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
NTLM authenticating pool, contributed by erikcederstran
Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
"""

try:
    from http.client import HTTPSConnection
except ImportError:
    from httplib import HTTPSConnection
from logging import getLogger
from ntlm import ntlm

from urllib3 import HTTPSConnectionPool

log = getLogger(__name__)


class NTLMConnectionPool(HTTPSConnectionPool):
    """
    Implements an NTLM authentication version of an urllib3 connection pool
    """

    scheme = 'https'

    def __init__(self, user, pw, authurl, *args, **kwargs):
        """
        authurl is a random URL on the server that is protected by NTLM.
        user is the Windows user, probably in the DOMAIN\\username format.
        pw is the password for the user.
        """
        super(NTLMConnectionPool, self).__init__(*args, **kwargs)
        self.authurl = authurl
        self.rawuser = user
        user_parts = user.split('\\', 1)
        self.domain = user_parts[0].upper()
        self.user = user_parts[1]
        self.pw = pw

    def _new_conn(self):
        # Performs the NTLM handshake that secures the connection. The socket
        # must be kept open while requests are performed.
        self.num_connections += 1
        log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s' %
                  (self.num_connections, self.host, self.authurl))

        headers = {}
        headers['Connection'] = 'Keep-Alive'
        req_header = 'Authorization'
        resp_header = 'www-authenticate'

        conn = HTTPSConnection(host=self.host, port=self.port)

        # Send negotiation message
        headers[req_header] = (
            'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser))
        log.debug('Request headers: %s' % headers)
        conn.request('GET', self.authurl, None, headers)
        res = conn.getresponse()
        reshdr = dict(res.getheaders())
        log.debug('Response status: %s %s' % (res.status, res.reason))
        log.debug('Response headers: %s' % reshdr)
        log.debug('Response data: %s [...]' % res.read(100))

        # Remove the reference to the socket, so that it can not be closed by
        # the response object (we want to keep the socket open)
        res.fp = None

        # Server should respond with a challenge message
        auth_header_values = reshdr[resp_header].split(', ')
        auth_header_value = None
        for s in auth_header_values:
            if s[:5] == 'NTLM ':
                auth_header_value = s[5:]
        if auth_header_value is None:
            raise Exception('Unexpected %s response header: %s' %
                            (resp_header, reshdr[resp_header]))

        # Send authentication message
        ServerChallenge, NegotiateFlags = \
            ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value)
        auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge,
                                                         self.user,
                                                         self.domain,
                                                         self.pw,
                                                         NegotiateFlags)
        headers[req_header] = 'NTLM %s' % auth_msg
        log.debug('Request headers: %s' % headers)
        conn.request('GET', self.authurl, None, headers)
        res = conn.getresponse()
        log.debug('Response status: %s %s' % (res.status, res.reason))
        log.debug('Response headers: %s' % dict(res.getheaders()))
        log.debug('Response data: %s [...]' % res.read()[:100])
        if res.status != 200:
            if res.status == 401:
                raise Exception('Server rejected request: wrong '
                                'username or password')
            raise Exception('Wrong server response: %s %s' %
                            (res.status, res.reason))

        res.fp = None
        log.debug('Connection established')
        return conn

    def urlopen(self, method, url, body=None, headers=None, retries=3,
                redirect=True, assert_same_host=True):
        if headers is None:
            headers = {}
        headers['Connection'] = 'Keep-Alive'
        return super(NTLMConnectionPool, self).urlopen(method, url, body,
                                                       headers, retries,
                                                       redirect,
                                                       assert_same_host)
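
A minimal usage sketch for the pool defined above, assuming the `ntlm` package it imports is installed; the host, credentials, and URL below are placeholders, not values from the original file:

# --- usage sketch (illustrative, not part of ntlmpool.py) ---
pool = NTLMConnectionPool(
    user='EXAMPLEDOMAIN\\jdoe',       # DOMAIN\\username form, as the docstring describes
    pw='secret',
    authurl='/ntlm-protected-path',   # any URL on the server protected by NTLM
    host='intranet.example.com',      # passed through to HTTPSConnectionPool
    port=443,
)
response = pool.urlopen('GET', '/some/resource')
print(response.status)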

| FlaPer87/django-nonrel | refs/heads/master | tests/regressiontests/bug639/models.py | 106 |

import tempfile

from django.db import models
from django.core.files.storage import FileSystemStorage
from django.forms import ModelForm

temp_storage_dir = tempfile.mkdtemp()
temp_storage = FileSystemStorage(temp_storage_dir)


class Photo(models.Model):
    title = models.CharField(max_length=30)
    image = models.FileField(storage=temp_storage, upload_to='tests')

    # Support code for the tests; this keeps track of how many times save()
    # gets called on each instance.
    def __init__(self, *args, **kwargs):
        super(Photo, self).__init__(*args, **kwargs)
        self._savecount = 0

    def save(self, force_insert=False, force_update=False):
        super(Photo, self).save(force_insert, force_update)
        self._savecount += 1


class PhotoForm(ModelForm):
    class Meta:
        model = Photo

| adobecs5/urp2015 | refs/heads/master | lib/python3.4/site-packages/pip/basecommand.py | 79 |

"""Base Command class, and related routines"""
from __future__ import absolute_import
import logging
import os
import sys
import traceback
import optparse
import warnings
from pip._vendor.six import StringIO
from pip import cmdoptions
from pip.locations import running_under_virtualenv
from pip.download import PipSession
from pip.exceptions import (BadCommand, InstallationError, UninstallationError,
CommandError, PreviousBuildDirError)
from pip.compat import logging_dictConfig
from pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
from pip.status_codes import (
SUCCESS, ERROR, UNKNOWN_ERROR, VIRTUALENV_NOT_FOUND,
PREVIOUS_BUILD_DIR_ERROR,
)
from pip.utils import appdirs, get_prog, normalize_path
from pip.utils.deprecation import RemovedInPip8Warning
from pip.utils.filesystem import check_path_owner
from pip.utils.logging import IndentingFormatter
from pip.utils.outdated import pip_version_check
__all__ = ['Command']
logger = logging.getLogger(__name__)
class Command(object):
name = None
usage = None
hidden = False
log_stream = "ext://sys.stdout"
def __init__(self, isolated=False):
parser_kw = {
'usage': self.usage,
'prog': '%s %s' % (get_prog(), self.name),
'formatter': UpdatingDefaultsHelpFormatter(),
'add_help_option': False,
'name': self.name,
'description': self.__doc__,
'isolated': isolated,
}
self.parser = ConfigOptionParser(**parser_kw)
# Commands should add options to this option group
optgroup_name = '%s Options' % self.name.capitalize()
self.cmd_opts = optparse.OptionGroup(self.parser, optgroup_name)
# Add the general options
gen_opts = cmdoptions.make_option_group(
cmdoptions.general_group,
self.parser,
)
self.parser.add_option_group(gen_opts)
def _build_session(self, options, retries=None, timeout=None):
session = PipSession(
cache=(
normalize_path(os.path.join(options.cache_dir, "http"))
if options.cache_dir else None
),
retries=retries if retries is not None else options.retries,
insecure_hosts=options.trusted_hosts,
)
# Handle custom ca-bundles from the user
if options.cert:
session.verify = options.cert
# Handle SSL client certificate
if options.client_cert:
session.cert = options.client_cert
# Handle timeouts
if options.timeout or timeout:
session.timeout = (
timeout if timeout is not None else options.timeout
)
# Handle configured proxies
if options.proxy:
session.proxies = {
"http": options.proxy,
"https": options.proxy,
}
# Determine if we can prompt the user for authentication or not
session.auth.prompting = not options.no_input
return session
def parse_args(self, args):
# factored out for testability
return self.parser.parse_args(args)
def main(self, args):
options, args = self.parse_args(args)
if options.quiet:
level = "WARNING"
elif options.verbose:
level = "DEBUG"
else:
level = "INFO"
# Compute the path for our debug log.
debug_log_path = os.path.join(appdirs.user_log_dir("pip"), "debug.log")
# Ensure that the path for our debug log is owned by the current user
# and if it is not, disable the debug log.
write_debug_log = check_path_owner(debug_log_path)
logging_dictConfig({
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"indent": {
"()": IndentingFormatter,
"format": (
"%(message)s"
if not options.log_explicit_levels
else "[%(levelname)s] %(message)s"
),
},
},
"handlers": {
"console": {
"level": level,
"class": "pip.utils.logging.ColorizedStreamHandler",
"stream": self.log_stream,
"formatter": "indent",
},
"debug_log": {
"level": "DEBUG",
"class": "pip.utils.logging.BetterRotatingFileHandler",
"filename": debug_log_path,
"maxBytes": 10 * 1000 * 1000, # 10 MB
"backupCount": 1,
"delay": True,
"formatter": "indent",
},
"user_log": {
"level": "DEBUG",
"class": "pip.utils.logging.BetterRotatingFileHandler",
"filename": options.log or "/dev/null",
"delay": True,
"formatter": "indent",
},
},
"root": {
"level": level,
"handlers": list(filter(None, [
"console",
"debug_log" if write_debug_log else None,
"user_log" if options.log else None,
])),
},
# Disable any logging besides WARNING unless we have DEBUG level
# logging enabled. These use both pip._vendor and the bare names
# for the case where someone unbundles our libraries.
"loggers": dict(
(
name,
{
"level": (
"WARNING"
if level in ["INFO", "ERROR"]
else "DEBUG"
),
},
)
for name in ["pip._vendor", "distlib", "requests", "urllib3"]
),
})
# We add this warning here instead of up above, because the logger
# hasn't been configured until just now.
if not write_debug_log:
logger.warning(
"The directory '%s' or its parent directory is not owned by "
"the current user and the debug log has been disabled. Please "
"check the permissions and owner of that directory. If "
"executing pip with sudo, you may want the -H flag.",
os.path.dirname(debug_log_path),
)
if options.log_explicit_levels:
warnings.warn(
"--log-explicit-levels has been deprecated and will be removed"
" in a future version.",
RemovedInPip8Warning,
)
# TODO: try to get these passing down from the command?
# without resorting to os.environ to hold these.
if options.no_input:
os.environ['PIP_NO_INPUT'] = '1'
if options.exists_action:
os.environ['PIP_EXISTS_ACTION'] = ' '.join(options.exists_action)
if options.require_venv:
# If a venv is required check if it can really be found
if not running_under_virtualenv():
logger.critical(
'Could not find an activated virtualenv (required).'
)
sys.exit(VIRTUALENV_NOT_FOUND)
# Check if we're using the latest version of pip available
if (not options.disable_pip_version_check
and not getattr(options, "no_index", False)):
with self._build_session(
options,
retries=0,
timeout=min(5, options.timeout)) as session:
pip_version_check(session)
try:
status = self.run(options, args)
# FIXME: all commands should return an exit status
# and when it is done, isinstance is not needed anymore
if isinstance(status, int):
return status
except PreviousBuildDirError as exc:
logger.critical(str(exc))
logger.debug('Exception information:\n%s', format_exc())
return PREVIOUS_BUILD_DIR_ERROR
except (InstallationError, UninstallationError, BadCommand) as exc:
logger.critical(str(exc))
logger.debug('Exception information:\n%s', format_exc())
return ERROR
except CommandError as exc:
logger.critical('ERROR: %s', exc)
logger.debug('Exception information:\n%s', format_exc())
return ERROR
except KeyboardInterrupt:
logger.critical('Operation cancelled by user')
logger.debug('Exception information:\n%s', format_exc())
return ERROR
except:
logger.critical('Exception:\n%s', format_exc())
return UNKNOWN_ERROR
return SUCCESS
def format_exc(exc_info=None):
if exc_info is None:
exc_info = sys.exc_info()
out = StringIO()
traceback.print_exception(*exc_info, **dict(file=out))
return out.getvalue()
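
To show how the `Command` base class above is intended to be used, here is a minimal, hypothetical subclass; the command name and option are invented for illustration and are not part of pip:

# --- illustrative sketch only; not a real pip command ---
class HelloCommand(Command):
    """Print a greeting."""
    name = 'hello'
    usage = '%prog hello [options]'

    def __init__(self, *args, **kw):
        super(HelloCommand, self).__init__(*args, **kw)
        # Command-specific options go into self.cmd_opts, as the base class comment says.
        self.cmd_opts.add_option(
            '--greet-name', dest='greet_name', default='world',
            help='Who to greet.')
        self.parser.add_option_group(self.cmd_opts)

    def run(self, options, args):
        logger.info('Hello, %s!', options.greet_name)
        return SUCCESS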

| lebabouin/CouchPotatoServer-develop | refs/heads/master | libs/pyutil/scripts/unsort.py | 106 |

#!/usr/bin/env python
# randomize the lines of stdin or a file

import random, sys

def main():
    if len(sys.argv) > 1:
        fname = sys.argv[1]
        inf = open(fname, 'r')
    else:
        inf = sys.stdin
    lines = inf.readlines()
    random.shuffle(lines)
    sys.stdout.writelines(lines)

if __name__ == '__main__':
    main()

| JamesMGreene/phantomjs | refs/heads/master | src/qt/qtwebkit/Tools/CygwinDownloader/cygwin-downloader.py | 120 |

#!/usr/bin/env python

import os, random, sys, time, urllib

#
# Options
#

dry_run = len(sys.argv) > 1 and "--dry-run" in set(sys.argv[1:])
quiet = len(sys.argv) > 1 and "--quiet" in set(sys.argv[1:])

#
# Functions and constants
#

def download_progress_hook(block_count, block_size, total_blocks):
    if quiet or random.random() > 0.5:
        return
    sys.stdout.write(".")
    sys.stdout.flush()

def download_url_to_file(url, file, message):
    if not quiet:
        print message + " ",
    if not dry_run:
        dir = os.path.dirname(file)
        if len(dir) and not os.path.exists(dir):
            os.makedirs(dir)
        urllib.urlretrieve(url, file, download_progress_hook)
    if not quiet:
        print

# This is mostly just the list of North America http mirrors from http://cygwin.com/mirrors.html,
# but a few have been removed that seemed unresponsive from Cupertino.
mirror_servers = ["http://cygwin.elite-systems.org/",
                  "http://mirror.mcs.anl.gov/cygwin/",
                  "http://cygwin.osuosl.org/",
                  "http://mirrors.kernel.org/sourceware/cygwin/",
                  "http://mirrors.xmission.com/cygwin/",
                  "http://sourceware.mirrors.tds.net/pub/sourceware.org/cygwin/"]

package_mirror_url = mirror_servers[random.choice(range(len(mirror_servers)))]

def download_package(package, message):
    download_url_to_file(package_mirror_url + package["path"], package["path"], message)

required_packages = frozenset(["apache",
                               "bc",
                               "bison",
                               "curl",
                               "diffutils",
                               "e2fsprogs",
                               "emacs",
                               "flex",
                               "gcc",
                               "gperf",
                               "keychain",
                               "make",
                               "minires",
                               "nano",
                               "openssh",
                               "patch",
                               "perl",
                               "perl-libwin32",
                               "python",
                               "rebase",
                               "rsync",
                               "ruby",
                               "subversion",
                               "unzip",
                               "vim",
                               "zip"])

#
# Main
#

print "Using Cygwin mirror server " + package_mirror_url + " to download setup.ini..."

urllib.urlretrieve(package_mirror_url + "setup.ini", "setup.ini.orig")
downloaded_packages_file_path = "setup.ini.orig"
downloaded_packages_file = file(downloaded_packages_file_path, "r")
if not dry_run:
    modified_packages_file = file("setup.ini", "w")

packages = {}
current_package = ''
for line in downloaded_packages_file.readlines():
    if line[0] == "@":
        current_package = line[2:-1]
        packages[current_package] = {"name": current_package, "needs_download": False, "requires": [], "path": ""}
    elif line[:10] == "category: ":
        if current_package in required_packages:
            line = "category: Base\n"
        if "Base" in set(line[10:-1].split()):
            packages[current_package]["needs_download"] = True
    elif line[:10] == "requires: ":
        packages[current_package]["requires"] = line[10:].split()
        packages[current_package]["requires"].sort()
    elif line[:9] == "install: " and not len(packages[current_package]["path"]):
        end_of_path = line.find(" ", 9)
        if end_of_path != -1:
            packages[current_package]["path"] = line[9:end_of_path]
    if not dry_run:
        modified_packages_file.write(line)

downloaded_packages_file.close()
os.remove(downloaded_packages_file_path)
if not dry_run:
    modified_packages_file.close()

names_to_download = set()
package_names = packages.keys()
package_names.sort()

def add_package_and_dependencies(name):
    if name in names_to_download:
        return
    if not name in packages:
        return
    packages[name]["needs_download"] = True
    names_to_download.add(name)
    for dep in packages[name]["requires"]:
        add_package_and_dependencies(dep)

for name in package_names:
    if packages[name]["needs_download"]:
        add_package_and_dependencies(name)

downloaded_so_far = 0
for name in package_names:
    if packages[name]["needs_download"]:
        downloaded_so_far += 1
        download_package(packages[name], "Downloading package %3d of %3d (%s)" % (downloaded_so_far, len(names_to_download), name))

download_url_to_file("http://cygwin.com/setup.exe", "setup.exe", "Downloading setup.exe")

seconds_to_sleep = 10

print """
Finished downloading Cygwin. In %d seconds,
I will run setup.exe. Select the "Install
from Local Directory" option and browse to
"%s"
when asked for the "Local Package Directory".
""" % (seconds_to_sleep, os.getcwd())

while seconds_to_sleep > 0:
    print "%d..." % seconds_to_sleep,
    sys.stdout.flush()
    time.sleep(1)
    seconds_to_sleep -= 1
print

if not dry_run:
    os.execl("setup.exe")

| jeroen92/reclass | refs/heads/master | setup.py | 3 |

#
# -*- coding: utf-8 -*-
#
# This file is part of reclass (http://github.com/madduck/reclass)
#
# Copyright © 2007–13 martin f. krafft <madduck@madduck.net>
# Released under the terms of the Artistic Licence 2.0
#
from reclass.version import *
from setuptools import setup, find_packages

ADAPTERS = ['salt', 'ansible']
console_scripts = ['reclass = reclass.cli:main']
console_scripts.extend('reclass-{0} = reclass.adapters.{0}:cli'.format(i)
                       for i in ADAPTERS)

setup(
    name = RECLASS_NAME,
    description = DESCRIPTION,
    version = VERSION,
    author = AUTHOR,
    author_email = AUTHOR_EMAIL,
    license = LICENCE,
    url = URL,
    packages = find_packages(),
    entry_points = { 'console_scripts': console_scripts },
    install_requires = ['pyyaml']
)
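
For reference, the comprehension above registers one console script per adapter; this sketch just spells out what `console_scripts` evaluates to, following directly from the ADAPTERS list:

# Illustration only: the entry points built above
expected = ['reclass = reclass.cli:main',
            'reclass-salt = reclass.adapters.salt:cli',
            'reclass-ansible = reclass.adapters.ansible:cli']
assert console_scripts == expected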

| rkmaddox/mne-python | refs/heads/master | mne/io/ctf/tests/test_ctf.py | 4 |

# Authors: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
import copy
import os
from os import path as op
import shutil
import numpy as np
from numpy import array_equal
from numpy.testing import assert_allclose, assert_array_equal
import pytest
import mne
from mne import (pick_types, read_annotations, create_info,
events_from_annotations, make_forward_solution)
from mne.transforms import apply_trans
from mne.io import read_raw_fif, read_raw_ctf, RawArray
from mne.io.compensator import get_current_comp
from mne.io.ctf.constants import CTF
from mne.io.tests.test_raw import _test_raw_reader
from mne.tests.test_annotations import _assert_annotations_equal
from mne.utils import _clean_names, catch_logging, _stamp_to_dt
from mne.datasets import testing, spm_face, brainstorm
from mne.io.constants import FIFF
ctf_dir = op.join(testing.data_path(download=False), 'CTF')
ctf_fname_continuous = 'testdata_ctf.ds'
ctf_fname_1_trial = 'testdata_ctf_short.ds'
ctf_fname_2_trials = 'testdata_ctf_pseudocontinuous.ds'
ctf_fname_discont = 'testdata_ctf_short_discontinuous.ds'
ctf_fname_somato = 'somMDYO-18av.ds'
ctf_fname_catch = 'catch-alp-good-f.ds'
somato_fname = op.join(
brainstorm.bst_raw.data_path(download=False), 'MEG', 'bst_raw',
'subj001_somatosensory_20111109_01_AUX-f.ds'
)
block_sizes = {
ctf_fname_continuous: 12000,
ctf_fname_1_trial: 4801,
ctf_fname_2_trials: 12000,
ctf_fname_discont: 1201,
ctf_fname_somato: 313,
ctf_fname_catch: 2500,
}
single_trials = (
ctf_fname_continuous,
ctf_fname_1_trial,
)
ctf_fnames = tuple(sorted(block_sizes.keys()))
@pytest.mark.slowtest
@testing.requires_testing_data
def test_read_ctf(tmpdir):
"""Test CTF reader."""
temp_dir = str(tmpdir)
out_fname = op.join(temp_dir, 'test_py_raw.fif')
# Create a dummy .eeg file so we can test our reading/application of it
os.mkdir(op.join(temp_dir, 'randpos'))
ctf_eeg_fname = op.join(temp_dir, 'randpos', ctf_fname_catch)
shutil.copytree(op.join(ctf_dir, ctf_fname_catch), ctf_eeg_fname)
with pytest.warns(RuntimeWarning, match='RMSP .* changed to a MISC ch'):
raw = _test_raw_reader(read_raw_ctf, directory=ctf_eeg_fname)
picks = pick_types(raw.info, meg=False, eeg=True)
pos = np.random.RandomState(42).randn(len(picks), 3)
fake_eeg_fname = op.join(ctf_eeg_fname, 'catch-alp-good-f.eeg')
# Create a bad file
with open(fake_eeg_fname, 'wb') as fid:
fid.write('foo\n'.encode('ascii'))
pytest.raises(RuntimeError, read_raw_ctf, ctf_eeg_fname)
# Create a good file
with open(fake_eeg_fname, 'wb') as fid:
for ii, ch_num in enumerate(picks):
args = (str(ch_num + 1), raw.ch_names[ch_num],) + tuple(
'%0.5f' % x for x in 100 * pos[ii]) # convert to cm
fid.write(('\t'.join(args) + '\n').encode('ascii'))
pos_read_old = np.array([raw.info['chs'][p]['loc'][:3] for p in picks])
with pytest.warns(RuntimeWarning, match='RMSP .* changed to a MISC ch'):
raw = read_raw_ctf(ctf_eeg_fname) # read modified data
pos_read = np.array([raw.info['chs'][p]['loc'][:3] for p in picks])
assert_allclose(apply_trans(raw.info['ctf_head_t'], pos), pos_read,
rtol=1e-5, atol=1e-5)
assert (pos_read == pos_read_old).mean() < 0.1
shutil.copy(op.join(ctf_dir, 'catch-alp-good-f.ds_randpos_raw.fif'),
op.join(temp_dir, 'randpos', 'catch-alp-good-f.ds_raw.fif'))
# Create a version with no hc, starting out *with* EEG pos (error)
os.mkdir(op.join(temp_dir, 'nohc'))
ctf_no_hc_fname = op.join(temp_dir, 'no_hc', ctf_fname_catch)
shutil.copytree(ctf_eeg_fname, ctf_no_hc_fname)
remove_base = op.join(ctf_no_hc_fname, op.basename(ctf_fname_catch[:-3]))
os.remove(remove_base + '.hc')
with pytest.warns(RuntimeWarning, match='MISC channel'):
pytest.raises(RuntimeError, read_raw_ctf, ctf_no_hc_fname)
os.remove(remove_base + '.eeg')
shutil.copy(op.join(ctf_dir, 'catch-alp-good-f.ds_nohc_raw.fif'),
op.join(temp_dir, 'no_hc', 'catch-alp-good-f.ds_raw.fif'))
# All our files
use_fnames = [op.join(ctf_dir, c) for c in ctf_fnames]
for fname in use_fnames:
raw_c = read_raw_fif(fname + '_raw.fif', preload=True)
with pytest.warns(None): # sometimes matches "MISC channel"
raw = read_raw_ctf(fname)
# check info match
assert_array_equal(raw.ch_names, raw_c.ch_names)
assert_allclose(raw.times, raw_c.times)
assert_allclose(raw._cals, raw_c._cals)
assert (raw.info['meas_id']['version'] ==
raw_c.info['meas_id']['version'] + 1)
for t in ('dev_head_t', 'dev_ctf_t', 'ctf_head_t'):
assert_allclose(raw.info[t]['trans'], raw_c.info[t]['trans'],
rtol=1e-4, atol=1e-7)
# XXX 2019/11/29 : MNE-C FIF conversion files don't have meas_date set.
# Consider adding meas_date to below checks once this is addressed in
# MNE-C
for key in ('acq_pars', 'acq_stim', 'bads',
'ch_names', 'custom_ref_applied', 'description',
'events', 'experimenter', 'highpass', 'line_freq',
'lowpass', 'nchan', 'proj_id', 'proj_name',
'projs', 'sfreq', 'subject_info'):
assert raw.info[key] == raw_c.info[key], key
if op.basename(fname) not in single_trials:
# We don't force buffer size to be smaller like MNE-C
assert raw.buffer_size_sec == raw_c.buffer_size_sec
assert len(raw.info['comps']) == len(raw_c.info['comps'])
for c1, c2 in zip(raw.info['comps'], raw_c.info['comps']):
for key in ('colcals', 'rowcals'):
assert_allclose(c1[key], c2[key])
assert c1['save_calibrated'] == c2['save_calibrated']
for key in ('row_names', 'col_names', 'nrow', 'ncol'):
assert_array_equal(c1['data'][key], c2['data'][key])
assert_allclose(c1['data']['data'], c2['data']['data'], atol=1e-7,
rtol=1e-5)
assert_allclose(raw.info['hpi_results'][0]['coord_trans']['trans'],
raw_c.info['hpi_results'][0]['coord_trans']['trans'],
rtol=1e-5, atol=1e-7)
assert len(raw.info['chs']) == len(raw_c.info['chs'])
for ii, (c1, c2) in enumerate(zip(raw.info['chs'], raw_c.info['chs'])):
for key in ('kind', 'scanno', 'unit', 'ch_name', 'unit_mul',
'range', 'coord_frame', 'coil_type', 'logno'):
if c1['ch_name'] == 'RMSP' and \
'catch-alp-good-f' in fname and \
key in ('kind', 'unit', 'coord_frame', 'coil_type',
'logno'):
continue # XXX see below...
if key == 'coil_type' and c1[key] == FIFF.FIFFV_COIL_EEG:
# XXX MNE-C bug that this is not set
assert c2[key] == FIFF.FIFFV_COIL_NONE
continue
assert c1[key] == c2[key], key
for key in ('cal',):
assert_allclose(c1[key], c2[key], atol=1e-6, rtol=1e-4,
err_msg='raw.info["chs"][%d][%s]' % (ii, key))
# XXX 2016/02/24: fixed bug with normal computation that used
# to exist, once mne-C tools are updated we should update our FIF
# conversion files, then the slices can go away (and the check
# can be combined with that for "cal")
for key in ('loc',):
if c1['ch_name'] == 'RMSP' and 'catch-alp-good-f' in fname:
continue
if (c2[key][:3] == 0.).all():
check = [np.nan] * 3
else:
check = c2[key][:3]
assert_allclose(c1[key][:3], check, atol=1e-6, rtol=1e-4,
err_msg='raw.info["chs"][%d][%s]' % (ii, key))
if (c2[key][3:] == 0.).all():
check = [np.nan] * 3
else:
check = c2[key][9:12]
assert_allclose(c1[key][9:12], check, atol=1e-6, rtol=1e-4,
err_msg='raw.info["chs"][%d][%s]' % (ii, key))
# Make sure all digitization points are in the MNE head coord frame
for p in raw.info['dig']:
assert p['coord_frame'] == FIFF.FIFFV_COORD_HEAD, \
'dig points must be in FIFF.FIFFV_COORD_HEAD'
if fname.endswith('catch-alp-good-f.ds'): # omit points from .pos file
raw.info['dig'] = raw.info['dig'][:-10]
# XXX: Next test would fail because c-tools assign the fiducials from
# CTF data as HPI. Should eventually clarify/unify with Matti.
# assert_dig_allclose(raw.info, raw_c.info)
# check data match
raw_c.save(out_fname, overwrite=True, buffer_size_sec=1.)
raw_read = read_raw_fif(out_fname)
# so let's check tricky cases based on sample boundaries
rng = np.random.RandomState(0)
pick_ch = rng.permutation(np.arange(len(raw.ch_names)))[:10]
bnd = int(round(raw.info['sfreq'] * raw.buffer_size_sec))
assert bnd == raw._raw_extras[0]['block_size']
assert bnd == block_sizes[op.basename(fname)]
slices = (slice(0, bnd), slice(bnd - 1, bnd), slice(3, bnd),
slice(3, 300), slice(None))
if len(raw.times) >= 2 * bnd: # at least two complete blocks
slices = slices + (slice(bnd, 2 * bnd), slice(bnd, bnd + 1),
slice(0, bnd + 100))
for sl_time in slices:
assert_allclose(raw[pick_ch, sl_time][0],
raw_c[pick_ch, sl_time][0])
assert_allclose(raw_read[pick_ch, sl_time][0],
raw_c[pick_ch, sl_time][0])
# all data / preload
raw.load_data()
assert_allclose(raw[:][0], raw_c[:][0], atol=1e-15)
# test bad segment annotations
if 'testdata_ctf_short.ds' in fname:
assert 'bad' in raw.annotations.description[0]
assert_allclose(raw.annotations.onset, [2.15])
assert_allclose(raw.annotations.duration, [0.0225])
with pytest.raises(TypeError, match='path-like'):
read_raw_ctf(1)
with pytest.raises(FileNotFoundError, match='does not exist'):
read_raw_ctf(ctf_fname_continuous + 'foo.ds')
# test ignoring of system clock
read_raw_ctf(op.join(ctf_dir, ctf_fname_continuous), 'ignore')
with pytest.raises(ValueError, match='system_clock'):
read_raw_ctf(op.join(ctf_dir, ctf_fname_continuous), 'foo')
@testing.requires_testing_data
def test_rawctf_clean_names():
"""Test RawCTF _clean_names method."""
# read test data
with pytest.warns(RuntimeWarning, match='ref channel RMSP did not'):
raw = read_raw_ctf(op.join(ctf_dir, ctf_fname_catch))
raw_cleaned = read_raw_ctf(op.join(ctf_dir, ctf_fname_catch),
clean_names=True)
test_channel_names = _clean_names(raw.ch_names)
test_info_comps = copy.deepcopy(raw.info['comps'])
# channel names should not be cleaned by default
assert raw.ch_names != test_channel_names
chs_ch_names = [ch['ch_name'] for ch in raw.info['chs']]
assert chs_ch_names != test_channel_names
for test_comp, comp in zip(test_info_comps, raw.info['comps']):
for key in ('row_names', 'col_names'):
assert not array_equal(_clean_names(test_comp['data'][key]),
comp['data'][key])
# channel names should be cleaned if clean_names=True
assert raw_cleaned.ch_names == test_channel_names
for ch, test_ch_name in zip(raw_cleaned.info['chs'], test_channel_names):
assert ch['ch_name'] == test_ch_name
for test_comp, comp in zip(test_info_comps, raw_cleaned.info['comps']):
for key in ('row_names', 'col_names'):
assert _clean_names(test_comp['data'][key]) == comp['data'][key]
@spm_face.requires_spm_data
def test_read_spm_ctf():
"""Test CTF reader with omitted samples."""
data_path = spm_face.data_path()
raw_fname = op.join(data_path, 'MEG', 'spm',
'SPM_CTF_MEG_example_faces1_3D.ds')
raw = read_raw_ctf(raw_fname)
extras = raw._raw_extras[0]
assert extras['n_samp'] == raw.n_times
assert extras['n_samp'] != extras['n_samp_tot']
# Test that LPA, nasion and RPA are correct.
coord_frames = np.array([d['coord_frame'] for d in raw.info['dig']])
assert np.all(coord_frames == FIFF.FIFFV_COORD_HEAD)
cardinals = {d['ident']: d['r'] for d in raw.info['dig']}
assert cardinals[1][0] < cardinals[2][0] < cardinals[3][0] # x coord
assert cardinals[1][1] < cardinals[2][1] # y coord
assert cardinals[3][1] < cardinals[2][1] # y coord
for key in cardinals.keys():
assert_allclose(cardinals[key][2], 0, atol=1e-6) # z coord
@testing.requires_testing_data
@pytest.mark.parametrize('comp_grade', [0, 1])
def test_saving_picked(tmpdir, comp_grade):
"""Test saving picked CTF instances."""
temp_dir = str(tmpdir)
out_fname = op.join(temp_dir, 'test_py_raw.fif')
raw = read_raw_ctf(op.join(ctf_dir, ctf_fname_1_trial))
assert raw.info['meas_date'] == _stamp_to_dt((1367228160, 0))
raw.crop(0, 1).load_data()
assert raw.compensation_grade == get_current_comp(raw.info) == 0
assert len(raw.info['comps']) == 5
pick_kwargs = dict(meg=True, ref_meg=False, verbose=True)
raw.apply_gradient_compensation(comp_grade)
with catch_logging() as log:
raw_pick = raw.copy().pick_types(**pick_kwargs)
assert len(raw.info['comps']) == 5
assert len(raw_pick.info['comps']) == 0
log = log.getvalue()
assert 'Removing 5 compensators' in log
raw_pick.save(out_fname, overwrite=True) # should work
raw2 = read_raw_fif(out_fname)
assert (raw_pick.ch_names == raw2.ch_names)
assert_array_equal(raw_pick.times, raw2.times)
assert_allclose(raw2[0:20][0], raw_pick[0:20][0], rtol=1e-6,
atol=1e-20) # atol is very small but > 0
raw2 = read_raw_fif(out_fname, preload=True)
assert (raw_pick.ch_names == raw2.ch_names)
assert_array_equal(raw_pick.times, raw2.times)
assert_allclose(raw2[0:20][0], raw_pick[0:20][0], rtol=1e-6,
atol=1e-20) # atol is very small but > 0
@brainstorm.bst_raw.requires_bstraw_data
def test_read_ctf_annotations():
"""Test reading CTF marker file."""
EXPECTED_LATENCIES = np.array([
5640, 7950, 9990, 12253, 14171, 16557, 18896, 20846, # noqa
22702, 24990, 26830, 28974, 30906, 33077, 34985, 36907, # noqa
38922, 40760, 42881, 45222, 47457, 49618, 51802, 54227, # noqa
56171, 58274, 60394, 62375, 64444, 66767, 68827, 71109, # noqa
73499, 75807, 78146, 80415, 82554, 84508, 86403, 88426, # noqa
90746, 92893, 94779, 96822, 98996, 99001, 100949, 103325, # noqa
105322, 107678, 109667, 111844, 113682, 115817, 117691, 119663, # noqa
121966, 123831, 126110, 128490, 130521, 132808, 135204, 137210, # noqa
139130, 141390, 143660, 145748, 147889, 150205, 152528, 154646, # noqa
156897, 159191, 161446, 163722, 166077, 168467, 170624, 172519, # noqa
174719, 176886, 179062, 181405, 183709, 186034, 188454, 190330, # noqa
192660, 194682, 196834, 199161, 201035, 203008, 204999, 207409, # noqa
209661, 211895, 213957, 216005, 218040, 220178, 222137, 224305, # noqa
226297, 228654, 230755, 232909, 235205, 237373, 239723, 241762, # noqa
243748, 245762, 247801, 250055, 251886, 254252, 256441, 258354, # noqa
260680, 263026, 265048, 267073, 269235, 271556, 273927, 276197, # noqa
278436, 280536, 282691, 284933, 287061, 288936, 290941, 293183, # noqa
295369, 297729, 299626, 301546, 303449, 305548, 307882, 310124, # noqa
312374, 314509, 316815, 318789, 320981, 322879, 324878, 326959, # noqa
329341, 331200, 331201, 333469, 335584, 337984, 340143, 342034, # noqa
344360, 346309, 348544, 350970, 353052, 355227, 357449, 359603, # noqa
361725, 363676, 365735, 367799, 369777, 371904, 373856, 376204, # noqa
378391, 380800, 382859, 385161, 387093, 389434, 391624, 393785, # noqa
396093, 398214, 400198, 402166, 404104, 406047, 408372, 410686, # noqa
413029, 414975, 416850, 418797, 420824, 422959, 425026, 427215, # noqa
429278, 431668 # noqa
]) - 1 # Fieldtrip has 1 sample difference with MNE
raw = RawArray(
data=np.empty((1, 432000), dtype=np.float64),
info=create_info(ch_names=1, sfreq=1200.0))
raw.set_meas_date(read_raw_ctf(somato_fname).info['meas_date'])
raw.set_annotations(read_annotations(somato_fname))
events, _ = events_from_annotations(raw)
latencies = np.sort(events[:, 0])
assert_allclose(latencies, EXPECTED_LATENCIES, atol=1e-6)
@testing.requires_testing_data
def test_read_ctf_annotations_smoke_test():
"""Test reading CTF marker file.
`testdata_ctf_mc.ds` has no trials or offsets, therefore it's a plain reading
of whatever is in the MarkerFile.mrk.
"""
EXPECTED_ONSET = [
0., 0.1425, 0.285, 0.42833333, 0.57083333, 0.71416667, 0.85666667,
0.99916667, 1.1425, 1.285, 1.4275, 1.57083333, 1.71333333, 1.85666667,
1.99916667, 2.14166667, 2.285, 2.4275, 2.57083333, 2.71333333,
2.85583333, 2.99916667, 3.14166667, 3.28416667, 3.4275, 3.57,
3.71333333, 3.85583333, 3.99833333, 4.14166667, 4.28416667, 4.42666667,
4.57, 4.7125, 4.85583333, 4.99833333
]
fname = op.join(ctf_dir, 'testdata_ctf_mc.ds')
annot = read_annotations(fname)
assert_allclose(annot.onset, EXPECTED_ONSET)
raw = read_raw_ctf(fname)
_assert_annotations_equal(raw.annotations, annot, 1e-6)
def _read_res4_mag_comp(dsdir):
res = mne.io.ctf.res4._read_res4(dsdir)
for ch in res['chs']:
if ch['sensor_type_index'] == CTF.CTFV_REF_MAG_CH:
ch['grad_order_no'] = 1
return res
def _bad_res4_grad_comp(dsdir):
res = mne.io.ctf.res4._read_res4(dsdir)
for ch in res['chs']:
if ch['sensor_type_index'] == CTF.CTFV_MEG_CH:
ch['grad_order_no'] = 1
break
return res
@testing.requires_testing_data
def test_read_ctf_mag_bad_comp(tmpdir, monkeypatch):
"""Test CTF reader with mag comps and bad comps."""
path = op.join(ctf_dir, ctf_fname_continuous)
raw_orig = read_raw_ctf(path)
assert raw_orig.compensation_grade == 0
monkeypatch.setattr(mne.io.ctf.ctf, '_read_res4', _read_res4_mag_comp)
raw_mag_comp = read_raw_ctf(path)
assert raw_mag_comp.compensation_grade == 0
sphere = mne.make_sphere_model()
src = mne.setup_volume_source_space(pos=50., exclude=5., bem=sphere)
assert src[0]['nuse'] == 26
for grade in (0, 1):
raw_orig.apply_gradient_compensation(grade)
raw_mag_comp.apply_gradient_compensation(grade)
args = (None, src, sphere, True, False)
fwd_orig = make_forward_solution(raw_orig.info, *args)
fwd_mag_comp = make_forward_solution(raw_mag_comp.info, *args)
assert_allclose(fwd_orig['sol']['data'], fwd_mag_comp['sol']['data'])
monkeypatch.setattr(mne.io.ctf.ctf, '_read_res4', _bad_res4_grad_comp)
with pytest.raises(RuntimeError, match='inconsistent compensation grade'):
read_raw_ctf(path)
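
Outside the test harness, the reader exercised above can be called directly; a minimal sketch, assuming a local CTF dataset directory (the path is a placeholder):

# --- minimal read sketch (hypothetical path), mirroring calls used in the tests above ---
import mne

raw = mne.io.read_raw_ctf('/path/to/testdata_ctf.ds', preload=True)
raw.apply_gradient_compensation(1)   # same API the compensation tests exercise
print(raw.info['sfreq'], len(raw.ch_names), raw.annotations)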

| bixbydev/Bixby | refs/heads/master | google/gdata-2.0.18/samples/analytics/account_feed_demo.py | 41 |

#!/usr/bin/python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sample Google Analytics Data Export API Account Feed application.
This sample demonstrates how to retrieve the important data from the Google
Analytics Data Export API Account feed using the Python Client library. This
requires a Google Analytics username and password and uses the Client Login
authorization routine.
Class AccountFeedDemo: Prints all the important Account Feed data.
"""
__author__ = 'api.nickm@google.com (Nick Mihailovski)'
import gdata.analytics.client
import gdata.sample_util
def main():
"""Main fucntion for the sample."""
demo = AccountFeedDemo()
demo.PrintFeedDetails()
demo.PrintAdvancedSegments()
demo.PrintCustomVarForOneEntry()
demo.PrintGoalsForOneEntry()
demo.PrintAccountEntries()
class AccountFeedDemo(object):
"""Prints the Google Analytics account feed
Attributes:
account_feed: Google Analytics AccountList returned form the API.
"""
def __init__(self):
"""Inits AccountFeedDemo."""
SOURCE_APP_NAME = 'Google-accountFeedDemoPython-v1'
my_client = gdata.analytics.client.AnalyticsClient(source=SOURCE_APP_NAME)
try:
gdata.sample_util.authorize_client(
my_client,
service=my_client.auth_service,
source=SOURCE_APP_NAME,
scopes=['https://www.google.com/analytics/feeds/'])
except gdata.client.BadAuthentication:
exit('Invalid user credentials given.')
except gdata.client.Error:
exit('Login Error')
account_query = gdata.analytics.client.AccountFeedQuery()
self.feed = my_client.GetAccountFeed(account_query)
def PrintFeedDetails(self):
"""Prints important Analytics related data found at the top of the feed."""
print '-------- Important Feed Data --------'
print 'Feed Title = ' + self.feed.title.text
print 'Feed Id = ' + self.feed.id.text
print 'Total Results Found = ' + self.feed.total_results.text
print 'Start Index = ' + self.feed.start_index.text
print 'Results Returned = ' + self.feed.items_per_page.text
def PrintAdvancedSegments(self):
"""Prints the advanced segments for this user."""
print '-------- Advanced Segments --------'
if not self.feed.segment:
print 'No advanced segments found'
else:
for segment in self.feed.segment:
print 'Segment Name = ' + segment.name
print 'Segment Id = ' + segment.id
print 'Segment Definition = ' + segment.definition.text
def PrintCustomVarForOneEntry(self):
"""Prints custom variable information for the first profile that has
custom variable configured."""
print '-------- Custom Variables --------'
if not self.feed.entry:
print 'No entries found'
else:
for entry in self.feed.entry:
if entry.custom_variable:
for custom_variable in entry.custom_variable:
print 'Custom Variable Index = ' + custom_variable.index
print 'Custom Variable Name = ' + custom_variable.name
print 'Custom Variable Scope = ' + custom_variable.scope
return
print 'No custom variables defined for this user'
def PrintGoalsForOneEntry(self):
"""Prints All the goal information for one profile."""
print '-------- Goal Configuration --------'
if not self.feed.entry:
print 'No entries found'
else:
for entry in self.feed.entry:
if entry.goal:
for goal in entry.goal:
print 'Goal Number = ' + goal.number
print 'Goal Name = ' + goal.name
print 'Goal Value = ' + goal.value
print 'Goal Active = ' + goal.active
if goal.destination:
self.PrintDestinationGoal(goal.destination)
elif goal.engagement:
self.PrintEngagementGoal(goal.engagement)
return
def PrintDestinationGoal(self, destination):
"""Prints the important information for destination goals including all
the configured steps if they exist.
Args:
destination: gdata.data.Destination The destination goal configuration.
"""
print '----- Destination Goal -----'
print 'Expression = ' + destination.expression
print 'Match Type = ' + destination.match_type
print 'Step 1 Required = ' + destination.step1_required
print 'Case Sensitive = ' + destination.case_sensitive
# Print goal steps.
if destination.step:
print '----- Destination Goal Steps -----'
for step in destination.step:
print 'Step Number = ' + step.number
print 'Step Name = ' + step.name
print 'Step Path = ' + step.path
def PrintEngagementGoal(self, engagement):
"""Prints the important information for engagement goals.
Args:
engagement: gdata.data.Engagement The engagement goal configuration.
"""
print '----- Engagement Goal -----'
print 'Goal Type = ' + engagement.type
print 'Goal Engagement = ' + engagement.comparison
print 'Goal Threshold = ' + engagement.threshold_value
def PrintAccountEntries(self):
"""Prints important Analytics data found in each account entry"""
print '-------- First 1000 Profiles in Account Feed --------'
if not self.feed.entry:
print 'No entries found'
else:
for entry in self.feed.entry:
print 'Web Property ID = ' + entry.GetProperty('ga:webPropertyId').value
print 'Account Name = ' + entry.GetProperty('ga:accountName').value
print 'Account Id = ' + entry.GetProperty('ga:accountId').value
print 'Profile Name = ' + entry.title.text
print 'Profile ID = ' + entry.GetProperty('ga:profileId').value
print 'Table ID = ' + entry.table_id.text
print 'Currency = ' + entry.GetProperty('ga:currency').value
print 'TimeZone = ' + entry.GetProperty('ga:timezone').value
if entry.custom_variable:
print 'This profile has custom variables'
if entry.goal:
print 'This profile has goals'
if __name__ == '__main__':
main()

| rruebner/odoo | refs/heads/master | addons/pad/__init__.py | 433 |

# -*- coding: utf-8 -*-
import pad
import res_company
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:

| indictranstech/fbd_erpnext | refs/heads/develop | erpnext/selling/report/sales_person_target_variance_item_group_wise/sales_person_target_variance_item_group_wise.py | 52 |

# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt

from __future__ import unicode_literals
import frappe
from frappe import _, msgprint
from frappe.utils import flt
from erpnext.accounts.utils import get_fiscal_year
from erpnext.controllers.trends import get_period_date_ranges, get_period_month_ranges

def execute(filters=None):
    if not filters: filters = {}

    columns = get_columns(filters)
    period_month_ranges = get_period_month_ranges(filters["period"], filters["fiscal_year"])
    sim_map = get_salesperson_item_month_map(filters)

    data = []
    for salesperson, salesperson_items in sim_map.items():
        for item_group, monthwise_data in salesperson_items.items():
            row = [salesperson, item_group]
            totals = [0, 0, 0]
            for relevant_months in period_month_ranges:
                period_data = [0, 0, 0]
                for month in relevant_months:
                    month_data = monthwise_data.get(month, {})
                    for i, fieldname in enumerate(["target", "achieved", "variance"]):
                        value = flt(month_data.get(fieldname))
                        period_data[i] += value
                        totals[i] += value
                period_data[2] = period_data[0] - period_data[1]
                row += period_data
            totals[2] = totals[0] - totals[1]
            row += totals
            data.append(row)

    return columns, sorted(data, key=lambda x: (x[0], x[1]))

def get_columns(filters):
    for fieldname in ["fiscal_year", "period", "target_on"]:
        if not filters.get(fieldname):
            label = (" ".join(fieldname.split("_"))).title()
            msgprint(_("Please specify") + ": " + label,
                raise_exception=True)

    columns = [_("Sales Person") + ":Link/Sales Person:120", _("Item Group") + ":Link/Item Group:120"]

    group_months = False if filters["period"] == "Monthly" else True

    for from_date, to_date in get_period_date_ranges(filters["period"], filters["fiscal_year"]):
        for label in [_("Target") + " (%s)", _("Achieved") + " (%s)", _("Variance") + " (%s)"]:
            if group_months:
                label = label % (_(from_date.strftime("%b")) + " - " + _(to_date.strftime("%b")))
            else:
                label = label % _(from_date.strftime("%b"))
            columns.append(label+":Float:120")

    return columns + [_("Total Target") + ":Float:120", _("Total Achieved") + ":Float:120",
        _("Total Variance") + ":Float:120"]

# Get sales person & item group details
def get_salesperson_details(filters):
    return frappe.db.sql("""select sp.name, td.item_group, td.target_qty,
        td.target_amount, sp.distribution_id
        from `tabSales Person` sp, `tabTarget Detail` td
        where td.parent=sp.name and td.fiscal_year=%s order by sp.name""",
        (filters["fiscal_year"]), as_dict=1)

# Get target distribution details of item group
def get_target_distribution_details(filters):
    target_details = {}

    for d in frappe.db.sql("""select md.name, mdp.month, mdp.percentage_allocation
        from `tabMonthly Distribution Percentage` mdp, `tabMonthly Distribution` md
        where mdp.parent=md.name and md.fiscal_year=%s""", (filters["fiscal_year"]), as_dict=1):
        target_details.setdefault(d.name, {}).setdefault(d.month, flt(d.percentage_allocation))

    return target_details

# Get achieved details from sales order
def get_achieved_details(filters):
    start_date, end_date = get_fiscal_year(fiscal_year = filters["fiscal_year"])[1:]

    item_details = frappe.db.sql("""select soi.item_code, soi.qty, soi.base_net_amount, so.transaction_date,
        st.sales_person, MONTHNAME(so.transaction_date) as month_name
        from `tabSales Order Item` soi, `tabSales Order` so, `tabSales Team` st
        where soi.parent=so.name and so.docstatus=1 and
        st.parent=so.name and so.transaction_date>=%s and
        so.transaction_date<=%s""" % ('%s', '%s'),
        (start_date, end_date), as_dict=1)

    item_actual_details = {}
    for d in item_details:
        item_actual_details.setdefault(d.sales_person, {}).setdefault(\
            get_item_group(d.item_code), []).append(d)

    return item_actual_details

def get_salesperson_item_month_map(filters):
    import datetime
    salesperson_details = get_salesperson_details(filters)
    tdd = get_target_distribution_details(filters)
    achieved_details = get_achieved_details(filters)

    sim_map = {}
    for sd in salesperson_details:
        for month_id in range(1, 13):
            month = datetime.date(2013, month_id, 1).strftime('%B')
            sim_map.setdefault(sd.name, {}).setdefault(sd.item_group, {})\
                .setdefault(month, frappe._dict({
                    "target": 0.0, "achieved": 0.0
                }))

            tav_dict = sim_map[sd.name][sd.item_group][month]
            month_percentage = tdd.get(sd.distribution_id, {}).get(month, 0) \
                if sd.distribution_id else 100.0/12

            for ad in achieved_details.get(sd.name, {}).get(sd.item_group, []):
                if (filters["target_on"] == "Quantity"):
                    tav_dict.target = flt(sd.target_qty) * month_percentage / 100
                    if ad.month_name == month:
                        tav_dict.achieved += ad.qty

                if (filters["target_on"] == "Amount"):
                    tav_dict.target = flt(sd.target_amount) * month_percentage / 100
                    if ad.month_name == month:
                        tav_dict.achieved += ad.base_net_amount

    return sim_map

def get_item_group(item_name):
    return frappe.db.get_value("Item", item_name, "item_group")
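
As a usage note, `execute` above is the report's entry point; a hedged sketch of calling it inside a configured Frappe/ERPNext site (the filter values are placeholders):

# --- illustrative only; requires an initialized frappe site with sales data ---
filters = {
    "fiscal_year": "2015-2016",   # hypothetical fiscal year name
    "period": "Monthly",
    "target_on": "Quantity",      # or "Amount"
}
columns, data = execute(filters)  # one row per (sales person, item group)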

| talhajaved/nyuadmarket | refs/heads/master | flask/lib/python2.7/site-packages/setuptools/tests/test_integration.py | 125 |

"""Run some integration tests.
Try to install a few packages.
"""
import glob
import os
import sys
import pytest
from setuptools.command.easy_install import easy_install
from setuptools.command import easy_install as easy_install_pkg
from setuptools.dist import Distribution
from setuptools.compat import urlopen
def setup_module(module):
packages = 'stevedore', 'virtualenvwrapper', 'pbr', 'novaclient'
for pkg in packages:
try:
__import__(pkg)
tmpl = "Integration tests cannot run when {pkg} is installed"
pytest.skip(tmpl.format(**locals()))
except ImportError:
pass
try:
urlopen('https://pypi.python.org/pypi')
except Exception as exc:
pytest.skip(reason=str(exc))
@pytest.fixture
def install_context(request, tmpdir, monkeypatch):
"""Fixture to set up temporary installation directory.
"""
# Save old values so we can restore them.
new_cwd = tmpdir.mkdir('cwd')
user_base = tmpdir.mkdir('user_base')
user_site = tmpdir.mkdir('user_site')
install_dir = tmpdir.mkdir('install_dir')
def fin():
# undo the monkeypatch, particularly needed under
# windows because of kept handle on cwd
monkeypatch.undo()
new_cwd.remove()
user_base.remove()
user_site.remove()
install_dir.remove()
request.addfinalizer(fin)
# Change the environment and site settings to control where the
# files are installed and ensure we do not overwrite anything.
monkeypatch.chdir(new_cwd)
monkeypatch.setattr(easy_install_pkg, '__file__', user_site.strpath)
monkeypatch.setattr('site.USER_BASE', user_base.strpath)
monkeypatch.setattr('site.USER_SITE', user_site.strpath)
monkeypatch.setattr('sys.path', sys.path + [install_dir.strpath])
monkeypatch.setenv('PYTHONPATH', os.path.pathsep.join(sys.path))
# Set up the command for performing the installation.
dist = Distribution()
cmd = easy_install(dist)
cmd.install_dir = install_dir.strpath
return cmd
def _install_one(requirement, cmd, pkgname, modulename):
cmd.args = [requirement]
cmd.ensure_finalized()
cmd.run()
target = cmd.install_dir
dest_path = glob.glob(os.path.join(target, pkgname + '*.egg'))
assert dest_path
assert os.path.exists(os.path.join(dest_path[0], pkgname, modulename))
def test_stevedore(install_context):
_install_one('stevedore', install_context,
'stevedore', 'extension.py')
@pytest.mark.xfail
def test_virtualenvwrapper(install_context):
_install_one('virtualenvwrapper', install_context,
'virtualenvwrapper', 'hook_loader.py')
def test_pbr(install_context):
_install_one('pbr', install_context,
'pbr', 'core.py')
@pytest.mark.xfail
def test_python_novaclient(install_context):
_install_one('python-novaclient', install_context,
'novaclient', 'base.py')

| ShefaliGups11/Implementation-of-SFB-in-ns-3 | refs/heads/master | bindings/python/rad_util.py | 212 |

# Copyright (c) 2007 RADLogic
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Provide various handy Python functions.
Running this script directly will execute the doctests.
Functions:
int2bin(i, n) -- Convert integer to binary string.
bin2int(bin_string) -- Convert binary string to integer.
reverse(input_string) -- Reverse a string.
transpose(matrix) -- Transpose a list of lists.
polygon_area(points_list) -- Calculate the area of an arbitrary polygon.
timestamp() -- Return string containing current time stamp.
pt2str(point) -- Return prettier string version of point tuple.
gcf(a, b) -- Return the greatest common factor of two numbers.
lcm(a, b) -- Return the least common multiple of two numbers.
permutations(input_list) -- Generate all permutations of a list of items.
reduce_fraction(fraction) -- Reduce fraction (num, denom) to simplest form.
quantile(l, p) -- Return p quantile of list l. E.g. p=0.25 for q1.
trim(l) -- Discard values in list more than 1.5*IQR outside IQR.
nice_units(value) -- Return value converted to human readable units.
uniquify(seq) -- Return sequence with duplicate items in sequence seq removed.
reverse_dict(d) -- Return the dictionary with the items as keys and vice-versa.
lsb(x, n) -- Return the n least significant bits of x.
gray_encode(i) -- Gray encode the given integer.
random_vec(bits, max_value=None) -- Return a random binary vector.
binary_range(bits) -- Return list of all possible binary numbers width=bits.
float_range([start], stop, [step]) -- Return range of floats.
find_common_fixes(s1, s2) -- Find common (prefix, suffix) of two strings.
is_rotated(seq1, seq2) -- Return true if the list is a rotation of other list.
getmodule(obj) -- Return the module that contains the object definition of obj.
(use inspect.getmodule instead, though)
get_args(argv) -- Store command-line args in a dictionary.
This module requires Python >= 2.2
"""
__author__ = 'Tim Wegener <twegener@radlogic.com.au>'
__date__ = '$Date: 2007/03/27 03:15:06 $'
__version__ = '$Revision: 0.45 $'
__credits__ = """
David Chandler, for polygon area algorithm.
(http://www.davidchandler.com/AreaOfAGeneralPolygon.pdf)
"""
import re
import sys
import time
import random
try:
True, False
except NameError:
True, False = (1==1, 0==1)
def int2bin(i, n):
"""Convert decimal integer i to n-bit binary number (string).
>>> int2bin(0, 8)
'00000000'
>>> int2bin(123, 8)
'01111011'
>>> int2bin(123L, 8)
'01111011'
>>> int2bin(15, 2)
Traceback (most recent call last):
ValueError: Value too large for given number of bits.
"""
hex2bin = {'0': '0000', '1': '0001', '2': '0010', '3': '0011',
'4': '0100', '5': '0101', '6': '0110', '7': '0111',
'8': '1000', '9': '1001', 'a': '1010', 'b': '1011',
'c': '1100', 'd': '1101', 'e': '1110', 'f': '1111'}
# Convert to hex then map each hex digit to binary equivalent.
result = ''.join([hex2bin[x] for x in hex(i).lower().replace('l','')[2:]])
# Shrink result to appropriate length.
# Raise an error if the value is changed by the truncation.
if '1' in result[:-n]:
raise ValueError("Value too large for given number of bits.")
result = result[-n:]
# Zero-pad if length longer than mapped result.
result = '0'*(n-len(result)) + result
return result
def bin2int(bin_string):
"""Convert binary number string to decimal integer.
Note: Python > v2 has int(bin_string, 2)
>>> bin2int('1111')
15
>>> bin2int('0101')
5
"""
## result = 0
## bin_list = list(bin_string)
## if len(filter(lambda x: x in ('1','0'), bin_list)) < len(bin_list):
## raise Exception ("bin2int: Error - not a binary number: %s"
## % bin_string)
## bit_list = map(int, bin_list)
## bit_list.reverse() # Make most significant bit have highest index.
## for bit_place in range(len(bit_list)):
## result = result + ((2**bit_place) * bit_list[bit_place])
## return result
return int(bin_string, 2)
def reverse(input_string):
"""Reverse a string. Useful for strings of binary numbers.
>>> reverse('abc')
'cba'
"""
str_list = list(input_string)
str_list.reverse()
return ''.join(str_list)
def transpose(matrix):
"""Transpose a list of lists.
>>> transpose([['a', 'b', 'c'], ['d', 'e', 'f'], ['g', 'h', 'i']])
[['a', 'd', 'g'], ['b', 'e', 'h'], ['c', 'f', 'i']]
>>> transpose([['a', 'b', 'c'], ['d', 'e', 'f']])
[['a', 'd'], ['b', 'e'], ['c', 'f']]
>>> transpose([['a', 'b'], ['d', 'e'], ['g', 'h']])
[['a', 'd', 'g'], ['b', 'e', 'h']]
"""
result = zip(*matrix)
# Convert list of tuples to list of lists.
# map is faster than a list comprehension since it is being used with
# a built-in function as an argument.
result = map(list, result)
return result
def polygon_area(points_list, precision=100):
"""Calculate area of an arbitrary polygon using an algorithm from the web.
Return the area of the polygon as a positive float.
Arguments:
points_list -- list of point tuples [(x0, y0), (x1, y1), (x2, y2), ...]
(Unclosed polygons will be closed automatically.)
precision -- Internal arithmetic precision (integer arithmetic).
>>> polygon_area([(0, 0), (0, 1), (1, 1), (1, 2), (2, 2), (2, 0), (0, 0)])
3.0
Credits:
Area of a General Polygon by David Chandler
http://www.davidchandler.com/AreaOfAGeneralPolygon.pdf
"""
# Scale up co-ordinates and convert them to integers.
for i in range(len(points_list)):
points_list[i] = (int(points_list[i][0] * precision),
int(points_list[i][1] * precision))
# Close polygon if not closed.
if points_list[-1] != points_list[0]:
points_list.append(points_list[0])
# Calculate area.
area = 0
for i in range(len(points_list)-1):
(x_i, y_i) = points_list[i]
(x_i_plus_1, y_i_plus_1) = points_list[i+1]
area = area + (x_i_plus_1 * y_i) - (y_i_plus_1 * x_i)
area = abs(area / 2)
# Unscale area.
area = float(area)/(precision**2)
return area
def timestamp():
"""Return string containing current time stamp.
Note: In Python 2 onwards can use time.asctime() with no arguments.
"""
return time.asctime()
def pt2str(point):
"""Return prettier string version of point tuple.
>>> pt2str((1.8, 1.9))
'(1.8, 1.9)'
"""
return "(%s, %s)" % (str(point[0]), str(point[1]))
def gcf(a, b, epsilon=1e-16):
"""Return the greatest common factor of a and b, using Euclidean algorithm.
Arguments:
a, b -- two numbers
If both numbers are integers return an integer result,
otherwise return a float result.
epsilon -- floats less than this magnitude are considered to be zero
(default: 1e-16)
Examples:
>>> gcf(12, 34)
2
>>> gcf(13.5, 4)
0.5
>>> gcf(-2, 4)
2
>>> gcf(5, 0)
5
By (a convenient) definition:
>>> gcf(0, 0)
0
"""
result = max(a, b)
remainder = min(a, b)
while remainder and abs(remainder) > epsilon:
new_remainder = result % remainder
result = remainder
remainder = new_remainder
return abs(result)
def lcm(a, b, precision=None):
"""Return the least common multiple of a and b, using the gcf function.
Arguments:
a, b -- two numbers. If both are integers return an integer result,
otherwise a return a float result.
precision -- scaling factor if a and/or b are floats.
>>> lcm(21, 6)
42
>>> lcm(2.5, 3.5)
17.5
>>> str(lcm(1.5e-8, 2.5e-8, precision=1e9))
'7.5e-08'
By (an arbitrary) definition:
>>> lcm(0, 0)
0
"""
# Note: Dummy precision argument is for backwards compatibility.
# Do the division first.
# (See http://en.wikipedia.org/wiki/Least_common_multiple )
denom = gcf(a, b)
if denom == 0:
result = 0
else:
result = a * (b / denom)
return result
def permutations(input_list):
"""Return a list containing all permutations of the input list.
Note: This is a recursive function.
>>> perms = permutations(['a', 'b', 'c'])
>>> perms.sort()
>>> for perm in perms:
... print perm
['a', 'b', 'c']
['a', 'c', 'b']
['b', 'a', 'c']
['b', 'c', 'a']
['c', 'a', 'b']
['c', 'b', 'a']
"""
out_lists = []
if len(input_list) > 1:
# Extract first item in list.
item = input_list[0]
# Find all permutations of remainder of list. (Recursive call.)
sub_lists = permutations(input_list[1:])
# For every permutation of the sub list...
for sub_list in sub_lists:
# Insert the extracted first item at every position of the list.
for i in range(len(input_list)):
new_list = sub_list[:]
new_list.insert(i, item)
out_lists.append(new_list)
else:
# Termination condition: only one item in input list.
out_lists = [input_list]
return out_lists
def reduce_fraction(fraction):
"""Reduce fraction tuple to simplest form. fraction=(num, denom)
>>> reduce_fraction((14, 7))
(2, 1)
>>> reduce_fraction((-2, 4))
(-1, 2)
>>> reduce_fraction((0, 4))
(0, 1)
>>> reduce_fraction((4, 0))
(1, 0)
"""
(numerator, denominator) = fraction
common_factor = abs(gcf(numerator, denominator))
result = (numerator/common_factor, denominator/common_factor)
return result
def quantile(l, p):
"""Return p quantile of list l. E.g. p=0.25 for q1.
See:
http://rweb.stat.umn.edu/R/library/base/html/quantile.html
"""
l_sort = l[:]
l_sort.sort()
n = len(l)
r = 1 + ((n - 1) * p)
i = int(r)
f = r - i
if i < n:
result = (1-f)*l_sort[i-1] + f*l_sort[i]
else:
result = l_sort[i-1]
return result
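# Added illustration (not part of the original module): quantile() follows the
# r = 1 + (n - 1)*p interpolation scheme referenced in the R link above,
# interpolating between adjacent order statistics. Assuming the implementation
# above:
# >>> quantile([1, 2, 3, 4], 0.25)
# 1.75
# >>> quantile([1, 2, 3, 4], 0.5)
# 2.5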
def trim(l):
"""Discard values in list more than 1.5*IQR outside IQR.
(IQR is inter-quartile-range)
This function uses rad_util.quantile
1.5*IQR -- mild outlier
3*IQR -- extreme outlier
See:
http://wind.cc.whecn.edu/~pwildman/statnew/section_7_-_exploratory_data_analysis.htm
"""
l_sort = l[:]
l_sort.sort()
# Calculate medianscore (based on stats.py lmedianscore by Gary Strangman)
if len(l_sort) % 2 == 0:
# If even number of scores, average middle 2.
index = int(len(l_sort) / 2) # Integer division correct
median = float(l_sort[index] + l_sort[index-1]) / 2
else:
# Integer division gives the middle index when counting from 0.
index = int(len(l_sort) / 2)
median = l_sort[index]
# Calculate IQR.
q1 = quantile(l_sort, 0.25)
q3 = quantile(l_sort, 0.75)
iqr = q3 - q1
iqr_extra = iqr * 1.5
def in_interval(x, i=iqr_extra, q1=q1, q3=q3):
return (x >= q1-i and x <= q3+i)
l_trimmed = [x for x in l_sort if in_interval(x)]
return l_trimmed
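# Added illustration (not part of the original module): with the 1.5*IQR rule
# above, a single extreme value is dropped while the bulk of the data is kept.
# >>> trim([1, 2, 3, 4, 100])
# [1, 2, 3, 4]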
def nice_units(value, dp=0, sigfigs=None, suffix='', space=' ',
use_extra_prefixes=False, use_full_name=False, mode='si'):
"""Return value converted to human readable units eg milli, micro, etc.
Arguments:
value -- number in base units
dp -- number of decimal places to display (rounded)
sigfigs -- number of significant figures to display (rounded)
This overrides dp if set.
suffix -- optional unit suffix to append to unit multiplier
space -- separator between value and unit multiplier (default: ' ')
use_extra_prefixes -- use hecto, deka, deci and centi as well if set.
(default: False)
use_full_name -- use full name for multiplier symbol,
e.g. milli instead of m
(default: False)
mode -- 'si' for SI prefixes, 'bin' for binary multipliers (1024, etc.)
(Default: 'si')
SI prefixes from:
http://physics.nist.gov/cuu/Units/prefixes.html
(Greek mu changed to u.)
Binary prefixes based on:
http://physics.nist.gov/cuu/Units/binary.html
>>> nice_units(2e-11)
'20 p'
>>> nice_units(2e-11, space='')
'20p'
"""
si_prefixes = {1e24: ('Y', 'yotta'),
1e21: ('Z', 'zetta'),
1e18: ('E', 'exa'),
1e15: ('P', 'peta'),
1e12: ('T', 'tera'),
1e9: ('G', 'giga'),
1e6: ('M', 'mega'),
1e3: ('k', 'kilo'),
1e-3: ('m', 'milli'),
1e-6: ('u', 'micro'),
1e-9: ('n', 'nano'),
1e-12: ('p', 'pico'),
1e-15: ('f', 'femto'),
1e-18: ('a', 'atto'),
1e-21: ('z', 'zepto'),
1e-24: ('y', 'yocto')
}
if use_extra_prefixes:
si_prefixes.update({1e2: ('h', 'hecto'),
1e1: ('da', 'deka'),
1e-1: ('d', 'deci'),
1e-2: ('c', 'centi')
})
bin_prefixes = {2**10: ('K', 'kilo'),
2**20: ('M', 'mega'),
2**30: ('G', 'giga'),
2**40: ('T', 'tera'),
2**50: ('P', 'peta'),
2**60: ('E', 'exa')
}
if mode == 'bin':
prefixes = bin_prefixes
else:
prefixes = si_prefixes
prefixes[1] = ('', '') # Unity.
# Determine appropriate multiplier.
multipliers = prefixes.keys()
multipliers.sort()
mult = None
for i in range(len(multipliers) - 1):
lower_mult = multipliers[i]
upper_mult = multipliers[i+1]
if lower_mult <= value < upper_mult:
mult_i = i
break
if mult is None:
if value < multipliers[0]:
mult_i = 0
elif value >= multipliers[-1]:
mult_i = len(multipliers) - 1
mult = multipliers[mult_i]
# Convert value for this multiplier.
new_value = value / mult
# Deal with special case due to rounding.
if sigfigs is None:
if mult_i < (len(multipliers) - 1) and \
round(new_value, dp) == \
round((multipliers[mult_i+1] / mult), dp):
mult = multipliers[mult_i + 1]
new_value = value / mult
# Concatenate multiplier symbol.
if use_full_name:
label_type = 1
else:
label_type = 0
# Round and truncate to appropriate precision.
if sigfigs is None:
str_value = '%.*f' % (dp, new_value)
else:
str_value = '%.*g' % (sigfigs, new_value)
return str_value + space + prefixes[mult][label_type] + suffix
def uniquify(seq, preserve_order=False):
"""Return sequence with duplicate items in sequence seq removed.
The code is based on a usenet post by Tim Peters.
This code is O(N) if the sequence items are hashable, O(N**2) if not.
Peter Bengtsson has a blog post with an empirical comparison of other
approaches:
http://www.peterbe.com/plog/uniqifiers-benchmark
If order is not important and the sequence items are hashable then
list(set(seq)) is readable and efficient.
If order is important and the sequence items are hashable generator
expressions can be used (in py >= 2.4) (useful for large sequences):
seen = set()
do_something(x for x in seq if x not in seen or seen.add(x))
Arguments:
seq -- sequence
preserve_order -- if not set the order will be arbitrary
Using this option will incur a speed penalty.
(default: False)
Example showing order preservation:
>>> uniquify(['a', 'aa', 'b', 'b', 'ccc', 'ccc', 'd'], preserve_order=True)
['a', 'aa', 'b', 'ccc', 'd']
Example using a sequence of un-hashable items:
>>> uniquify([['z'], ['x'], ['y'], ['z']], preserve_order=True)
[['z'], ['x'], ['y']]
The sorted output of the non-order-preserving approach should equal
the sorted output of the order-preserving approach:
>>> unordered = uniquify([3, 3, 1, 2], preserve_order=False)
>>> unordered.sort()
>>> ordered = uniquify([3, 3, 1, 2], preserve_order=True)
>>> ordered.sort()
>>> ordered
[1, 2, 3]
>>> int(ordered == unordered)
1
"""
try:
# Attempt fast algorithm.
d = {}
if preserve_order:
# This is based on Dave Kirby's method (f8) noted in the post:
# http://www.peterbe.com/plog/uniqifiers-benchmark
return [x for x in seq if (x not in d) and not d.__setitem__(x, 0)]
else:
for x in seq:
d[x] = 0
return d.keys()
except TypeError:
# Have an unhashable object, so use slow algorithm.
result = []
app = result.append
for x in seq:
if x not in result:
app(x)
return result
# Alias to noun form for backward compatibility.
unique = uniquify
def reverse_dict(d):
"""Reverse a dictionary so the items become the keys and vice-versa.
Note: The results will be arbitrary if the items are not unique.
>>> d = reverse_dict({'a': 1, 'b': 2})
>>> d_items = d.items()
>>> d_items.sort()
>>> d_items
[(1, 'a'), (2, 'b')]
"""
result = {}
for key, value in d.items():
result[value] = key
return result
def lsb(x, n):
"""Return the n least significant bits of x.
>>> lsb(13, 3)
5
"""
return x & ((2 ** n) - 1)
def gray_encode(i):
"""Gray encode the given integer."""
return i ^ (i >> 1)
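# Added illustration (not part of the original module): successive Gray codes
# differ in exactly one bit.
# >>> [gray_encode(i) for i in range(4)]
# [0, 1, 3, 2]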
def random_vec(bits, max_value=None):
"""Generate a random binary vector of length bits and given max value."""
vector = ""
for _ in range(int(bits / 10) + 1):
i = int((2**10) * random.random())
vector += int2bin(i, 10)
if max_value and (max_value < 2 ** bits - 1):
vector = int2bin((int(vector, 2) / (2 ** bits - 1)) * max_value, bits)
return vector[0:bits]
def binary_range(bits):
"""Return a list of all possible binary numbers in order with width=bits.
It would be nice to extend it to match the
functionality of python's range() built-in function.
"""
l = []
v = ['0'] * bits
toggle = [1] + [0] * bits
while toggle[bits] != 1:
v_copy = v[:]
v_copy.reverse()
l.append(''.join(v_copy))
toggle = [1] + [0]*bits
i = 0
while i < bits and toggle[i] == 1:
if toggle[i]:
if v[i] == '0':
v[i] = '1'
toggle[i+1] = 0
else:
v[i] = '0'
toggle[i+1] = 1
i += 1
return l
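# Added illustration (not part of the original module): binary_range() counts
# through every fixed-width bit pattern in ascending order.
# >>> binary_range(2)
# ['00', '01', '10', '11']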
def float_range(start, stop=None, step=None):
"""Return a list containing an arithmetic progression of floats.
Return a list of floats between 0.0 (or start) and stop with an
increment of step.
This is similar in functionality to Python's range() built-in function
but can accept float increments.
As with range(), stop is omitted from the list.
"""
if stop is None:
stop = float(start)
start = 0.0
if step is None:
step = 1.0
cur = float(start)
l = []
while cur < stop:
l.append(cur)
cur += step
return l
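# Added illustration (not part of the original module): like range(), the stop
# value itself is excluded. Note that repeated float addition can accumulate
# rounding error for step sizes that are not exactly representable in binary.
# >>> float_range(0, 1, 0.25)
# [0.0, 0.25, 0.5, 0.75]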
def find_common_fixes(s1, s2):
"""Find common (prefix, suffix) of two strings.
>>> find_common_fixes('abc', 'def')
('', '')
>>> find_common_fixes('abcelephantdef', 'abccowdef')
('abc', 'def')
>>> find_common_fixes('abcelephantdef', 'abccow')
('abc', '')
>>> find_common_fixes('elephantdef', 'abccowdef')
('', 'def')
"""
prefix = []
suffix = []
i = 0
common_len = min(len(s1), len(s2))
while i < common_len:
if s1[i] != s2[i]:
break
prefix.append(s1[i])
i += 1
i = 1
while i < (common_len + 1):
if s1[-i] != s2[-i]:
break
suffix.append(s1[-i])
i += 1
suffix.reverse()
prefix = ''.join(prefix)
suffix = ''.join(suffix)
return (prefix, suffix)
def is_rotated(seq1, seq2):
"""Return true if the first sequence is a rotation of the second sequence.
>>> seq1 = ['A', 'B', 'C', 'D']
>>> seq2 = ['C', 'D', 'A', 'B']
>>> int(is_rotated(seq1, seq2))
1
>>> seq2 = ['C', 'D', 'B', 'A']
>>> int(is_rotated(seq1, seq2))
0
>>> seq1 = ['A', 'B', 'C', 'A']
>>> seq2 = ['A', 'A', 'B', 'C']
>>> int(is_rotated(seq1, seq2))
1
>>> seq2 = ['A', 'B', 'C', 'A']
>>> int(is_rotated(seq1, seq2))
1
>>> seq2 = ['A', 'A', 'C', 'B']
>>> int(is_rotated(seq1, seq2))
0
"""
# Do a sanity check.
if len(seq1) != len(seq2):
return False
# Look for occurrences of second sequence head item in first sequence.
start_indexes = []
head_item = seq2[0]
for index1 in range(len(seq1)):
if seq1[index1] == head_item:
start_indexes.append(index1)
# Check that wrapped sequence matches.
double_seq1 = seq1 + seq1
for index1 in start_indexes:
if double_seq1[index1:index1+len(seq1)] == seq2:
return True
return False
def getmodule(obj):
"""Return the module that contains the object definition of obj.
Note: Use inspect.getmodule instead.
Arguments:
obj -- python obj, generally a class or a function
Examples:
A function:
>>> module = getmodule(random.choice)
>>> module.__name__
'random'
>>> module is random
1
A class:
>>> module = getmodule(random.Random)
>>> module.__name__
'random'
>>> module is random
1
A class inheriting from a class in another module:
(note: The inheriting class must define at least one function.)
>>> class MyRandom(random.Random):
... def play(self):
... pass
>>> module = getmodule(MyRandom)
>>> if __name__ == '__main__':
... name = 'rad_util'
... else:
... name = module.__name__
>>> name
'rad_util'
>>> module is sys.modules[__name__]
1
Discussion:
This approach is slightly hackish, and won't work in various situations.
However, this was the approach recommended by GvR, so it's as good as
you'll get.
See GvR's post in this thread:
http://groups.google.com.au/group/comp.lang.python/browse_thread/thread/966a7bdee07e3b34/c3cab3f41ea84236?lnk=st&q=python+determine+class+module&rnum=4&hl=en#c3cab3f41ea84236
"""
if hasattr(obj, 'func_globals'):
func = obj
else:
# Handle classes.
func = None
for item in obj.__dict__.values():
if hasattr(item, 'func_globals'):
func = item
break
if func is None:
raise ValueError("No functions attached to object: %r" % obj)
module_name = func.func_globals['__name__']
# Get module.
module = sys.modules[module_name]
return module
def round_grid(value, grid, mode=0):
"""Round off the given value to the given grid size.
Arguments:
value -- value to be rounded
grid -- result must be a multiple of this
mode -- 0 nearest, 1 up, -1 down
Examples:
>>> round_grid(7.5, 5)
10
>>> round_grid(7.5, 5, mode=-1)
5
>>> round_grid(7.3, 5, mode=1)
10
>>> round_grid(7.3, 5.0, mode=1)
10.0
"""
off_grid = value % grid
add_one = 0  # Default covers values already on the grid for modes 1 and -1.
if mode == 0:
add_one = int(off_grid >= (grid / 2.0))
elif mode == 1 and off_grid:
add_one = 1
elif mode == -1 and off_grid:
add_one = 0
result = ((int(value / grid) + add_one) * grid)
return result
def get_args(argv):
"""Store command-line args in a dictionary.
-, -- prefixes are removed
Items not prefixed with - or -- are stored as a list, indexed by 'args'
For options that take a value use --option=value
Consider using optparse or getopt (in Python standard library) instead.
"""
d = {}
args = []
for arg in argv:
if arg.startswith('-'):
parts = re.sub(r'^-+', '', arg).split('=')
if len(parts) == 2:
d[parts[0]] = parts[1]
else:
d[parts[0]] = None
else:
args.append(arg)
d['args'] = args
return d
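# Added illustration (not part of the original module): the argv list (typically
# sys.argv[1:]) is split into option/value pairs and positional arguments.
# >>> parsed = get_args(['--out=report.txt', '-v', 'input.dat'])
# >>> parsed == {'out': 'report.txt', 'v': None, 'args': ['input.dat']}
# True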
if __name__ == '__main__':
import doctest
doctest.testmod(sys.modules['__main__'])
|
vidonme/xbmc
|
refs/heads/isengard.vidon
|
tools/EventClients/lib/python/ps3/sixaxis.py
|
155
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2013 Team XBMC
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import time
import sys
import struct
import math
import binascii
from bluetooth import set_l2cap_mtu
SX_SELECT = 1 << 0
SX_L3 = 1 << 1
SX_R3 = 1 << 2
SX_START = 1 << 3
SX_DUP = 1 << 4
SX_DRIGHT = 1 << 5
SX_DDOWN = 1 << 6
SX_DLEFT = 1 << 7
SX_L2 = 1 << 8
SX_R2 = 1 << 9
SX_L1 = 1 << 10
SX_R1 = 1 << 11
SX_TRIANGLE = 1 << 12
SX_CIRCLE = 1 << 13
SX_X = 1 << 14
SX_SQUARE = 1 << 15
SX_POWER = 1 << 16
SX_LSTICK_X = 0
SX_LSTICK_Y = 1
SX_RSTICK_X = 2
SX_RSTICK_Y = 3
# (map, key, amount index, axis)
keymap_sixaxis = {
SX_X : ('XG', 'A', 0, 0),
SX_CIRCLE : ('XG', 'B', 0, 0),
SX_SQUARE : ('XG', 'X', 0, 0),
SX_TRIANGLE : ('XG', 'Y', 0, 0),
SX_DUP : ('XG', 'dpadup', 0, 0),
SX_DDOWN : ('XG', 'dpaddown', 0, 0),
SX_DLEFT : ('XG', 'dpadleft', 0, 0),
SX_DRIGHT : ('XG', 'dpadright', 0, 0),
SX_START : ('XG', 'start', 0, 0),
SX_SELECT : ('XG', 'back', 0, 0),
SX_R1 : ('XG', 'white', 0, 0),
SX_R2 : ('XG', 'rightanalogtrigger', 6, 1),
SX_L2 : ('XG', 'leftanalogtrigger', 5, 1),
SX_L1 : ('XG', 'black', 0, 0),
SX_L3 : ('XG', 'leftthumbbutton', 0, 0),
SX_R3 : ('XG', 'rightthumbbutton', 0, 0),
}
# data index -> (map, action for negative axis direction, action for positive axis direction)
axismap_sixaxis = {
SX_LSTICK_X : ('XG', 'leftthumbstickleft' , 'leftthumbstickright'),
SX_LSTICK_Y : ('XG', 'leftthumbstickup' , 'leftthumbstickdown'),
SX_RSTICK_X : ('XG', 'rightthumbstickleft', 'rightthumbstickright'),
SX_RSTICK_Y : ('XG', 'rightthumbstickup' , 'rightthumbstickdown'),
}
# To make sure all combination keys are checked first,
# we sort the keymap's button codes in reverse order.
# This guarantees that any bit-combined button code
# will be processed first.
keymap_sixaxis_keys = keymap_sixaxis.keys()
keymap_sixaxis_keys.sort()
keymap_sixaxis_keys.reverse()
def getkeys(bflags):
keys = [];
for k in keymap_sixaxis_keys:
if (k & bflags) == k:
keys.append(k)
bflags = bflags & ~k
return keys;
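# Added illustration (not part of the original code): getkeys() decomposes the
# packed button flags into the individual codes defined above, e.g.
#     getkeys(SX_X | SX_SELECT) -> [SX_X, SX_SELECT]
# Because the key list is sorted in descending order, a combined code (for
# example SX_L1 | SX_R1, if one were present in the keymap) would be matched
# before its component bits, which are then cleared from bflags.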
def normalize(val):
upperlimit = 65281
lowerlimit = 2
val_range = upperlimit - lowerlimit
offset = 10000
val = (val + val_range / 2) % val_range
upperlimit -= offset
lowerlimit += offset
if val < lowerlimit:
val = lowerlimit
if val > upperlimit:
val = upperlimit
val = ((float(val) - offset) / (float(upperlimit) -
lowerlimit)) * 65535.0
if val <= 0:
val = 1
return val
def normalize_axis(val, deadzone):
val = float(val) - 127.5
val = val / 127.5
if abs(val) < deadzone:
return 0.0
if val > 0.0:
val = (val - deadzone) / (1.0 - deadzone)
else:
val = (val + deadzone) / (1.0 - deadzone)
return 65536.0 * val
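# Added illustration (not part of the original code): normalize_axis() maps the
# raw 0..255 stick byte to roughly -65536..65536 with a centre dead zone.
# With deadzone=0.30: normalize_axis(127, 0.30) and normalize_axis(128, 0.30)
# fall inside the dead zone and return 0.0, while normalize_axis(255, 0.30)
# returns 65536.0 and normalize_axis(0, 0.30) returns -65536.0.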
def normalize_angle(val, valrange):
valrange *= 2
val = val / valrange
if val > 1.0:
val = 1.0
if val < -1.0:
val = -1.0
return (val + 0.5) * 65535.0
def average(array):
val = 0
for i in array:
val += i
return val / len(array)
def smooth(arr, val):
cnt = len(arr)
arr.insert(0, val)
arr.pop(cnt)
return average(arr)
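# Added illustration (not part of the original code): smooth() keeps a rolling
# window in arr (which is modified in place) and returns the moving average.
#     arr = [0.0, 0.0, 0.0, 0.0]
#     smooth(arr, 4.0)  ->  1.0, and arr is now [4.0, 0.0, 0.0, 0.0]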
def set_l2cap_mtu2(sock, mtu):
SOL_L2CAP = 6
L2CAP_OPTIONS = 1
s = sock.getsockopt (SOL_L2CAP, L2CAP_OPTIONS, 12)
o = list( struct.unpack ("HHHBBBH", s) )
o[0] = o[1] = mtu
s = struct.pack ("HHHBBBH", *o)
try:
sock.setsockopt (SOL_L2CAP, L2CAP_OPTIONS, s)
except:
print "Warning: Unable to set mtu"
class sixaxis():
def __init__(self, xbmc, control_sock, interrupt_sock):
self.xbmc = xbmc
self.num_samples = 16
self.sumx = [0] * self.num_samples
self.sumy = [0] * self.num_samples
self.sumr = [0] * self.num_samples
self.axis_amount = [0, 0, 0, 0]
self.released = set()
self.pressed = set()
self.pending = set()
self.held = set()
self.psflags = 0
self.psdown = 0
self.mouse_enabled = 0
set_l2cap_mtu2(control_sock, 64)
set_l2cap_mtu2(interrupt_sock, 64)
time.sleep(0.25) # If we ask too quickly here, it sometimes doesn't start
# sixaxis needs this to enable it
# 0x53 => HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_FEATURE
control_sock.send("\x53\xf4\x42\x03\x00\x00")
data = control_sock.recv(1)
# This command will turn on the gyro and set the leds
# I wonder if turning on the gyro makes it draw more current??
# it's probably a flag somewhere in the following command
# HID Command: HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_OUTPUT
# HID Report:1
bytes = [0x52, 0x1]
bytes.extend([0x00, 0x00, 0x00])
bytes.extend([0xFF, 0x72])
bytes.extend([0x00, 0x00, 0x00, 0x00])
bytes.extend([0x02]) # 0x02 LED1, 0x04 LED2 ... 0x10 LED4
# The following sections should set the blink frequency of
# the leds on the controller, but I've not figured out how.
# These values were suggested on a mailing list, but no explanation
# was given for how they should be combined into the 5 bytes per led.
#0xFF = 0.5Hz
#0x80 = 1Hz
#0x40 = 2Hz
bytes.extend([0xFF, 0x00, 0x01, 0x00, 0x01]) #LED4 [0xff, 0xff, 0x10, 0x10, 0x10]
bytes.extend([0xFF, 0x00, 0x01, 0x00, 0x01]) #LED3 [0xff, 0x40, 0x08, 0x10, 0x10]
bytes.extend([0xFF, 0x00, 0x01, 0x00, 0x01]) #LED2 [0xff, 0x00, 0x10, 0x30, 0x30]
bytes.extend([0xFF, 0x00, 0x01, 0x00, 0x01]) #LED1 [0xff, 0x00, 0x10, 0x40, 0x10]
bytes.extend([0x00, 0x00, 0x00, 0x00, 0x00])
bytes.extend([0x00, 0x00, 0x00, 0x00, 0x00])
control_sock.send(struct.pack("42B", *bytes))
data = control_sock.recv(1)
def __del__(self):
self.close()
def close(self):
for key in (self.held | self.pressed):
(mapname, action, amount, axis) = keymap_sixaxis[key]
self.xbmc.send_button_state(map=mapname, button=action, amount=0, down=0, axis=axis)
self.held = set()
self.pressed = set()
def process_socket(self, isock):
data = isock.recv(50)
if data is None:
return False
return self.process_data(data)
def process_data(self, data):
if len(data) < 3:
return False
# make sure this is the correct report
if struct.unpack("BBB", data[0:3]) != (0xa1, 0x01, 0x00):
return False
if len(data) >= 48:
v1 = struct.unpack("h", data[42:44])
v2 = struct.unpack("h", data[44:46])
v3 = struct.unpack("h", data[46:48])
else:
v1 = [0,0]
v2 = [0,0]
v3 = [0,0]
if len(data) >= 50:
v4 = struct.unpack("h", data[48:50])
else:
v4 = [0,0]
ax = float(v1[0])
ay = float(v2[0])
az = float(v3[0])
rz = float(v4[0])
at = math.sqrt(ax*ax + ay*ay + az*az)
bflags = struct.unpack("<I", data[3:7])[0]
if len(data) > 27:
pressure = struct.unpack("BBBBBBBBBBBB", data[15:27])
else:
pressure = [0] * 12  # Matches the 12 pressure bytes unpacked above.
roll = -math.atan2(ax, math.sqrt(ay*ay + az*az))
pitch = math.atan2(ay, math.sqrt(ax*ax + az*az))
pitch -= math.radians(20);
xpos = normalize_angle(roll, math.radians(30))
ypos = normalize_angle(pitch, math.radians(30))
axis = struct.unpack("BBBB", data[7:11])
return self.process_input(bflags, pressure, axis, xpos, ypos)
def process_input(self, bflags, pressure, axis, xpos, ypos):
xval = smooth(self.sumx, xpos)
yval = smooth(self.sumy, ypos)
analog = False
for i in range(4):
config = axismap_sixaxis[i]
self.axis_amount[i] = self.send_singleaxis(axis[i], self.axis_amount[i], config[0], config[1], config[2])
if self.axis_amount[i] != 0:
analog = True
# send the mouse position to xbmc
if self.mouse_enabled == 1:
self.xbmc.send_mouse_position(xval, yval)
if (bflags & SX_POWER) == SX_POWER:
if self.psdown:
if (time.time() - self.psdown) > 5:
for key in (self.held | self.pressed):
(mapname, action, amount, axis) = keymap_sixaxis[key]
self.xbmc.send_button_state(map=mapname, button=action, amount=0, down=0, axis=axis)
raise Exception("PS3 Sixaxis powering off, user request")
else:
self.psdown = time.time()
else:
if self.psdown:
self.mouse_enabled = 1 - self.mouse_enabled
self.psdown = 0
keys = set(getkeys(bflags))
self.released = (self.pressed | self.held) - keys
self.held = (self.pressed | self.held) - self.released
self.pressed = (keys - self.held) & self.pending
self.pending = (keys - self.held)
for key in self.released:
(mapname, action, amount, axis) = keymap_sixaxis[key]
self.xbmc.send_button_state(map=mapname, button=action, amount=0, down=0, axis=axis)
for key in self.held:
(mapname, action, amount, axis) = keymap_sixaxis[key]
if amount > 0:
amount = pressure[amount-1] * 256
self.xbmc.send_button_state(map=mapname, button=action, amount=amount, down=1, axis=axis)
for key in self.pressed:
(mapname, action, amount, axis) = keymap_sixaxis[key]
if amount > 0:
amount = pressure[amount-1] * 256
self.xbmc.send_button_state(map=mapname, button=action, amount=amount, down=1, axis=axis)
if analog or keys or self.mouse_enabled:
return True
else:
return False
def send_singleaxis(self, axis, last_amount, mapname, action_min, action_pos):
amount = normalize_axis(axis, 0.30)
if last_amount < 0:
last_action = action_min
elif last_amount > 0:
last_action = action_pos
else:
last_action = None
if amount < 0:
new_action = action_min
elif amount > 0:
new_action = action_pos
else:
new_action = None
if last_action and new_action != last_action:
self.xbmc.send_button_state(map=mapname, button=last_action, amount=0, axis=1)
if new_action and amount != last_amount:
self.xbmc.send_button_state(map=mapname, button=new_action, amount=abs(amount), axis=1)
return amount
|
ageron/tensorflow
|
refs/heads/master
|
tensorflow/python/compat/compat.py
|
1
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for API compatibility between TensorFlow release versions.
See [Version
Compatibility](https://tensorflow.org/guide/version_compat#backward_forward)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2019, 3, 4)
@tf_export("compat.forward_compatible")
def forward_compatible(year, month, day):
"""Return true if the forward compatibility window has expired.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
Forward-compatibility refers to scenarios where the producer of a TensorFlow
model (a GraphDef or SavedModel) is compiled against a version of the
TensorFlow library newer than what the consumer was compiled against. The
"producer" is typically a Python program that constructs and trains a model
while the "consumer" is typically another program that loads and serves the
model.
TensorFlow has been supporting a 3 week forward-compatibility window for
programs compiled from source at HEAD.
For example, consider the case where a new operation `MyNewAwesomeAdd` is
created with the intent of replacing the implementation of an existing Python
wrapper - `tf.add`. The Python wrapper implementation should change from
something like:
```python
def add(inputs, name=None):
return gen_math_ops.add(inputs, name)
```
to:
```python
from tensorflow.python.compat import compat
def add(inputs, name=None):
if compat.forward_compatible(year, month, day):
# Can use the awesome new implementation.
return gen_math_ops.my_new_awesome_add(inputs, name)
# To maintain forward compatibility, use the old implementation.
return gen_math_ops.add(inputs, name)
```
Where `year`, `month`, and `day` specify the date beyond which binaries
that consume a model are expected to have been updated to include the
new operations. This date is typically at least 3 weeks beyond the date
the code that adds the new operation is committed.
Args:
year: A year (e.g., 2018).
month: A month (1 <= month <= 12) in year.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month.
Returns:
True if the caller can expect that serialized TensorFlow graphs produced
can be consumed by programs that are compiled with the TensorFlow library
source code after (year, month, day).
"""
return _FORWARD_COMPATIBILITY_HORIZON > datetime.date(year, month, day)
@tf_export("compat.forward_compatibility_horizon")
@tf_contextlib.contextmanager
def forward_compatibility_horizon(year, month, day):
"""Context manager for testing forward compatibility of generated graphs.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
To ensure forward compatibility of generated graphs (see `forward_compatible`)
with older binaries, new features can be gated with:
```python
if compat.forward_compatible(year=2018, month=8, day=1):
generate_graph_with_new_features()
else:
generate_graph_so_older_binaries_can_consume_it()
```
However, when adding new features, one may want to unittest it before
the forward compatibility window expires. This context manager enables
such tests. For example:
```python
from tensorflow.python.compat import compat
def testMyNewFeature(self):
with compat.forward_compatibility_horizon(2018, 8, 2):
# Test that generate_graph_with_new_features() has an effect
```
Args:
year: A year (e.g. 2018).
month: A month (1 <= month <= 12) in year.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month.
Yields:
Nothing.
"""
global _FORWARD_COMPATIBILITY_HORIZON
try:
old_compat_date = _FORWARD_COMPATIBILITY_HORIZON
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(year, month, day)
yield
finally:
_FORWARD_COMPATIBILITY_HORIZON = old_compat_date
|
M4573R/BuildingMachineLearningSystemsWithPython
|
refs/heads/master
|
ch02/load.py
|
25
|
# This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import numpy as np
def load_dataset(dataset_name):
'''
data,labels = load_dataset(dataset_name)
Load a given dataset
Returns
-------
data : numpy ndarray
labels : list of str
'''
data = []
labels = []
with open('./data/{0}.tsv'.format(dataset_name)) as ifile:
for line in ifile:
tokens = line.strip().split('\t')
data.append([float(tk) for tk in tokens[:-1]])
labels.append(tokens[-1])
data = np.array(data)
labels = np.array(labels)
return data, labels
|
htzy/bigfour
|
refs/heads/master
|
common/lib/xmodule/xmodule/video_module/video_xfields.py
|
16
|
"""
XFields for video module.
"""
import datetime
from xblock.fields import Scope, String, Float, Boolean, List, Dict, DateTime
from xmodule.fields import RelativeTime
from xmodule.mixin import LicenseMixin
# Make '_' a no-op so we can scrape strings
_ = lambda text: text
class VideoFields(object):
"""Fields for `VideoModule` and `VideoDescriptor`."""
display_name = String(
help=_("The name students see. This name appears in the course ribbon and as a header for the video."),
display_name=_("Component Display Name"),
default="Video",
scope=Scope.settings
)
saved_video_position = RelativeTime(
help=_("Current position in the video."),
scope=Scope.user_state,
default=datetime.timedelta(seconds=0)
)
# TODO: This should be moved to Scope.content, but this will
# require data migration to support the old video module.
youtube_id_1_0 = String(
help=_("Optional, for older browsers: the YouTube ID for the normal speed video."),
display_name=_("YouTube ID"),
scope=Scope.settings,
default="3_yD_cEKoCk"
)
youtube_id_0_75 = String(
help=_("Optional, for older browsers: the YouTube ID for the .75x speed video."),
display_name=_("YouTube ID for .75x speed"),
scope=Scope.settings,
default=""
)
youtube_id_1_25 = String(
help=_("Optional, for older browsers: the YouTube ID for the 1.25x speed video."),
display_name=_("YouTube ID for 1.25x speed"),
scope=Scope.settings,
default=""
)
youtube_id_1_5 = String(
help=_("Optional, for older browsers: the YouTube ID for the 1.5x speed video."),
display_name=_("YouTube ID for 1.5x speed"),
scope=Scope.settings,
default=""
)
start_time = RelativeTime( # datetime.timedelta object
help=_(
"Time you want the video to start if you don't want the entire video to play. "
"Not supported in the native mobile app: the full video file will play. "
"Formatted as HH:MM:SS. The maximum value is 23:59:59."
),
display_name=_("Video Start Time"),
scope=Scope.settings,
default=datetime.timedelta(seconds=0)
)
end_time = RelativeTime( # datetime.timedelta object
help=_(
"Time you want the video to stop if you don't want the entire video to play. "
"Not supported in the native mobile app: the full video file will play. "
"Formatted as HH:MM:SS. The maximum value is 23:59:59."
),
display_name=_("Video Stop Time"),
scope=Scope.settings,
default=datetime.timedelta(seconds=0)
)
# Front-end code of the video player checks the logical validity of the (start_time, end_time) pair.
# `source` is a deprecated field and should not be used in the future.
# `download_video` is used instead.
source = String(
help=_("The external URL to download the video."),
display_name=_("Download Video"),
scope=Scope.settings,
default=""
)
download_video = Boolean(
help=_("Allow students to download versions of this video in different formats if they cannot use the edX video player or do not have access to YouTube. You must add at least one non-YouTube URL in the Video File URLs field."),
display_name=_("Video Download Allowed"),
scope=Scope.settings,
default=False
)
html5_sources = List(
help=_("The URL or URLs where you've posted non-YouTube versions of the video. Each URL must end in .mpeg, .mp4, .ogg, or .webm and cannot be a YouTube URL. (For browser compatibility, we strongly recommend .mp4 and .webm format.) Students will be able to view the first listed video that's compatible with the student's computer. To allow students to download these videos, set Video Download Allowed to True."),
display_name=_("Video File URLs"),
scope=Scope.settings,
)
track = String(
help=_("By default, students can download an .srt or .txt transcript when you set Download Transcript Allowed to True. If you want to provide a downloadable transcript in a different format, we recommend that you upload a handout by using the Upload a Handout field. If this isn't possible, you can post a transcript file on the Files & Uploads page or on the Internet, and then add the URL for the transcript here. Students see a link to download that transcript below the video."),
display_name=_("Downloadable Transcript URL"),
scope=Scope.settings,
default=''
)
download_track = Boolean(
help=_("Allow students to download the timed transcript. A link to download the file appears below the video. By default, the transcript is an .srt or .txt file. If you want to provide the transcript for download in a different format, upload a file by using the Upload Handout field."),
display_name=_("Download Transcript Allowed"),
scope=Scope.settings,
default=False
)
sub = String(
help=_("The default transcript for the video, from the Default Timed Transcript field on the Basic tab. This transcript should be in English. You don't have to change this setting."),
display_name=_("Default Timed Transcript"),
scope=Scope.settings,
default=""
)
show_captions = Boolean(
help=_("Specify whether the transcripts appear with the video by default."),
display_name=_("Show Transcript"),
scope=Scope.settings,
default=True
)
# Data format: {'de': 'german_translation', 'uk': 'ukrainian_translation'}
transcripts = Dict(
help=_("Add transcripts in different languages. Click below to specify a language and upload an .srt transcript file for that language."),
display_name=_("Transcript Languages"),
scope=Scope.settings,
default={}
)
transcript_language = String(
help=_("Preferred language for transcript."),
display_name=_("Preferred language for transcript"),
scope=Scope.preferences,
default="en"
)
transcript_download_format = String(
help=_("Transcript file format to download by user."),
scope=Scope.preferences,
values=[
# Translators: This is a type of file used for captioning in the video player.
{"display_name": _("SubRip (.srt) file"), "value": "srt"},
{"display_name": _("Text (.txt) file"), "value": "txt"}
],
default='srt',
)
speed = Float(
help=_("The last speed that the user specified for the video."),
scope=Scope.user_state
)
global_speed = Float(
help=_("The default speed for the video."),
scope=Scope.preferences,
default=1.0
)
youtube_is_available = Boolean(
help=_("Specify whether YouTube is available for the user."),
scope=Scope.user_info,
default=True
)
handout = String(
help=_("Upload a handout to accompany this video. Students can download the handout by clicking Download Handout under the video."),
display_name=_("Upload Handout"),
scope=Scope.settings,
)
only_on_web = Boolean(
help=_(
"Specify whether access to this video is limited to browsers only, or if it can be "
"accessed from other applications including mobile apps."
),
display_name="Video Available on Web Only",
scope=Scope.settings,
default=False
)
edx_video_id = String(
help=_("If you were assigned a Video ID by edX for the video to play in this component, enter the ID here. In this case, do not enter values in the Default Video URL, the Video File URLs, and the YouTube ID fields. If you were not assigned a Video ID, enter values in those other fields and ignore this field."), # pylint: disable=line-too-long
display_name=_("Video ID"),
scope=Scope.settings,
default="",
)
bumper_last_view_date = DateTime(
display_name=_("Date of the last view of the bumper"),
scope=Scope.preferences,
)
bumper_do_not_show_again = Boolean(
display_name=_("Do not show bumper again"),
scope=Scope.preferences,
default=False,
)
|
rkmaddox/mne-python
|
refs/heads/master
|
mne/viz/backends/tests/test_renderer.py
|
11
|
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Eric Larson <larson.eric.d@gmail.com>
# Joan Massich <mailsik@gmail.com>
# Guillaume Favelier <guillaume.favelier@gmail.com>
#
# License: Simplified BSD
import os
import pytest
import numpy as np
from mne.viz.backends.tests._utils import (skips_if_not_mayavi,
skips_if_not_pyvista)
from mne.viz.backends._utils import ALLOWED_QUIVER_MODES
@pytest.fixture
def backend_mocker():
"""Help to test set up 3d backend."""
from mne.viz.backends import renderer
del renderer.MNE_3D_BACKEND
yield
renderer.MNE_3D_BACKEND = None
@pytest.mark.parametrize('backend', [
pytest.param('mayavi', marks=skips_if_not_mayavi),
pytest.param('pyvista', marks=skips_if_not_pyvista),
pytest.param('foo', marks=pytest.mark.xfail(raises=ValueError)),
])
def test_backend_environment_setup(backend, backend_mocker, monkeypatch):
"""Test set up 3d backend based on env."""
monkeypatch.setenv("MNE_3D_BACKEND", backend)
assert os.environ['MNE_3D_BACKEND'] == backend # just double-check
# reload the renderer to check if the 3d backend selection by
# environment variable has been updated correctly
from mne.viz.backends import renderer
renderer.set_3d_backend(backend)
assert renderer.MNE_3D_BACKEND == backend
assert renderer.get_3d_backend() == backend
def test_3d_functions(renderer):
"""Test figure management functions."""
fig = renderer.create_3d_figure((300, 300))
# Mayavi actually needs something in the display to set the title
wrap_renderer = renderer.backend._Renderer(fig=fig)
wrap_renderer.sphere(np.array([0., 0., 0.]), 'w', 1.)
renderer.backend._check_3d_figure(fig)
renderer.backend._set_3d_view(figure=fig, azimuth=None, elevation=None,
focalpoint=(0., 0., 0.), distance=None)
renderer.backend._set_3d_title(figure=fig, title='foo')
renderer.backend._take_3d_screenshot(figure=fig)
renderer.backend._close_all()
def test_3d_backend(renderer):
"""Test default plot."""
# set data
win_size = (600, 600)
win_color = 'black'
tet_size = 1.0
tet_x = np.array([0, tet_size, 0, 0])
tet_y = np.array([0, 0, tet_size, 0])
tet_z = np.array([0, 0, 0, tet_size])
tet_indices = np.array([[0, 1, 2],
[0, 1, 3],
[0, 2, 3],
[1, 2, 3]])
tet_color = 'white'
sph_center = np.column_stack((tet_x, tet_y, tet_z))
sph_color = 'red'
sph_scale = tet_size / 3.0
ct_scalars = np.array([0.0, 0.0, 0.0, 1.0])
ct_levels = [0.2, 0.4, 0.6, 0.8]
ct_surface = {
"rr": sph_center,
"tris": tet_indices
}
qv_color = 'blue'
qv_scale = tet_size / 2.0
qv_center = np.array([np.mean((sph_center[va, :],
sph_center[vb, :],
sph_center[vc, :]), axis=0)
for (va, vb, vc) in tet_indices])
center = np.mean(qv_center, axis=0)
qv_dir = qv_center - center
qv_scale_mode = 'scalar'
qv_scalars = np.linspace(1.0, 2.0, 4)
txt_x = 0.0
txt_y = 0.0
txt_text = "renderer"
txt_size = 14
cam_distance = 5 * tet_size
# init scene
rend = renderer.create_3d_figure(
size=win_size,
bgcolor=win_color,
smooth_shading=True,
scene=False,
)
for interaction in ('terrain', 'trackball'):
rend.set_interaction(interaction)
# use mesh
mesh_data = rend.mesh(
x=tet_x,
y=tet_y,
z=tet_z,
triangles=tet_indices,
color=tet_color,
)
rend.remove_mesh(mesh_data)
# use contour
rend.contour(surface=ct_surface, scalars=ct_scalars,
contours=ct_levels, kind='line')
rend.contour(surface=ct_surface, scalars=ct_scalars,
contours=ct_levels, kind='tube')
# use sphere
rend.sphere(center=sph_center, color=sph_color,
scale=sph_scale, radius=1.0)
# use quiver3d
kwargs = dict(
x=qv_center[:, 0],
y=qv_center[:, 1],
z=qv_center[:, 2],
u=qv_dir[:, 0],
v=qv_dir[:, 1],
w=qv_dir[:, 2],
color=qv_color,
scale=qv_scale,
scale_mode=qv_scale_mode,
scalars=qv_scalars,
)
for mode in ALLOWED_QUIVER_MODES:
rend.quiver3d(mode=mode, **kwargs)
with pytest.raises(ValueError, match='Invalid value'):
rend.quiver3d(mode='foo', **kwargs)
# use tube
rend.tube(origin=np.array([[0, 0, 0]]),
destination=np.array([[0, 1, 0]]))
tube = rend.tube(origin=np.array([[1, 0, 0]]),
destination=np.array([[1, 1, 0]]),
scalars=np.array([[1.0, 1.0]]))
# scalar bar
rend.scalarbar(source=tube, title="Scalar Bar",
bgcolor=[1, 1, 1])
# use text
rend.text2d(x_window=txt_x, y_window=txt_y, text=txt_text,
size=txt_size, justification='right')
rend.text3d(x=0, y=0, z=0, text=txt_text, scale=1.0)
rend.set_camera(azimuth=180.0, elevation=90.0,
distance=cam_distance,
focalpoint=center)
rend.reset_camera()
rend.show()
def test_get_3d_backend(renderer):
"""Test get_3d_backend function call for side-effects."""
# Test twice to ensure the first call had no side-effect
orig_backend = renderer.MNE_3D_BACKEND
assert renderer.get_3d_backend() == orig_backend
assert renderer.get_3d_backend() == orig_backend
|
fanor79/BotTwitterRetweet
|
refs/heads/master
|
test.py
|
1
|
#!/usr/bin/env python
# coding: utf-8
import os
import json
import time
import os.path
import sqlite3
from twython import Twython
from pprint import pprint
from function import *
api = Twython(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_KEY, ACCESS_SECRET)
# Create the database.db file and create the Tweet table inside it
if os.path.exists('database.db') == False:
con = sqlite3.connect('database.db')
c = con.cursor()
c.execute('CREATE TABLE Tweet(id text)')
print('database.db created with the Tweet table')
con.commit()
lesTweets = lireTweet(api)
for leTweet in lesTweets['statuses']:
id_str = leTweet['id_str']
if rechercheId(id_str) == False and isSelfSource(leTweet) == False:
api.retweet(id = id_str)
ajoutListeTweet(id_str)
|
meghana1995/sympy
|
refs/heads/master
|
sympy/matrices/expressions/matmul.py
|
7
|
from __future__ import print_function, division
from sympy.core import Mul, Basic, sympify, Add
from sympy.core.compatibility import range
from sympy.functions import adjoint
from sympy.matrices.expressions.transpose import transpose
from sympy.strategies import (rm_id, unpack, typed, flatten, exhaust,
do_one, new)
from sympy.matrices.expressions.matexpr import (MatrixExpr, ShapeError,
Identity, ZeroMatrix)
from sympy.matrices.matrices import MatrixBase
class MatMul(MatrixExpr):
"""
A product of matrix expressions
Examples
========
>>> from sympy import MatMul, MatrixSymbol
>>> A = MatrixSymbol('A', 5, 4)
>>> B = MatrixSymbol('B', 4, 3)
>>> C = MatrixSymbol('C', 3, 6)
>>> MatMul(A, B, C)
A*B*C
"""
is_MatMul = True
def __new__(cls, *args, **kwargs):
check = kwargs.get('check', True)
args = list(map(sympify, args))
obj = Basic.__new__(cls, *args)
factor, matrices = obj.as_coeff_matrices()
if check:
validate(*matrices)
return obj
@property
def shape(self):
matrices = [arg for arg in self.args if arg.is_Matrix]
return (matrices[0].rows, matrices[-1].cols)
def _entry(self, i, j, expand=True):
coeff, matrices = self.as_coeff_matrices()
if len(matrices) == 1: # situation like 2*X, matmul is just X
return coeff * matrices[0][i, j]
head, tail = matrices[0], matrices[1:]
if len(tail) == 0:
raise ValueError("length of tail cannot be 0")
X = head
Y = MatMul(*tail)
from sympy.core.symbol import Dummy
from sympy.concrete.summations import Sum
from sympy.matrices import ImmutableMatrix
k = Dummy('k', integer=True)
if X.has(ImmutableMatrix) or Y.has(ImmutableMatrix):
return coeff*Add(*[X[i, k]*Y[k, j] for k in range(X.cols)])
result = Sum(coeff*X[i, k]*Y[k, j], (k, 0, X.cols - 1))
return result.doit() if expand else result
def as_coeff_matrices(self):
scalars = [x for x in self.args if not x.is_Matrix]
matrices = [x for x in self.args if x.is_Matrix]
coeff = Mul(*scalars)
return coeff, matrices
def as_coeff_mmul(self):
coeff, matrices = self.as_coeff_matrices()
return coeff, MatMul(*matrices)
def _eval_transpose(self):
return MatMul(*[transpose(arg) for arg in self.args[::-1]]).doit()
def _eval_adjoint(self):
return MatMul(*[adjoint(arg) for arg in self.args[::-1]]).doit()
def _eval_trace(self):
factor, mmul = self.as_coeff_mmul()
if factor != 1:
from .trace import Trace
return factor * Trace(mmul)
else:
raise NotImplementedError("Can't simplify any further")
def _eval_determinant(self):
from sympy.matrices.expressions.determinant import Determinant
factor, matrices = self.as_coeff_matrices()
square_matrices = only_squares(*matrices)
return factor**self.rows * Mul(*list(map(Determinant, square_matrices)))
def _eval_inverse(self):
try:
return MatMul(*[
arg.inverse() if isinstance(arg, MatrixExpr) else arg**-1
for arg in self.args[::-1]]).doit()
except ShapeError:
from sympy.matrices.expressions.inverse import Inverse
return Inverse(self)
def doit(self, **kwargs):
deep = kwargs.get('deep', True)
if deep:
args = [arg.doit(**kwargs) for arg in self.args]
else:
args = self.args
return canonicalize(MatMul(*args))
def validate(*matrices):
""" Checks for valid shapes for args of MatMul """
for i in range(len(matrices)-1):
A, B = matrices[i:i+2]
if A.cols != B.rows:
raise ShapeError("Matrices %s and %s are not aligned"%(A, B))
# Rules
def newmul(*args):
if args[0] == 1:
args = args[1:]
return new(MatMul, *args)
def any_zeros(mul):
if any([arg.is_zero or (arg.is_Matrix and arg.is_ZeroMatrix)
for arg in mul.args]):
matrices = [arg for arg in mul.args if arg.is_Matrix]
return ZeroMatrix(matrices[0].rows, matrices[-1].cols)
return mul
def merge_explicit(matmul):
""" Merge explicit MatrixBase arguments
>>> from sympy import MatrixSymbol, eye, Matrix, MatMul, pprint
>>> from sympy.matrices.expressions.matmul import merge_explicit
>>> A = MatrixSymbol('A', 2, 2)
>>> B = Matrix([[1, 1], [1, 1]])
>>> C = Matrix([[1, 2], [3, 4]])
>>> X = MatMul(A, B, C)
>>> pprint(X)
A*[1 1]*[1 2]
[ ] [ ]
[1 1] [3 4]
>>> pprint(merge_explicit(X))
A*[4 6]
[ ]
[4 6]
>>> X = MatMul(B, A, C)
>>> pprint(X)
[1 1]*A*[1 2]
[ ] [ ]
[1 1] [3 4]
>>> pprint(merge_explicit(X))
[1 1]*A*[1 2]
[ ] [ ]
[1 1] [3 4]
"""
if not any(isinstance(arg, MatrixBase) for arg in matmul.args):
return matmul
newargs = []
last = matmul.args[0]
for arg in matmul.args[1:]:
if isinstance(arg, MatrixBase) and isinstance(last, MatrixBase):
last = last * arg
else:
newargs.append(last)
last = arg
newargs.append(last)
return MatMul(*newargs)
def xxinv(mul):
""" Y * X * X.I -> Y """
factor, matrices = mul.as_coeff_matrices()
for i, (X, Y) in enumerate(zip(matrices[:-1], matrices[1:])):
try:
if X.is_square and Y.is_square and X == Y.inverse():
I = Identity(X.rows)
return newmul(factor, *(matrices[:i] + [I] + matrices[i+2:]))
except ValueError: # Y might not be invertible
pass
return mul
def remove_ids(mul):
""" Remove Identities from a MatMul
This is a modified version of sympy.strategies.rm_id.
This is necessary because MatMul may contain both MatrixExprs and Exprs
as args.
See Also
--------
sympy.strategies.rm_id
"""
# Separate Exprs from MatrixExprs in args
factor, mmul = mul.as_coeff_mmul()
# Apply standard rm_id for MatMuls
result = rm_id(lambda x: x.is_Identity is True)(mmul)
if result != mmul:
return newmul(factor, *result.args) # Recombine and return
else:
return mul
def factor_in_front(mul):
factor, matrices = mul.as_coeff_matrices()
if factor != 1:
return newmul(factor, *matrices)
return mul
rules = (any_zeros, remove_ids, xxinv, unpack, rm_id(lambda x: x == 1),
merge_explicit, factor_in_front, flatten)
canonicalize = exhaust(typed({MatMul: do_one(*rules)}))
def only_squares(*matrices):
""" factor matrices only if they are square """
if matrices[0].rows != matrices[-1].cols:
raise RuntimeError("Invalid matrices being multiplied")
out = []
start = 0
for i, M in enumerate(matrices):
if M.cols == matrices[start].rows:
out.append(MatMul(*matrices[start:i+1]).doit())
start = i+1
return out
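# Added illustration (not part of the original module): only_squares() greedily
# groups the chain into square blocks, which lets _eval_determinant treat each
# block separately. For example, with A a 2x3 and B a 3x2 MatrixSymbol,
# only_squares(A, B) returns [A*B] (one 2x2 block), while for square A and B
# it returns [A, B] as two separate blocks.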
from sympy.assumptions.ask import ask, Q
from sympy.assumptions.refine import handlers_dict
def refine_MatMul(expr, assumptions):
"""
>>> from sympy import MatrixSymbol, Q, assuming, refine
>>> X = MatrixSymbol('X', 2, 2)
>>> expr = X * X.T
>>> print(expr)
X*X'
>>> with assuming(Q.orthogonal(X)):
... print(refine(expr))
I
"""
newargs = []
last = expr.args[0]
for arg in expr.args[1:]:
if arg == last.T and ask(Q.orthogonal(arg), assumptions):
last = Identity(arg.shape[0])
elif arg == last.conjugate() and ask(Q.unitary(arg), assumptions):
last = Identity(arg.shape[0])
else:
newargs.append(last)
last = arg
newargs.append(last)
return MatMul(*newargs)
handlers_dict['MatMul'] = refine_MatMul
|
h4de5ing/sees
|
refs/heads/master
|
lib/main.py
|
9
|
try:
import sys
import argparse
import os
from config import Config_Parser
from exceptions import SeesExceptions
from common import *
except ImportError,e:
import sys
sys.stdout.write("%s\n" %e)
sys.exit(1)
def is_file_exists(file_list):
for file in file_list:
if not os.path.exists(file):
print >> sys.stderr, bcolors.OKBLUE + "Error : " + bcolors.ENDC + bcolors.FAIL + "The file \"%s\" does not exist on the system!" % (file) + bcolors.ENDC
sys.exit(2)
class AddressAction(argparse.Action):
def __call__(self, parser, args, values, option = None):
args.options = values
if args.attach:
if not args.options:
parser.error("Usage --attach <file1 file2 file3> ")
else:
is_file_exists(args.options)
class Main:
def __init__(self):
parser = argparse.ArgumentParser()
group_parser = parser.add_mutually_exclusive_group(required = True)
group_parser.add_argument('--attach', dest='attach', action='store_const', const='attach', help="Attach Email")
group_parser.add_argument('--text', dest='text', action='store_const', const='text', help="Text Email")
parser.add_argument('options', nargs='*', action = AddressAction)
parser.add_argument('--config_file', '-c', action = 'store', dest = 'config_file', help = "Configuration Files", metavar="FILE", required = True)
parser.add_argument('--mail_user', '-m', action = 'store', dest = 'mail_user_file', help = "Mail User File", metavar="FILE", required = True)
parser.add_argument('--html_file', '-f', action = 'store', dest = 'html_file', help = "Content of Html File" ,metavar="FILE", required = True)
parser.add_argument('--verbose', '-v', action = 'store_true', help = "Verbose For Sending Email", default = False)
self.args = parser.parse_args()
file_list = (self.args.config_file,self.args.mail_user_file,self.args.html_file)
is_file_exists(file_list)
def run(self):
parser = Config_Parser(self.args.config_file)
try:
if self.args.attach:
parser.run(self.args.mail_user_file, self.args.attach, self.args.html_file, self.args.verbose, self.args.options)
else:
parser.run(self.args.mail_user_file, self.args.text, self.args.html_file, self.args.verbose, None)
except SeesExceptions, mess:
raise SeesExceptions("%s"% mess)
|
sodafree/backend
|
refs/heads/master
|
django/views/generic/__init__.py
|
493
|
from django.views.generic.base import View, TemplateView, RedirectView
from django.views.generic.dates import (ArchiveIndexView, YearArchiveView, MonthArchiveView,
WeekArchiveView, DayArchiveView, TodayArchiveView,
DateDetailView)
from django.views.generic.detail import DetailView
from django.views.generic.edit import FormView, CreateView, UpdateView, DeleteView
from django.views.generic.list import ListView
class GenericViewError(Exception):
"""A problem in a generic view."""
pass
|
Maillol/cricri
|
refs/heads/master
|
test/inet/test_http_client.py
|
2
|
import unittest
from io import BytesIO
import http.client
from cricri.inet.http_client import (HTTPResponse,
NoResponseProvidedError)
class MockSocket:
LINES = (b'HTTP/1.1 200 OK\r\n'
b'Date: Tue, 21 Mar 2017 07:02:52 GMT\r\n'
b'Server: CherryPy/10.2.1\r\n'
b'Content-Type: application/json\r\n'
b'Allow: DELETE, GET, HEAD, POST, PUT\r\n'
b'Content-Length: 2\r\n'
b'\r\n'
b'[]')
@classmethod
def makefile(cls, _mode):
return BytesIO(cls.LINES)
class TestHTTPResponse(unittest.TestCase):
@classmethod
def setUpClass(cls):
response = http.client.HTTPResponse(MockSocket)
response.begin()
cls.responses = HTTPResponse(response)
def test_version_should_be_1_1(self):
self.assertEqual(self.responses.version, 11)
def test_status_code_must_be_200(self):
self.assertEqual(self.responses.status_code, 200)
def test_reason_must_ok(self):
self.assertEqual(self.responses.reason, 'OK')
def test_assert_header_has_should_raise(self):
"""
When HTTPResponse header has not value.
"""
with self.assertRaises(AssertionError):
self.responses.assert_header_has('Allow', 'PATCH')
def test_assert_header_has_should_not_raise(self):
"""
When HTTPResponse header has value.
"""
self.assertIsNone(
self.responses.assert_header_has('Allow', 'DELETE')
)
def test_assert_header_is_should_raise(self):
"""
When HTTPResponse header is not exactly.
"""
with self.assertRaises(AssertionError):
self.responses.assert_header_is('Allow',
'GET, DELETE, HEAD, POST, PUT')
def test_assert_header_is_should_not_raise(self):
"""
When HTTPResponse header is exactly the given value.
"""
self.assertIsNone(
self.responses.assert_header_is('Allow',
'DELETE, GET, HEAD, POST, PUT')
)
def test_assert_reason_should_raise(self):
with self.assertRaises(AssertionError):
self.responses.assert_reason('FAILED')
def test_assert_reason_should_not_raise(self):
self.assertIsNone(
self.responses.assert_reason('OK')
)
def test_assert_status_code_should_raise(self):
with self.assertRaises(AssertionError):
self.responses.assert_status_code(201)
def test_assert_status_code_should_not_raise(self):
self.assertIsNone(
self.responses.assert_status_code(200)
)
def test_should_not_use_assert_when_response_is_not_provided(self):
with self.assertRaises(NoResponseProvidedError):
response = HTTPResponse()
self.assertIsNone(
response.assert_status_code(200)
)
|
cainiaocome/scikit-learn
|
refs/heads/master
|
sklearn/ensemble/tests/test_gradient_boosting_loss_functions.py
|
221
|
"""
Testing for the gradient boosting loss functions and initial estimators.
"""
import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.utils import check_random_state
from sklearn.ensemble.gradient_boosting import BinomialDeviance
from sklearn.ensemble.gradient_boosting import LogOddsEstimator
from sklearn.ensemble.gradient_boosting import LeastSquaresError
from sklearn.ensemble.gradient_boosting import RegressionLossFunction
from sklearn.ensemble.gradient_boosting import LOSS_FUNCTIONS
from sklearn.ensemble.gradient_boosting import _weighted_percentile
def test_binomial_deviance():
# Check binomial deviance loss.
# Check against alternative definitions in ESLII.
bd = BinomialDeviance(2)
# pred has the same BD for y in {0, 1}
assert_equal(bd(np.array([0.0]), np.array([0.0])),
bd(np.array([1.0]), np.array([0.0])))
assert_almost_equal(bd(np.array([1.0, 1.0, 1.0]),
np.array([100.0, 100.0, 100.0])),
0.0)
assert_almost_equal(bd(np.array([1.0, 0.0, 0.0]),
np.array([100.0, -100.0, -100.0])), 0)
# check if same results as alternative definition of deviance (from ESLII)
alt_dev = lambda y, pred: np.mean(np.logaddexp(0.0, -2.0 *
(2.0 * y - 1) * pred))
test_data = [(np.array([1.0, 1.0, 1.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]),
np.array([-100.0, -100.0, -100.0])),
(np.array([1.0, 1.0, 1.0]),
np.array([-100.0, -100.0, -100.0]))]
for datum in test_data:
assert_almost_equal(bd(*datum), alt_dev(*datum))
# check the negative gradient against the alternative definition
alt_ng = lambda y, pred: (2 * y - 1) / (1 + np.exp(2 * (2 * y - 1) * pred))
for datum in test_data:
assert_almost_equal(bd.negative_gradient(*datum), alt_ng(*datum))
def test_log_odds_estimator():
# Check log odds estimator.
est = LogOddsEstimator()
assert_raises(ValueError, est.fit, None, np.array([1]))
est.fit(None, np.array([1.0, 0.0]))
assert_equal(est.prior, 0.0)
assert_array_equal(est.predict(np.array([[1.0], [1.0]])),
np.array([[0.0], [0.0]]))
def test_sample_weight_smoke():
rng = check_random_state(13)
y = rng.rand(100)
pred = rng.rand(100)
# least squares
loss = LeastSquaresError(1)
loss_wo_sw = loss(y, pred)
loss_w_sw = loss(y, pred, np.ones(pred.shape[0], dtype=np.float32))
assert_almost_equal(loss_wo_sw, loss_w_sw)
def test_sample_weight_init_estimators():
# Smoke test for init estimators with sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
else:
k = 2
y = clf_y
if Loss.is_multi_class:
# skip multiclass
continue
loss = Loss(k)
init_est = loss.init_estimator()
init_est.fit(X, y)
out = init_est.predict(X)
assert_equal(out.shape, (y.shape[0], 1))
sw_init_est = loss.init_estimator()
sw_init_est.fit(X, y, sample_weight=sample_weight)
sw_out = sw_init_est.predict(X)
assert_equal(sw_out.shape, (y.shape[0], 1))
# check if predictions match
assert_array_equal(out, sw_out)
def test_weighted_percentile():
y = np.empty(102, dtype=np.float)
y[:50] = 0
y[-51:] = 2
y[-1] = 100000
y[50] = 1
sw = np.ones(102, dtype=np.float)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 1
def test_weighted_percentile_equal():
y = np.empty(102, dtype=np.float)
y.fill(0.0)
sw = np.ones(102, dtype=np.float)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 0
def test_weighted_percentile_zero_weight():
y = np.empty(102, dtype=np.float)
y.fill(1.0)
sw = np.ones(102, dtype=np.float)
sw.fill(0.0)
score = _weighted_percentile(y, sw, 50)
assert score == 1.0
def test_sample_weight_deviance():
# Test if deviance supports sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
mclf_y = rng.randint(0, 3, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
p = reg_y
else:
k = 2
y = clf_y
p = clf_y
if Loss.is_multi_class:
k = 3
y = mclf_y
# one-hot encoding
p = np.zeros((y.shape[0], k), dtype=np.float64)
for i in range(k):
p[:, i] = y == i
loss = Loss(k)
deviance_w_w = loss(y, p, sample_weight)
deviance_wo_w = loss(y, p)
assert deviance_wo_w == deviance_w_w
|
javiergarridomellado/Empresa_django
|
refs/heads/master
|
devcodela/lib/python2.7/site-packages/django/contrib/gis/geometry/backend/__init__.py
|
128
|
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
geom_backend = getattr(settings, 'GEOMETRY_BACKEND', 'geos')
try:
module = import_module('.%s' % geom_backend, 'django.contrib.gis.geometry.backend')
except ImportError:
try:
module = import_module(geom_backend)
except ImportError:
raise ImproperlyConfigured('Could not import user-defined GEOMETRY_BACKEND '
'"%s".' % geom_backend)
try:
Geometry = module.Geometry
GeometryException = module.GeometryException
except AttributeError:
raise ImproperlyConfigured('Cannot import Geometry from the "%s" '
'geometry backend.' % geom_backend)
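# Usage sketch (illustration only, not part of Django): the backend is selected
# in the project settings; 'geos' is the default, and any importable dotted path
# exposing Geometry and GeometryException attributes is assumed to work too.
#
#     # settings.py
#     GEOMETRY_BACKEND = 'geos'          # built-in backend (default)
#     # GEOMETRY_BACKEND = 'myapp.geom'  # hypothetical custom backend module
#
# Afterwards, ``from django.contrib.gis.geometry.backend import Geometry``
# resolves to the class provided by the configured backend.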
|
alexallah/django
|
refs/heads/master
|
tests/contenttypes_tests/urls.py
|
72
|
from django.conf.urls import url
from django.contrib.contenttypes import views
urlpatterns = [
url(r'^shortcut/([0-9]+)/(.*)/$', views.shortcut),
]
|
UAVCAN/pyuavcan
|
refs/heads/master
|
pyuavcan/dsdl/_composite_object.py
|
1
|
# Copyright (c) 2019 UAVCAN Consortium
# This software is distributed under the terms of the MIT License.
# Author: Pavel Kirienko <pavel@uavcan.org>
from __future__ import annotations
import abc
import gzip
import typing
import pickle
import base64
import logging
import importlib
import pydsdl
from . import _serialized_representation
_logger = logging.getLogger(__name__)
class CompositeObject(abc.ABC): # Members are surrounded with underscores to avoid collisions with DSDL attributes.
"""
This is the base class for all Python classes generated from DSDL definitions.
It does not have any public members.
"""
_MODEL_: pydsdl.CompositeType
"""Type definition as provided by PyDSDL."""
_EXTENT_BYTES_: int
"""Defined in generated classes."""
@abc.abstractmethod
def _serialize_(self, _ser_: _serialized_representation.Serializer) -> None:
"""
Auto-generated serialization method.
Appends the serialized representation of its object to the supplied Serializer instance.
This is not a part of the API.
"""
raise NotImplementedError
@staticmethod
@abc.abstractmethod
def _deserialize_(_des_: _serialized_representation.Deserializer) -> CompositeObject:
"""
Auto-generated deserialization method. Consumes (some) data from the supplied Deserializer instance.
Raises a Deserializer.FormatError if the supplied serialized representation is invalid.
Always returns a valid object unless an exception is raised.
This is not a part of the API.
"""
raise NotImplementedError
@staticmethod
def _restore_constant_(encoded_string: str) -> object:
"""Recovers a pickled gzipped constant object from base85 string representation."""
out = pickle.loads(gzip.decompress(base64.b85decode(encoded_string)))
assert isinstance(out, object)
return out
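    # Illustration only (an assumption about the code generator, not part of the
    # API): `encoded_string` is expected to be produced with the inverse
    # transformation, roughly:
    #
    #     import base64, gzip, pickle
    #     encoded = base64.b85encode(gzip.compress(pickle.dumps(value))).decode()
    #     assert CompositeObject._restore_constant_(encoded) == value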
# These typing hints are provided here for use in the generated classes. They are obviously not part of the API.
_SerializerTypeVar_ = typing.TypeVar("_SerializerTypeVar_", bound=_serialized_representation.Serializer)
_DeserializerTypeVar_ = typing.TypeVar("_DeserializerTypeVar_", bound=_serialized_representation.Deserializer)
class ServiceObject(CompositeObject):
"""
This is the base class for all Python classes generated from DSDL service type definitions.
Observe that it inherits from the composite object class, just like the nested types Request and Response.
"""
Request: typing.Type[CompositeObject]
"""
Nested request type. Inherits from :class:`CompositeObject`.
The base class provides a stub which is overridden in generated classes.
"""
Response: typing.Type[CompositeObject]
"""
Nested response type. Inherits from :class:`CompositeObject`.
The base class provides a stub which is overridden in generated classes.
"""
_EXTENT_BYTES_ = 0
def _serialize_(self, _ser_: _serialized_representation.Serializer) -> None:
raise TypeError(f"Service type {type(self).__name__} cannot be serialized")
@staticmethod
def _deserialize_(_des_: _serialized_representation.Deserializer) -> CompositeObject:
raise TypeError("Service types cannot be deserialized")
class FixedPortObject(abc.ABC):
"""
This is the base class for all Python classes generated from DSDL types that have a fixed port identifier.
"""
_FIXED_PORT_ID_: int
class FixedPortCompositeObject(CompositeObject, FixedPortObject):
@abc.abstractmethod
def _serialize_(self, _ser_: _serialized_representation.Serializer) -> None:
raise NotImplementedError
@staticmethod
@abc.abstractmethod
def _deserialize_(_des_: _serialized_representation.Deserializer) -> CompositeObject:
raise NotImplementedError
class FixedPortServiceObject(ServiceObject, FixedPortObject):
pass
CompositeObjectTypeVar = typing.TypeVar("CompositeObjectTypeVar", bound=CompositeObject)
def serialize(obj: CompositeObject) -> typing.Iterable[memoryview]:
"""
Constructs a serialized representation of the provided top-level object.
The resulting serialized representation is padded to one byte in accordance with the UAVCAN specification.
The constructed serialized representation is returned as a sequence of byte-aligned fragments which must be
concatenated in order to obtain the final representation.
The objective of this model is to avoid copying data into a temporary buffer when possible.
Each yielded fragment is of type :class:`memoryview` pointing to raw unsigned bytes.
It is guaranteed that at least one fragment is always returned (which may be empty).
"""
# TODO: update the Serializer class to emit an iterable of fragments.
ser = _serialized_representation.Serializer.new(obj._EXTENT_BYTES_) # pylint: disable=protected-access
obj._serialize_(ser) # pylint: disable=protected-access
yield ser.buffer.data
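# Usage sketch (illustration only, not part of the public API): the fragments
# yielded by serialize() are byte-aligned memoryviews that can simply be joined.
def _example_serialize_to_bytes(obj: CompositeObject) -> bytes:
    """Illustration only: concatenate the fragments produced by :func:`serialize`."""
    return b"".join(bytes(fragment) for fragment in serialize(obj))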
def deserialize(
dtype: typing.Type[CompositeObjectTypeVar], fragmented_serialized_representation: typing.Sequence[memoryview]
) -> typing.Optional[CompositeObjectTypeVar]:
"""
Constructs an instance of the supplied DSDL-generated data type from its serialized representation.
Returns None if the provided serialized representation is invalid.
This function will never raise an exception for invalid input data; the only possible outcome of an invalid data
being supplied is None at the output. A raised exception can only indicate an error in the deserialization logic.
.. important:: The constructed object may contain arrays referencing the memory allocated for the serialized
representation. Therefore, in order to avoid unintended data corruption, the caller should destroy all
references to the serialized representation immediately after the invocation.
.. important:: The supplied fragments of the serialized representation should be writeable.
If they are not, some of the array-typed fields of the constructed object may be read-only.
"""
deserializer = _serialized_representation.Deserializer.new(fragmented_serialized_representation)
try:
return dtype._deserialize_(deserializer) # type: ignore # pylint: disable=protected-access
except _serialized_representation.Deserializer.FormatError:
_logger.info("Invalid serialized representation of %s: %s", get_model(dtype), deserializer, exc_info=True)
return None
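# Round-trip sketch (illustration only): MyType_1_0 stands for any previously
# generated DSDL class and is an assumption of this example, not a real import.
#
#     fragments = list(serialize(MyType_1_0()))
#     buffer_ = bytearray(b"".join(bytes(f) for f in fragments))
#     restored = deserialize(MyType_1_0, [memoryview(buffer_)])
#     assert restored is not None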
def get_model(class_or_instance: typing.Union[typing.Type[CompositeObject], CompositeObject]) -> pydsdl.CompositeType:
"""
Obtains a PyDSDL model of the supplied DSDL-generated class or its instance.
This is the inverse of :func:`get_class`.
"""
out = class_or_instance._MODEL_ # pylint: disable=protected-access
assert isinstance(out, pydsdl.CompositeType)
return out
def get_class(model: pydsdl.CompositeType) -> typing.Type[CompositeObject]:
"""
Returns a generated native class implementing the specified DSDL type represented by its PyDSDL model object.
Promotes the model to delimited type automatically if necessary.
This is the inverse of :func:`get_model`.
:raises:
- :class:`ImportError` if the generated package or subpackage cannot be found.
- :class:`AttributeError` if the package is found but it does not contain the requested type.
- :class:`TypeError` if the requested type is found, but its model does not match the input argument.
This error may occur if the DSDL source has changed since the type was generated.
To fix this, regenerate the package and make sure that all components of the application use identical
or compatible DSDL source files.
"""
def do_import(name_components: typing.List[str]) -> typing.Any:
mod = None
for comp in name_components:
name = (mod.__name__ + "." + comp) if mod else comp # type: ignore
try:
mod = importlib.import_module(name)
except ImportError: # We seem to have hit a reserved word; try with an underscore.
mod = importlib.import_module(name + "_")
return mod
if model.has_parent_service: # uavcan.node.GetInfo.Request --> uavcan.node.GetInfo then Request
parent_name, child_name = model.name_components[-2:]
mod = do_import(model.name_components[:-2])
out = getattr(mod, f"{parent_name}_{model.version.major}_{model.version.minor}")
assert issubclass(out, ServiceObject)
out = getattr(out, child_name)
else:
mod = do_import(model.name_components[:-1])
out = getattr(mod, f"{model.short_name}_{model.version.major}_{model.version.minor}")
out_model = get_model(out)
if out_model.inner_type != model.inner_type:
raise TypeError(
f"The class has been generated using an incompatible DSDL definition. "
f"Requested model: {model} defined in {model.source_file_path}. "
f"Model found in the class: {out_model} defined in {out_model.source_file_path}."
)
assert str(get_model(out)) == str(model)
assert isinstance(out, type)
assert issubclass(out, CompositeObject)
return out
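# Illustration only: get_class() and get_model() are mutual inverses, so for any
# already-generated class C the following is assumed to hold:
#
#     assert get_class(get_model(C)) is C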
def get_extent_bytes(class_or_instance: typing.Union[typing.Type[CompositeObject], CompositeObject]) -> int:
return int(class_or_instance._EXTENT_BYTES_) # pylint: disable=protected-access
def get_fixed_port_id(
class_or_instance: typing.Union[typing.Type[FixedPortObject], FixedPortObject]
) -> typing.Optional[int]:
"""
Returns None if the supplied type has no fixed port-ID.
"""
try:
out = int(class_or_instance._FIXED_PORT_ID_) # pylint: disable=protected-access
except (TypeError, AttributeError):
return None
else:
if (isinstance(class_or_instance, type) and issubclass(class_or_instance, CompositeObject)) or isinstance(
class_or_instance, CompositeObject
): # pragma: no branch
assert out == get_model(class_or_instance).fixed_port_id
return out
def get_attribute(obj: typing.Union[CompositeObject, typing.Type[CompositeObject]], name: str) -> typing.Any:
"""
DSDL type attributes whose names can't be represented in Python (such as ``def`` or ``type``)
are suffixed with an underscore.
This function allows the caller to read arbitrary attributes referring to them by their original
DSDL names, e.g., ``def`` instead of ``def_``.
This function behaves like :func:`getattr` if the attribute does not exist.
"""
try:
return getattr(obj, name)
except AttributeError:
return getattr(obj, name + "_")
def set_attribute(obj: CompositeObject, name: str, value: typing.Any) -> None:
"""
DSDL type attributes whose names can't be represented in Python (such as ``def`` or ``type``)
are suffixed with an underscore.
This function allows the caller to assign arbitrary attributes referring to them by their original DSDL names,
e.g., ``def`` instead of ``def_``.
If the attribute does not exist, raises :class:`AttributeError`.
"""
suffixed = name + "_"
# We can't call setattr() without asking first because if it doesn't exist it will be created,
# which would be disastrous.
if hasattr(obj, name):
setattr(obj, name, value)
elif hasattr(obj, suffixed):
setattr(obj, suffixed, value)
else:
raise AttributeError(name)
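# Usage sketch (illustration only): `msg` is assumed to be an instance of a
# generated type whose DSDL definition has an attribute named `def`, which is a
# Python keyword and is therefore generated with a trailing underscore.
#
#     set_attribute(msg, "def", 3)           # assigns msg.def_
#     assert get_attribute(msg, "def") == 3  # reads msg.def_ transparently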
|
Drooids/erpnext
|
refs/heads/develop
|
erpnext/setup/page/setup_wizard/test_setup_wizard.py
|
45
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from erpnext.setup.page.setup_wizard.test_setup_data import args
from erpnext.setup.page.setup_wizard.setup_wizard import setup_account
import frappe.utils.scheduler
if __name__=="__main__":
frappe.connect()
frappe.local.form_dict = frappe._dict(args)
setup_account()
frappe.utils.scheduler.disable_scheduler()
|
minhphung171093/GreenERP
|
refs/heads/master
|
openerp/addons/delivery/models/__init__.py
|
41
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import delivery_carrier
import delivery_price_rule
import product_template
import sale_order
import partner
import stock_picking
import stock_move
|
flyher/pymo
|
refs/heads/master
|
symbian/PythonForS60_1.9.6/module-repo/standard-modules/BaseHTTPServer.py
|
91
|
"""HTTP server base class.
Note: the class in this module doesn't implement any HTTP request; see
SimpleHTTPServer for simple implementations of GET, HEAD and POST
(including CGI scripts). It does, however, optionally implement HTTP/1.1
persistent connections, as of version 0.3.
Contents:
- BaseHTTPRequestHandler: HTTP request handler base class
- test: test function
XXX To do:
- log requests even later (to capture byte count)
- log user-agent header and other interesting goodies
- send error log to separate file
"""
# See also:
#
# HTTP Working Group T. Berners-Lee
# INTERNET-DRAFT R. T. Fielding
# <draft-ietf-http-v10-spec-00.txt> H. Frystyk Nielsen
# Expires September 8, 1995 March 8, 1995
#
# URL: http://www.ics.uci.edu/pub/ietf/http/draft-ietf-http-v10-spec-00.txt
#
# and
#
# Network Working Group R. Fielding
# Request for Comments: 2616 et al
# Obsoletes: 2068 June 1999
# Category: Standards Track
#
# URL: http://www.faqs.org/rfcs/rfc2616.html
# Log files
# ---------
#
# Here's a quote from the NCSA httpd docs about log file format.
#
# | The logfile format is as follows. Each line consists of:
# |
# | host rfc931 authuser [DD/Mon/YYYY:hh:mm:ss] "request" ddd bbbb
# |
# | host: Either the DNS name or the IP number of the remote client
# | rfc931: Any information returned by identd for this person,
# | - otherwise.
# | authuser: If user sent a userid for authentication, the user name,
# | - otherwise.
# | DD: Day
# | Mon: Month (calendar name)
# | YYYY: Year
# | hh: hour (24-hour format, the machine's timezone)
# | mm: minutes
# | ss: seconds
# | request: The first line of the HTTP request as sent by the client.
# | ddd: the status code returned by the server, - if not available.
# | bbbb: the total number of bytes sent,
# | *not including the HTTP/1.0 header*, - if not available
# |
# | You can determine the name of the file accessed through request.
#
# (Actually, the latter is only true if you know the server configuration
# at the time the request was made!)
__version__ = "0.3"
__all__ = ["HTTPServer", "BaseHTTPRequestHandler"]
import sys
import time
import socket # For gethostbyaddr()
import mimetools
import SocketServer
# Default error message
DEFAULT_ERROR_MESSAGE = """\
<head>
<title>Error response</title>
</head>
<body>
<h1>Error response</h1>
<p>Error code %(code)d.
<p>Message: %(message)s.
<p>Error code explanation: %(code)s = %(explain)s.
</body>
"""
def _quote_html(html):
    return html.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
class HTTPServer(SocketServer.TCPServer):
allow_reuse_address = 1 # Seems to make sense in testing environment
def server_bind(self):
"""Override server_bind to store the server name."""
SocketServer.TCPServer.server_bind(self)
host, port = self.socket.getsockname()[:2]
self.server_name = socket.getfqdn(host)
self.server_port = port
class BaseHTTPRequestHandler(SocketServer.StreamRequestHandler):
"""HTTP request handler base class.
The following explanation of HTTP serves to guide you through the
code as well as to expose any misunderstandings I may have about
HTTP (so you don't need to read the code to figure out I'm wrong
:-).
HTTP (HyperText Transfer Protocol) is an extensible protocol on
top of a reliable stream transport (e.g. TCP/IP). The protocol
recognizes three parts to a request:
1. One line identifying the request type and path
2. An optional set of RFC-822-style headers
3. An optional data part
The headers and data are separated by a blank line.
The first line of the request has the form
<command> <path> <version>
where <command> is a (case-sensitive) keyword such as GET or POST,
<path> is a string containing path information for the request,
and <version> should be the string "HTTP/1.0" or "HTTP/1.1".
<path> is encoded using the URL encoding scheme (using %xx to signify
the ASCII character with hex code xx).
The specification specifies that lines are separated by CRLF but
for compatibility with the widest range of clients recommends
servers also handle LF. Similarly, whitespace in the request line
is treated sensibly (allowing multiple spaces between components
and allowing trailing whitespace).
Similarly, for output, lines ought to be separated by CRLF pairs
but most clients grok LF characters just fine.
If the first line of the request has the form
<command> <path>
(i.e. <version> is left out) then this is assumed to be an HTTP
0.9 request; this form has no optional headers and data part and
the reply consists of just the data.
The reply form of the HTTP 1.x protocol again has three parts:
1. One line giving the response code
2. An optional set of RFC-822-style headers
3. The data
Again, the headers and data are separated by a blank line.
The response code line has the form
<version> <responsecode> <responsestring>
where <version> is the protocol version ("HTTP/1.0" or "HTTP/1.1"),
<responsecode> is a 3-digit response code indicating success or
failure of the request, and <responsestring> is an optional
human-readable string explaining what the response code means.
This server parses the request and the headers, and then calls a
function specific to the request type (<command>). Specifically,
a request SPAM will be handled by a method do_SPAM(). If no
such method exists the server sends an error response to the
client. If it exists, it is called with no arguments:
do_SPAM()
Note that the request name is case sensitive (i.e. SPAM and spam
are different requests).
The various request details are stored in instance variables:
- client_address is the client IP address in the form (host,
port);
- command, path and version are the broken-down request line;
- headers is an instance of mimetools.Message (or a derived
class) containing the header information;
- rfile is a file object open for reading positioned at the
start of the optional input data part;
- wfile is a file object open for writing.
IT IS IMPORTANT TO ADHERE TO THE PROTOCOL FOR WRITING!
The first thing to be written must be the response line. Then
follow 0 or more header lines, then a blank line, and then the
actual data (if any). The meaning of the header lines depends on
the command executed by the server; in most cases, when data is
returned, there should be at least one header line of the form
Content-type: <type>/<subtype>
where <type> and <subtype> should be registered MIME types,
e.g. "text/html" or "text/plain".
"""
# The Python system version, truncated to its first component.
sys_version = "Python/" + sys.version.split()[0]
# The server software version. You may want to override this.
# The format is multiple whitespace-separated strings,
# where each string is of the form name[/version].
server_version = "BaseHTTP/" + __version__
def parse_request(self):
"""Parse a request (internal).
The request should be stored in self.raw_requestline; the results
are in self.command, self.path, self.request_version and
self.headers.
Return True for success, False for failure; on failure, an
error is sent back.
"""
self.command = None # set in case of error on the first line
self.request_version = version = "HTTP/0.9" # Default
self.close_connection = 1
requestline = self.raw_requestline
if requestline[-2:] == '\r\n':
requestline = requestline[:-2]
elif requestline[-1:] == '\n':
requestline = requestline[:-1]
self.requestline = requestline
words = requestline.split()
if len(words) == 3:
[command, path, version] = words
if version[:5] != 'HTTP/':
self.send_error(400, "Bad request version (%r)" % version)
return False
try:
base_version_number = version.split('/', 1)[1]
version_number = base_version_number.split(".")
# RFC 2145 section 3.1 says there can be only one "." and
# - major and minor numbers MUST be treated as
# separate integers;
# - HTTP/2.4 is a lower version than HTTP/2.13, which in
# turn is lower than HTTP/12.3;
# - Leading zeros MUST be ignored by recipients.
if len(version_number) != 2:
raise ValueError
version_number = int(version_number[0]), int(version_number[1])
except (ValueError, IndexError):
self.send_error(400, "Bad request version (%r)" % version)
return False
if version_number >= (1, 1) and self.protocol_version >= "HTTP/1.1":
self.close_connection = 0
if version_number >= (2, 0):
self.send_error(505,
"Invalid HTTP Version (%s)" % base_version_number)
return False
elif len(words) == 2:
[command, path] = words
self.close_connection = 1
if command != 'GET':
self.send_error(400,
"Bad HTTP/0.9 request type (%r)" % command)
return False
elif not words:
return False
else:
self.send_error(400, "Bad request syntax (%r)" % requestline)
return False
self.command, self.path, self.request_version = command, path, version
# Examine the headers and look for a Connection directive
self.headers = self.MessageClass(self.rfile, 0)
conntype = self.headers.get('Connection', "")
if conntype.lower() == 'close':
self.close_connection = 1
elif (conntype.lower() == 'keep-alive' and
self.protocol_version >= "HTTP/1.1"):
self.close_connection = 0
return True
def handle_one_request(self):
"""Handle a single HTTP request.
You normally don't need to override this method; see the class
__doc__ string for information on how to handle specific HTTP
commands such as GET and POST.
"""
self.raw_requestline = self.rfile.readline()
if not self.raw_requestline:
self.close_connection = 1
return
if not self.parse_request(): # An error code has been sent, just exit
return
mname = 'do_' + self.command
if not hasattr(self, mname):
self.send_error(501, "Unsupported method (%r)" % self.command)
return
method = getattr(self, mname)
method()
def handle(self):
"""Handle multiple requests if necessary."""
self.close_connection = 1
self.handle_one_request()
while not self.close_connection:
self.handle_one_request()
def send_error(self, code, message=None):
"""Send and log an error reply.
Arguments are the error code, and a detailed message.
The detailed message defaults to the short entry matching the
response code.
This sends an error response (so it must be called before any
output has been generated), logs the error, and finally sends
a piece of HTML explaining the error to the user.
"""
try:
short, long = self.responses[code]
except KeyError:
short, long = '???', '???'
if message is None:
message = short
explain = long
self.log_error("code %d, message %s", code, message)
# using _quote_html to prevent Cross Site Scripting attacks (see bug #1100201)
content = (self.error_message_format %
{'code': code, 'message': _quote_html(message), 'explain': explain})
self.send_response(code, message)
self.send_header("Content-Type", "text/html")
self.send_header('Connection', 'close')
self.end_headers()
if self.command != 'HEAD' and code >= 200 and code not in (204, 304):
self.wfile.write(content)
error_message_format = DEFAULT_ERROR_MESSAGE
def send_response(self, code, message=None):
"""Send the response header and log the response code.
Also send two standard headers with the server software
version and the current date.
"""
self.log_request(code)
if message is None:
if code in self.responses:
message = self.responses[code][0]
else:
message = ''
if self.request_version != 'HTTP/0.9':
self.wfile.write("%s %d %s\r\n" %
(self.protocol_version, code, message))
# print (self.protocol_version, code, message)
self.send_header('Server', self.version_string())
self.send_header('Date', self.date_time_string())
def send_header(self, keyword, value):
"""Send a MIME header."""
if self.request_version != 'HTTP/0.9':
self.wfile.write("%s: %s\r\n" % (keyword, value))
if keyword.lower() == 'connection':
if value.lower() == 'close':
self.close_connection = 1
elif value.lower() == 'keep-alive':
self.close_connection = 0
def end_headers(self):
"""Send the blank line ending the MIME headers."""
if self.request_version != 'HTTP/0.9':
self.wfile.write("\r\n")
def log_request(self, code='-', size='-'):
"""Log an accepted request.
This is called by send_response().
"""
self.log_message('"%s" %s %s',
self.requestline, str(code), str(size))
def log_error(self, *args):
"""Log an error.
This is called when a request cannot be fulfilled. By
default it passes the message on to log_message().
Arguments are the same as for log_message().
XXX This should go to the separate error log.
"""
self.log_message(*args)
def log_message(self, format, *args):
"""Log an arbitrary message.
This is used by all other logging functions. Override
it if you have specific logging wishes.
The first argument, FORMAT, is a format string for the
message to be logged. If the format string contains
any % escapes requiring parameters, they should be
specified as subsequent arguments (it's just like
printf!).
The client host and current date/time are prefixed to
every message.
"""
sys.stderr.write("%s - - [%s] %s\n" %
(self.address_string(),
self.log_date_time_string(),
format%args))
def version_string(self):
"""Return the server software version string."""
return self.server_version + ' ' + self.sys_version
def date_time_string(self, timestamp=None):
"""Return the current date and time formatted for a message header."""
if timestamp is None:
timestamp = time.time()
year, month, day, hh, mm, ss, wd, y, z = time.gmtime(timestamp)
s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
self.weekdayname[wd],
day, self.monthname[month], year,
hh, mm, ss)
return s
def log_date_time_string(self):
"""Return the current time formatted for logging."""
now = time.time()
year, month, day, hh, mm, ss, x, y, z = time.localtime(now)
s = "%02d/%3s/%04d %02d:%02d:%02d" % (
day, self.monthname[month], year, hh, mm, ss)
return s
weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
monthname = [None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
def address_string(self):
"""Return the client address formatted for logging.
This version looks up the full hostname using gethostbyaddr(),
and tries to find a name that contains at least one dot.
"""
host, port = self.client_address[:2]
return socket.getfqdn(host)
# Essentially static class variables
# The version of the HTTP protocol we support.
# Set this to HTTP/1.1 to enable automatic keepalive
protocol_version = "HTTP/1.0"
# The Message-like class used to parse headers
MessageClass = mimetools.Message
# Table mapping response codes to messages; entries have the
# form {code: (shortmessage, longmessage)}.
# See RFC 2616.
responses = {
100: ('Continue', 'Request received, please continue'),
101: ('Switching Protocols',
'Switching to new protocol; obey Upgrade header'),
200: ('OK', 'Request fulfilled, document follows'),
201: ('Created', 'Document created, URL follows'),
202: ('Accepted',
'Request accepted, processing continues off-line'),
203: ('Non-Authoritative Information', 'Request fulfilled from cache'),
204: ('No Content', 'Request fulfilled, nothing follows'),
205: ('Reset Content', 'Clear input form for further input.'),
206: ('Partial Content', 'Partial content follows.'),
300: ('Multiple Choices',
'Object has several resources -- see URI list'),
301: ('Moved Permanently', 'Object moved permanently -- see URI list'),
302: ('Found', 'Object moved temporarily -- see URI list'),
303: ('See Other', 'Object moved -- see Method and URL list'),
304: ('Not Modified',
'Document has not changed since given time'),
305: ('Use Proxy',
'You must use proxy specified in Location to access this '
'resource.'),
307: ('Temporary Redirect',
'Object moved temporarily -- see URI list'),
400: ('Bad Request',
'Bad request syntax or unsupported method'),
401: ('Unauthorized',
'No permission -- see authorization schemes'),
402: ('Payment Required',
'No payment -- see charging schemes'),
403: ('Forbidden',
'Request forbidden -- authorization will not help'),
404: ('Not Found', 'Nothing matches the given URI'),
405: ('Method Not Allowed',
'Specified method is invalid for this server.'),
406: ('Not Acceptable', 'URI not available in preferred format.'),
407: ('Proxy Authentication Required', 'You must authenticate with '
'this proxy before proceeding.'),
408: ('Request Timeout', 'Request timed out; try again later.'),
409: ('Conflict', 'Request conflict.'),
410: ('Gone',
'URI no longer exists and has been permanently removed.'),
411: ('Length Required', 'Client must specify Content-Length.'),
412: ('Precondition Failed', 'Precondition in headers is false.'),
413: ('Request Entity Too Large', 'Entity is too large.'),
414: ('Request-URI Too Long', 'URI is too long.'),
415: ('Unsupported Media Type', 'Entity body in unsupported format.'),
416: ('Requested Range Not Satisfiable',
'Cannot satisfy request range.'),
417: ('Expectation Failed',
'Expect condition could not be satisfied.'),
500: ('Internal Server Error', 'Server got itself in trouble'),
501: ('Not Implemented',
'Server does not support this operation'),
502: ('Bad Gateway', 'Invalid responses from another server/proxy.'),
503: ('Service Unavailable',
'The server cannot process the request due to a high load'),
504: ('Gateway Timeout',
'The gateway server did not receive a timely response'),
505: ('HTTP Version Not Supported', 'Cannot fulfill request.'),
}
def test(HandlerClass = BaseHTTPRequestHandler,
ServerClass = HTTPServer, protocol="HTTP/1.0"):
"""Test the HTTP request handler class.
This runs an HTTP server on port 8000 (or the first command line
argument).
"""
if sys.argv[1:]:
port = int(sys.argv[1])
else:
port = 8000
server_address = ('', port)
HandlerClass.protocol_version = protocol
httpd = ServerClass(server_address, HandlerClass)
sa = httpd.socket.getsockname()
print "Serving HTTP on", sa[0], "port", sa[1], "..."
httpd.serve_forever()
if __name__ == '__main__':
test()
|
happyWilliam/qzb-api
|
refs/heads/master
|
sdk/Python/PhalApiClient/python2.x/PhalApiClient.py
|
3
|
#-*- coding:utf-8 -*-
#gaoyiping (iam@gaoyiping.com) 2017-02-18
import json, urllib, urllib2
def PhalApiClient(host, service = None, params = None, timeout = None):
url = host + ('' if service is None else ('?service=' + service))
if params is not None:
assert type(params) is dict, 'params type must be dict'
        assert params, 'params must not be empty'
params = urllib.urlencode(params)
request = urllib2.Request(url)
response = urllib2.urlopen(request, data = params, timeout = timeout)
return {'info': response.info(), 'state': response.getcode(), 'data': json.loads(response.read())}
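# Usage sketch (illustration only; the host, service and params below are placeholders):
#
#     result = PhalApiClient('http://api.example.com/', 'Demo.GetList', {'page': 1})
#     print result['state'], result['data']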
|
alistairlow/tensorflow
|
refs/heads/master
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/masked_autoregressive_test.py
|
8
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MaskedAutoregressiveFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import test_util
from tensorflow.contrib.distributions.python.ops.bijectors.invert import Invert
from tensorflow.contrib.distributions.python.ops.bijectors.masked_autoregressive import masked_autoregressive_default_template
from tensorflow.contrib.distributions.python.ops.bijectors.masked_autoregressive import MaskedAutoregressiveFlow
from tensorflow.contrib.distributions.python.ops.bijectors.masked_autoregressive_impl import _gen_mask
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.distributions import normal as normal_lib
from tensorflow.python.ops.distributions import transformed_distribution as transformed_distribution_lib
from tensorflow.python.platform import test
class GenMaskTest(test.TestCase):
def test346Exclusive(self):
expected_mask = np.array(
[[0, 0, 0, 0],
[0, 0, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0],
[1, 1, 0, 0],
[1, 1, 0, 0]])
mask = _gen_mask(num_blocks=3, n_in=4, n_out=6, mask_type="exclusive")
self.assertAllEqual(expected_mask, mask)
def test346Inclusive(self):
expected_mask = np.array(
[[1, 0, 0, 0],
[1, 0, 0, 0],
[1, 1, 0, 0],
[1, 1, 0, 0],
[1, 1, 1, 0],
[1, 1, 1, 0]])
mask = _gen_mask(num_blocks=3, n_in=4, n_out=6, mask_type="inclusive")
self.assertAllEqual(expected_mask, mask)
class MaskedAutoregressiveFlowTest(test_util.VectorDistributionTestHelpers,
test.TestCase):
@property
def _autoregressive_flow_kwargs(self):
return {
"shift_and_log_scale_fn": masked_autoregressive_default_template(
hidden_layers=[2], shift_only=False),
"is_constant_jacobian": False,
}
def testBijector(self):
x_ = np.arange(3 * 4 * 2).astype(np.float32).reshape(3, 4, 2)
with self.test_session() as sess:
ma = MaskedAutoregressiveFlow(
validate_args=True,
**self._autoregressive_flow_kwargs)
x = constant_op.constant(x_)
forward_x = ma.forward(x)
# Use identity to invalidate cache.
inverse_y = ma.inverse(array_ops.identity(forward_x))
fldj = ma.forward_log_det_jacobian(x)
# Use identity to invalidate cache.
ildj = ma.inverse_log_det_jacobian(array_ops.identity(forward_x))
variables.global_variables_initializer().run()
[
forward_x_,
inverse_y_,
ildj_,
fldj_,
] = sess.run([
forward_x,
inverse_y,
ildj,
fldj,
])
self.assertEqual("masked_autoregressive_flow", ma.name)
self.assertAllClose(forward_x_, forward_x_, rtol=1e-6, atol=0.)
self.assertAllClose(x_, inverse_y_, rtol=1e-5, atol=0.)
self.assertAllClose(ildj_, -fldj_, rtol=1e-6, atol=0.)
def testMutuallyConsistent(self):
dims = 4
with self.test_session() as sess:
ma = MaskedAutoregressiveFlow(
validate_args=True,
**self._autoregressive_flow_kwargs)
dist = transformed_distribution_lib.TransformedDistribution(
distribution=normal_lib.Normal(loc=0., scale=1.),
bijector=ma,
event_shape=[dims],
validate_args=True)
self.run_test_sample_consistent_log_prob(
sess_run_fn=sess.run,
dist=dist,
num_samples=int(1e5),
radius=1.,
center=0.,
rtol=0.02)
def testInvertMutuallyConsistent(self):
dims = 4
with self.test_session() as sess:
ma = Invert(MaskedAutoregressiveFlow(
validate_args=True,
**self._autoregressive_flow_kwargs))
dist = transformed_distribution_lib.TransformedDistribution(
distribution=normal_lib.Normal(loc=0., scale=1.),
bijector=ma,
event_shape=[dims],
validate_args=True)
self.run_test_sample_consistent_log_prob(
sess_run_fn=sess.run,
dist=dist,
num_samples=int(1e5),
radius=1.,
center=0.,
rtol=0.02)
class MaskedAutoregressiveFlowShiftOnlyTest(MaskedAutoregressiveFlowTest):
@property
def _autoregressive_flow_kwargs(self):
return {
"shift_and_log_scale_fn": masked_autoregressive_default_template(
hidden_layers=[2], shift_only=True),
"is_constant_jacobian": True,
}
if __name__ == "__main__":
test.main()
|
haxwithaxe/supybot
|
refs/heads/master
|
plugins/Success/config.py
|
15
|
###
# Copyright (c) 2005, Daniel DiPaolo
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import supybot.conf as conf
import supybot.registry as registry
def configure(advanced):
# This will be called by supybot to configure this module. advanced is
# a bool that specifies whether the user identified himself as an advanced
# user or not. You should effect your configuration by manipulating the
# registry as appropriate.
from supybot.questions import expect, anything, something, yn
conf.registerPlugin('Success', True)
Success = conf.registerPlugin('Success')
# This is where your configuration variables (if any) should go. For example:
# conf.registerGlobalValue(Success, 'someConfigVariableName',
# registry.Boolean(False, """Help for someConfigVariableName."""))
conf.registerChannelValue(conf.supybot.plugins.Success, 'prefixNick',
registry.Boolean(True, """Determines whether the bot will prefix the nick
of the user giving an invalid command to the success response."""))
# vim:set shiftwidth=4 softtabstop=8 expandtab textwidth=78
|
Incoin/incoin
|
refs/heads/master
|
contrib/wallettools/walletunlock.py
|
782
|
from jsonrpc import ServiceProxy
access = ServiceProxy("http://127.0.0.1:9332")
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
|
starrybeam/samba
|
refs/heads/master
|
third_party/pep8/testsuite/E21.py
|
62
|
#: E211
spam (1)
#: E211 E211
dict ['key'] = list [index]
#: E211
dict['key'] ['subkey'] = list[index]
#: Okay
spam(1)
dict['key'] = list[index]
# This is not prohibited by PEP8, but avoid it.
class Foo (Bar, Baz):
pass
|
pigeonflight/strider-plone
|
refs/heads/master
|
docker/appengine/lib/django-1.4/django/contrib/gis/gdal/tests/__init__.py
|
332
|
"""
Module for executing all of the GDAL tests. None
of these tests require the use of the database.
"""
from django.utils.unittest import TestSuite, TextTestRunner
# Importing the GDAL test modules.
import test_driver, test_ds, test_envelope, test_geom, test_srs
test_suites = [test_driver.suite(),
test_ds.suite(),
test_envelope.suite(),
test_geom.suite(),
test_srs.suite(),
]
def suite():
"Builds a test suite for the GDAL tests."
s = TestSuite()
map(s.addTest, test_suites)
return s
def run(verbosity=1):
"Runs the GDAL tests."
TextTestRunner(verbosity=verbosity).run(suite())
|
tilacog/rows
|
refs/heads/develop
|
rows/fields.py
|
1
|
# coding: utf-8
# Copyright 2014-2015 Álvaro Justen <https://github.com/turicas/rows/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import collections
import datetime
import locale
import re
import types
from decimal import Decimal, InvalidOperation
# Order matters here
__all__ = ['BoolField', 'IntegerField', 'FloatField', 'DatetimeField',
'DateField', 'DecimalField', 'PercentField', 'UnicodeField',
'ByteField', 'Field']
REGEXP_ONLY_NUMBERS = re.compile('[^0-9]')
SHOULD_NOT_USE_LOCALE = True # This variable is changed by rows.locale_manager
NULL = (b'-', b'null', b'none', b'nil')
class Field(object):
"""Base Field class - all fields should inherit from this
    Since the fallback for all other field types is the ByteField, this Field
    actually implements what is expected of the ByteField
"""
TYPE = types.NoneType
@classmethod
def serialize(cls, value, *args, **kwargs):
"""Serialize a value to be exported
        `cls.serialize` should always return a unicode value, except for
ByteField
"""
if value is None:
value = ''
return value
@classmethod
def deserialize(cls, value, *args, **kwargs):
"""Deserialize a value just after importing it
`cls.deserialize` should always return a value of type `cls.TYPE` or
`None`.
"""
if isinstance(value, cls.TYPE):
return value
elif is_null(value):
return None
else:
return value
class ByteField(Field):
"""Field class to represent byte arrays
Is not locale-aware (does not need to be)
"""
TYPE = types.StringType
@classmethod
def serialize(cls, value, *args, **kwargs):
if value is None:
value = ''
return cls.TYPE(value)
@classmethod
def deserialize(cls, value, *args, **kwargs):
if value is None:
return None
else:
return cls.TYPE(value)
class BoolField(Field):
"""Base class to representing boolean
Is not locale-aware (if you need to, please customize by changing its
attributes like `TRUE_VALUES` and `FALSE_VALUES`)
"""
TYPE = types.BooleanType
SERIALIZED_VALUES = {True: 'true', False: 'false', None: ''}
TRUE_VALUES = (b'true', b'1', b'yes')
FALSE_VALUES = (b'false', b'0', b'no')
@classmethod
def serialize(cls, value, *args, **kwargs):
return cls.SERIALIZED_VALUES[value]
@classmethod
def deserialize(cls, value, *args, **kwargs):
value = super(BoolField, cls).deserialize(value)
if value is None or isinstance(value, cls.TYPE):
return value
value = as_string(value)
if value in cls.TRUE_VALUES:
return True
elif value in cls.FALSE_VALUES:
return False
else:
raise ValueError('Value is not boolean')
class IntegerField(Field):
"""Field class to represent integer
Is locale-aware
"""
TYPE = types.IntType
@classmethod
def serialize(cls, value, *args, **kwargs):
if value is None:
return ''
if SHOULD_NOT_USE_LOCALE:
return types.UnicodeType(value)
else:
grouping = kwargs.get('grouping', None)
return locale.format('%d', value, grouping=grouping)
@classmethod
def deserialize(cls, value, *args, **kwargs):
value = super(IntegerField, cls).deserialize(value)
if value is None or isinstance(value, cls.TYPE):
return value
converted = int(value) if SHOULD_NOT_USE_LOCALE else locale.atoi(value)
float_equivalent = FloatField.deserialize(value, *args, **kwargs)
if float_equivalent == converted:
return converted
else:
raise ValueError("It's float, not integer")
class FloatField(Field):
"""Field class to represent float
Is locale-aware
"""
TYPE = types.FloatType
@classmethod
def serialize(cls, value, *args, **kwargs):
if value is None:
return ''
if SHOULD_NOT_USE_LOCALE:
return types.UnicodeType(value)
else:
grouping = kwargs.get('grouping', None)
return locale.format('%f', value, grouping=grouping)
@classmethod
def deserialize(cls, value, *args, **kwargs):
value = super(FloatField, cls).deserialize(value)
if value is None or isinstance(value, cls.TYPE):
return value
value = as_string(value)
if SHOULD_NOT_USE_LOCALE:
return float(value)
else:
return locale.atof(value)
class DecimalField(Field):
"""Field class to represent decimal data (as Python's decimal.Decimal)
Is locale-aware
"""
TYPE = Decimal
@classmethod
def serialize(cls, value, *args, **kwargs):
if value is None:
return ''
value_as_string = types.UnicodeType(value)
if SHOULD_NOT_USE_LOCALE:
return value_as_string
else:
grouping = kwargs.get('grouping', None)
has_decimal_places = value_as_string.find('.') != -1
if not has_decimal_places:
string_format = '%d'
else:
decimal_places = len(value_as_string.split('.')[1])
string_format = '%.{}f'.format(decimal_places)
return locale.format(string_format, value, grouping=grouping)
@classmethod
def deserialize(cls, value, *args, **kwargs):
value = super(DecimalField, cls).deserialize(value)
if value is None or isinstance(value, cls.TYPE):
return value
if SHOULD_NOT_USE_LOCALE:
try:
return Decimal(value)
except InvalidOperation:
raise ValueError("Can't be {}".format(cls.__name__))
else:
locale_vars = locale.localeconv()
decimal_separator = locale_vars['decimal_point']
interesting_vars = ['decimal_point', 'mon_decimal_point',
'mon_thousands_sep', 'negative_sign',
'positive_sign', 'thousands_sep']
chars = (locale_vars[x].replace('.', '\.').replace('-', '\-')
for x in interesting_vars)
interesting_chars = ''.join(set(chars))
regexp = re.compile(r'[^0-9{} ]'.format(interesting_chars))
if regexp.findall(value):
raise ValueError("Can't be {}".format(cls.__name__))
parts = [REGEXP_ONLY_NUMBERS.subn('', number)[0]
for number in value.split(decimal_separator)]
if len(parts) > 2:
raise ValueError("Can't deserialize with this locale.")
try:
value = Decimal(parts[0])
if len(parts) == 2:
decimal_places = len(parts[1])
value = value + (Decimal(parts[1]) / (10 ** decimal_places))
except InvalidOperation:
raise ValueError("Can't be {}".format(cls.__name__))
return value
class PercentField(DecimalField):
"""Field class to represent percent values
Is locale-aware (inherit this behaviour from `rows.DecimalField`)
"""
@classmethod
def serialize(cls, value, *args, **kwargs):
value = Decimal(str(value * 100)[:-2])
value = super(PercentField, cls).serialize(value, *args, **kwargs)
return '{}%'.format(value)
@classmethod
def deserialize(cls, value, *args, **kwargs):
if isinstance(value, cls.TYPE):
return value
elif is_null(value):
return None
value = as_string(value)
if '%' not in value:
raise ValueError("Can't be {}".format(cls.__name__))
value = value.replace('%', '')
return super(PercentField, cls).deserialize(value) / 100
class DateField(Field):
"""Field class to represent date
Is not locale-aware (does not need to be)
"""
TYPE = datetime.date
INPUT_FORMAT = '%Y-%m-%d'
OUTPUT_FORMAT = '%Y-%m-%d'
@classmethod
def serialize(cls, value, *args, **kwargs):
if value is None:
return ''
return types.UnicodeType(value.strftime(cls.OUTPUT_FORMAT))
@classmethod
def deserialize(cls, value, *args, **kwargs):
value = super(DateField, cls).deserialize(value)
if value is None or isinstance(value, cls.TYPE):
return value
value = as_string(value)
dt_object = datetime.datetime.strptime(value, cls.INPUT_FORMAT)
return datetime.date(dt_object.year, dt_object.month, dt_object.day)
class DatetimeField(Field):
"""Field class to represent date-time
Is not locale-aware (does not need to be)
"""
TYPE = datetime.datetime
DATETIME_REGEXP = re.compile('^([0-9]{4})-([0-9]{2})-([0-9]{2})[ T]'
'([0-9]{2}):([0-9]{2}):([0-9]{2})$')
@classmethod
def serialize(cls, value, *args, **kwargs):
if value is None:
return ''
return types.UnicodeType(value.isoformat())
@classmethod
def deserialize(cls, value, *args, **kwargs):
value = super(DatetimeField, cls).deserialize(value)
if value is None or isinstance(value, cls.TYPE):
return value
value = as_string(value)
# TODO: may use iso8601
groups = cls.DATETIME_REGEXP.findall(value)
if not groups:
raise ValueError("Can't be {}".format(cls.__name__))
else:
return datetime.datetime(*[int(x) for x in groups[0]])
class UnicodeField(Field):
"""Field class to represent unicode strings
Is not locale-aware (does not need to be)
"""
TYPE = types.UnicodeType
@classmethod
def deserialize(cls, value, *args, **kwargs):
value = super(UnicodeField, cls).deserialize(value)
if value is None:
return None
if type(value) is cls.TYPE:
return value
elif 'encoding' in kwargs:
return as_string(value).decode(kwargs['encoding'])
else:
return cls.TYPE(value)
AVAILABLE_FIELD_TYPES = [locals()[element] for element in __all__
if 'Field' in element and element != 'Field']
def as_string(value):
if isinstance(value, types.StringTypes):
return value
else:
return types.StringType(value)
def is_null(value):
if value is None:
return True
value_str = as_string(value).strip().lower()
return not value_str or value_str in NULL
def detect_types(field_names, field_values, field_types=AVAILABLE_FIELD_TYPES,
*args, **kwargs):
"""Where the magic happens"""
# TODO: may receive 'type hints'
# TODO: should support receiving unicode objects directly
# TODO: should expect data in unicode or will be able to use binary data?
number_of_fields = len(field_names)
columns = zip(*[row for row in field_values
if len(row) == number_of_fields])
if len(columns) != number_of_fields:
raise ValueError('Number of fields differ')
none_type = set([type(None)])
detected_types = collections.OrderedDict([(field_name, None)
for field_name in field_names])
encoding = kwargs.get('encoding', None)
for index, field_name in enumerate(field_names):
data = set([value for value in set(columns[index])
if not is_null(value)])
if not data:
# all rows with an empty field -> ByteField (can't identify)
identified_type = ByteField
else:
# ok, let's try to identify the type of this column by
# converting every non-null value in the sample
possible_types = list(field_types)
for value in data:
cant_be = set()
for type_ in possible_types:
try:
type_.deserialize(value, *args, **kwargs)
except (ValueError, TypeError):
cant_be.add(type_)
for type_to_remove in cant_be:
possible_types.remove(type_to_remove)
identified_type = possible_types[0] # priorities matter
detected_types[field_name] = identified_type
return detected_types
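# Usage sketch (illustration only; the sample below is made up):
#
#     field_names = ['name', 'age']
#     field_values = [[b'Alice', b'30'], [b'Bob', b'25']]
#     detect_types(field_names, field_values)
#     # expected (approximately):
#     # OrderedDict([('name', UnicodeField), ('age', IntegerField)])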
|
dellis23/parsedatetime
|
refs/heads/master
|
parsedatetime/tests/TestSimpleOffsets.py
|
6
|
"""
Test parsing of 'simple' offsets
"""
import time
import datetime
import calendar
import unittest
import parsedatetime as pdt
def _truncateResult(result, trunc_seconds=True, trunc_hours=False):
try:
dt, flag = result
except ValueError:
        # result is not a (value, flag) pair; return it unchanged
return result
if trunc_seconds:
dt = dt[:5] + (0,) * 4
if trunc_hours:
dt = dt[:3] + (0,) * 6
return dt, flag
_tr = _truncateResult
class test(unittest.TestCase):
@pdt.tests.assertEqualWithComparator
def assertExpectedResult(self, result, check, **kwargs):
return pdt.tests.compareResultByTimeTuplesAndFlags(result, check, **kwargs)
def setUp(self):
self.cal = pdt.Calendar()
self.yr, self.mth, self.dy, self.hr, self.mn, self.sec, self.wd, self.yd, self.isdst = time.localtime()
def testNow(self):
s = datetime.datetime.now()
start = s.timetuple()
target = s.timetuple()
self.assertExpectedResult(self.cal.parse('now', start), (target, 2))
def testMinutesFromNow(self):
s = datetime.datetime.now()
t = s + datetime.timedelta(minutes=5)
start = s.timetuple()
target = t.timetuple()
self.assertExpectedResult(self.cal.parse('5 minutes from now', start), (target, 2))
self.assertExpectedResult(self.cal.parse('5 min from now', start), (target, 2))
self.assertExpectedResult(self.cal.parse('5m from now', start), (target, 2))
self.assertExpectedResult(self.cal.parse('in 5 minutes', start), (target, 2))
self.assertExpectedResult(self.cal.parse('in 5 min', start), (target, 2))
self.assertExpectedResult(self.cal.parse('5 minutes', start), (target, 2))
self.assertExpectedResult(self.cal.parse('5 min', start), (target, 2))
self.assertExpectedResult(self.cal.parse('5m', start), (target, 2))
self.assertExpectedResult(self.cal.parse('five minutes from now', start), (target, 2))
self.assertExpectedResult(self.cal.parse('five min from now', start), (target, 2))
self.assertExpectedResult(self.cal.parse('in five minutes', start), (target, 2))
self.assertExpectedResult(self.cal.parse('in five min', start), (target, 2))
self.assertExpectedResult(self.cal.parse('five minutes', start), (target, 2))
self.assertExpectedResult(self.cal.parse('five min', start), (target, 2))
def testMinutesBeforeNow(self):
s = datetime.datetime.now()
t = s + datetime.timedelta(minutes=-5)
start = s.timetuple()
target = t.timetuple()
self.assertExpectedResult(self.cal.parse('5 minutes before now', start), (target, 2))
self.assertExpectedResult(self.cal.parse('5 min before now', start), (target, 2))
self.assertExpectedResult(self.cal.parse('5m before now', start), (target, 2))
self.assertExpectedResult(self.cal.parse('5 minutes ago', start), (target, 2))
self.assertExpectedResult(self.cal.parse('five minutes before now', start), (target, 2))
self.assertExpectedResult(self.cal.parse('five min before now', start), (target, 2))
def testWeekFromNow(self):
s = datetime.datetime.now()
t = s + datetime.timedelta(weeks=1)
start = s.timetuple()
target = t.timetuple()
self.assertExpectedResult(self.cal.parse('in 1 week', start), (target, 1))
self.assertExpectedResult(self.cal.parse('1 week from now', start), (target, 3))
self.assertExpectedResult(self.cal.parse('in one week', start), (target, 1))
self.assertExpectedResult(self.cal.parse('one week from now', start), (target, 3))
self.assertExpectedResult(self.cal.parse('in a week', start), (target, 1))
self.assertExpectedResult(self.cal.parse('a week from now', start), (target, 3))
self.assertExpectedResult(self.cal.parse('in 7 days', start), (target, 1))
self.assertExpectedResult(self.cal.parse('7 days from now', start), (target, 3))
self.assertExpectedResult(self.cal.parse('in seven days', start), (target, 1))
self.assertExpectedResult(self.cal.parse('seven days from now', start), (target, 3))
self.assertEqual(_tr(self.cal.parse('next week', start),
trunc_hours=True),
_tr((target, 1), trunc_hours=True))
def testNextWeekDay(self):
start = datetime.datetime.now()
target = start + datetime.timedelta(days=4 + 7 - start.weekday())
start = start.timetuple()
target = target.timetuple()
self.assertExpectedResult(self.cal.parse('next friday', start),
(target, 1), dateOnly=True)
self.assertExpectedResult(self.cal.parse('next friday?', start),
(target, 1), dateOnly=True)
self.cal.ptc.StartTimeFromSourceTime = True
self.assertExpectedResult(self.cal.parse('next friday', start),
(target, 1))
def testWeekBeforeNow(self):
s = datetime.datetime.now()
t = s + datetime.timedelta(weeks=-1)
start = s.timetuple()
target = t.timetuple()
self.assertEqual(_tr(self.cal.parse('1 week before now', start)),
_tr((target, 3)))
self.assertEqual(_tr(self.cal.parse('one week before now', start)),
_tr((target, 3)))
self.assertEqual(_tr(self.cal.parse('a week before now', start)),
_tr((target, 3)))
self.assertEqual(_tr(self.cal.parse('7 days before now', start)),
_tr((target, 3)))
self.assertEqual(_tr(self.cal.parse('seven days before now', start)),
_tr((target, 3)))
self.assertEqual(_tr(self.cal.parse('1 week ago', start)),
_tr((target, 1)))
self.assertEqual(_tr(self.cal.parse('a week ago', start)),
_tr((target, 1)))
self.assertEqual(_tr(self.cal.parse('last week', start), trunc_hours=True),
_tr((target, 1), trunc_hours=True))
def testNextMonth(self):
s = datetime.datetime(self.yr, self.mth, self.dy, self.hr, self.mn, self.sec) + datetime.timedelta(days=1)
t = self.cal.inc(s, year=1)
start = s.timetuple()
target = t.timetuple()
phrase = 'next %s %s' % (calendar.month_name[t.month], t.day)
self.assertEqual(_tr(self.cal.parse(phrase, start)),
_tr((target, 1)))
def testSpecials(self):
s = datetime.datetime.now()
t = datetime.datetime(self.yr, self.mth, self.dy, 9, 0, 0) + datetime.timedelta(days=1)
start = s.timetuple()
target = t.timetuple()
self.assertExpectedResult(self.cal.parse('tomorrow', start), (target, 1))
self.assertExpectedResult(self.cal.parse('next day', start), (target, 1))
t = datetime.datetime(self.yr, self.mth, self.dy, 9, 0, 0) + datetime.timedelta(days=-1)
target = t.timetuple()
self.assertExpectedResult(self.cal.parse('yesterday', start), (target, 1))
t = datetime.datetime(self.yr, self.mth, self.dy, 9, 0, 0)
target = t.timetuple()
self.assertExpectedResult(self.cal.parse('today', start), (target, 1))
if __name__ == "__main__":
unittest.main()
|
malikcjm/qtcreator
|
refs/heads/work
|
tests/system/suite_general/tst_installed_languages/test.py
|
3
|
#############################################################################
##
## Copyright (C) 2013 Digia Plc and/or its subsidiary(-ies).
## Contact: http://www.qt-project.org/legal
##
## This file is part of Qt Creator.
##
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and Digia. For licensing terms and
## conditions see http://qt.digia.com/licensing. For further information
## use the contact form at http://qt.digia.com/contact-us.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 as published by the Free Software
## Foundation and appearing in the file LICENSE.LGPL included in the
## packaging of this file. Please review the following information to
## ensure the GNU Lesser General Public License version 2.1 requirements
## will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## In addition, as a special exception, Digia gives you certain additional
## rights. These rights are described in the Digia Qt LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
#############################################################################
source("../../shared/qtcreator.py")
def main():
for lang in testData.dataset("languages.tsv"):
overrideStartApplication()
startApplication("qtcreator" + SettingsPath)
if not startedWithoutPluginError():
return
invokeMenuItem("Tools", "Options...")
waitForObjectItem(":Options_QListView", "Environment")
clickItem(":Options_QListView", "Environment", 14, 15, 0, Qt.LeftButton)
clickOnTab(":Options.qt_tabwidget_tabbar_QTabBar", "General")
languageName = testData.field(lang, "language")
selectFromCombo(":User Interface.languageBox_QComboBox", languageName)
clickButton(waitForObject(":Options.OK_QPushButton"))
clickButton(waitForObject(":Restart required.OK_QPushButton"))
invokeMenuItem("File", "Exit")
waitForCleanShutdown()
snooze(4) # wait for complete unloading of Creator
overrideStartApplication()
startApplication("qtcreator" + SettingsPath)
try:
invokeMenuItem(testData.field(lang, "File"), testData.field(lang, "Exit"))
test.passes("Creator was running in %s translation." % languageName)
except:
test.fail("Creator seems to be missing %s translation" % languageName)
sendEvent("QCloseEvent", ":Qt Creator_Core::Internal::MainWindow")
waitForCleanShutdown()
__removeTestingDir__()
copySettingsToTmpDir()
|
darouwan/shadowsocks
|
refs/heads/master
|
shadowsocks/tcprelay.py
|
922
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import time
import socket
import errno
import struct
import logging
import traceback
import random
from shadowsocks import encrypt, eventloop, shell, common
from shadowsocks.common import parse_header
# we clear at most TIMEOUTS_CLEAN_SIZE timeouts each time
TIMEOUTS_CLEAN_SIZE = 512
MSG_FASTOPEN = 0x20000000
# SOCKS command definition
CMD_CONNECT = 1
CMD_BIND = 2
CMD_UDP_ASSOCIATE = 3
# for each opening port, we have a TCP Relay
# for each connection, we have a TCP Relay Handler to handle the connection
# for each handler, we have 2 sockets:
# local: connected to the client
# remote: connected to remote server
# for each handler, it could be at one of several stages:
# as sslocal:
# stage 0 SOCKS hello received from local, send hello to local
# stage 1 addr received from local, query DNS for remote
# stage 2 UDP assoc
# stage 3 DNS resolved, connect to remote
# stage 4 still connecting, more data from local received
# stage 5 remote connected, piping local and remote
# as ssserver:
# stage 0 just jump to stage 1
# stage 1 addr received from local, query DNS for remote
# stage 3 DNS resolved, connect to remote
# stage 4 still connecting, more data from local received
# stage 5 remote connected, piping local and remote
STAGE_INIT = 0
STAGE_ADDR = 1
STAGE_UDP_ASSOC = 2
STAGE_DNS = 3
STAGE_CONNECTING = 4
STAGE_STREAM = 5
STAGE_DESTROYED = -1
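
# A minimal illustrative addition (not part of the original shadowsocks module):
# the stage constants above form the handler's state machine, so a readable
# name lookup like this one can make debug logging easier to follow.
_STAGE_NAMES = {
    STAGE_INIT: 'init',
    STAGE_ADDR: 'addr',
    STAGE_UDP_ASSOC: 'udp_assoc',
    STAGE_DNS: 'dns',
    STAGE_CONNECTING: 'connecting',
    STAGE_STREAM: 'stream',
    STAGE_DESTROYED: 'destroyed',
}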
# for each handler, we have 2 stream directions:
# upstream: from client to server direction
# read local and write to remote
# downstream: from server to client direction
# read remote and write to local
STREAM_UP = 0
STREAM_DOWN = 1
# for each stream, it's waiting for reading, or writing, or both
WAIT_STATUS_INIT = 0
WAIT_STATUS_READING = 1
WAIT_STATUS_WRITING = 2
WAIT_STATUS_READWRITING = WAIT_STATUS_READING | WAIT_STATUS_WRITING
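
# Illustrative self-check (an addition for clarity, not in the original module):
# the wait statuses are bit flags, combined with "|" above and tested with "&"
# in _update_stream below.
assert WAIT_STATUS_READWRITING & WAIT_STATUS_READING
assert WAIT_STATUS_READWRITING & WAIT_STATUS_WRITING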
BUF_SIZE = 32 * 1024
class TCPRelayHandler(object):
def __init__(self, server, fd_to_handlers, loop, local_sock, config,
dns_resolver, is_local):
self._server = server
self._fd_to_handlers = fd_to_handlers
self._loop = loop
self._local_sock = local_sock
self._remote_sock = None
self._config = config
self._dns_resolver = dns_resolver
# TCP Relay works as either sslocal or ssserver
# if is_local, this is sslocal
self._is_local = is_local
self._stage = STAGE_INIT
self._encryptor = encrypt.Encryptor(config['password'],
config['method'])
self._fastopen_connected = False
self._data_to_write_to_local = []
self._data_to_write_to_remote = []
self._upstream_status = WAIT_STATUS_READING
self._downstream_status = WAIT_STATUS_INIT
self._client_address = local_sock.getpeername()[:2]
self._remote_address = None
if 'forbidden_ip' in config:
self._forbidden_iplist = config['forbidden_ip']
else:
self._forbidden_iplist = None
if is_local:
self._chosen_server = self._get_a_server()
fd_to_handlers[local_sock.fileno()] = self
local_sock.setblocking(False)
local_sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
loop.add(local_sock, eventloop.POLL_IN | eventloop.POLL_ERR,
self._server)
self.last_activity = 0
self._update_activity()
def __hash__(self):
# default __hash__ is id / 16
# we want to eliminate collisions
return id(self)
@property
def remote_address(self):
return self._remote_address
def _get_a_server(self):
server = self._config['server']
server_port = self._config['server_port']
if type(server_port) == list:
server_port = random.choice(server_port)
if type(server) == list:
server = random.choice(server)
logging.debug('chosen server: %s:%d', server, server_port)
return server, server_port
def _update_activity(self, data_len=0):
        # tell the TCP Relay that we have had activity recently,
        # otherwise it will consider us inactive and time us out
self._server.update_activity(self, data_len)
def _update_stream(self, stream, status):
# update a stream to a new waiting status
# check if status is changed
# only update if dirty
dirty = False
if stream == STREAM_DOWN:
if self._downstream_status != status:
self._downstream_status = status
dirty = True
elif stream == STREAM_UP:
if self._upstream_status != status:
self._upstream_status = status
dirty = True
if dirty:
if self._local_sock:
event = eventloop.POLL_ERR
if self._downstream_status & WAIT_STATUS_WRITING:
event |= eventloop.POLL_OUT
if self._upstream_status & WAIT_STATUS_READING:
event |= eventloop.POLL_IN
self._loop.modify(self._local_sock, event)
if self._remote_sock:
event = eventloop.POLL_ERR
if self._downstream_status & WAIT_STATUS_READING:
event |= eventloop.POLL_IN
if self._upstream_status & WAIT_STATUS_WRITING:
event |= eventloop.POLL_OUT
self._loop.modify(self._remote_sock, event)
def _write_to_sock(self, data, sock):
# write data to sock
# if only some of the data are written, put remaining in the buffer
# and update the stream to wait for writing
if not data or not sock:
return False
uncomplete = False
try:
l = len(data)
s = sock.send(data)
if s < l:
data = data[s:]
uncomplete = True
except (OSError, IOError) as e:
error_no = eventloop.errno_from_exception(e)
if error_no in (errno.EAGAIN, errno.EINPROGRESS,
errno.EWOULDBLOCK):
uncomplete = True
else:
shell.print_exception(e)
self.destroy()
return False
if uncomplete:
if sock == self._local_sock:
self._data_to_write_to_local.append(data)
self._update_stream(STREAM_DOWN, WAIT_STATUS_WRITING)
elif sock == self._remote_sock:
self._data_to_write_to_remote.append(data)
self._update_stream(STREAM_UP, WAIT_STATUS_WRITING)
else:
logging.error('write_all_to_sock:unknown socket')
else:
if sock == self._local_sock:
self._update_stream(STREAM_DOWN, WAIT_STATUS_READING)
elif sock == self._remote_sock:
self._update_stream(STREAM_UP, WAIT_STATUS_READING)
else:
logging.error('write_all_to_sock:unknown socket')
return True
def _handle_stage_connecting(self, data):
if self._is_local:
data = self._encryptor.encrypt(data)
self._data_to_write_to_remote.append(data)
if self._is_local and not self._fastopen_connected and \
self._config['fast_open']:
# for sslocal and fastopen, we basically wait for data and use
# sendto to connect
try:
# only connect once
self._fastopen_connected = True
remote_sock = \
self._create_remote_socket(self._chosen_server[0],
self._chosen_server[1])
self._loop.add(remote_sock, eventloop.POLL_ERR, self._server)
data = b''.join(self._data_to_write_to_remote)
l = len(data)
s = remote_sock.sendto(data, MSG_FASTOPEN, self._chosen_server)
if s < l:
data = data[s:]
self._data_to_write_to_remote = [data]
else:
self._data_to_write_to_remote = []
self._update_stream(STREAM_UP, WAIT_STATUS_READWRITING)
except (OSError, IOError) as e:
if eventloop.errno_from_exception(e) == errno.EINPROGRESS:
# in this case data is not sent at all
self._update_stream(STREAM_UP, WAIT_STATUS_READWRITING)
elif eventloop.errno_from_exception(e) == errno.ENOTCONN:
logging.error('fast open not supported on this OS')
self._config['fast_open'] = False
self.destroy()
else:
shell.print_exception(e)
if self._config['verbose']:
traceback.print_exc()
self.destroy()
def _handle_stage_addr(self, data):
try:
if self._is_local:
cmd = common.ord(data[1])
if cmd == CMD_UDP_ASSOCIATE:
logging.debug('UDP associate')
if self._local_sock.family == socket.AF_INET6:
header = b'\x05\x00\x00\x04'
else:
header = b'\x05\x00\x00\x01'
addr, port = self._local_sock.getsockname()[:2]
addr_to_send = socket.inet_pton(self._local_sock.family,
addr)
port_to_send = struct.pack('>H', port)
self._write_to_sock(header + addr_to_send + port_to_send,
self._local_sock)
self._stage = STAGE_UDP_ASSOC
# just wait for the client to disconnect
return
elif cmd == CMD_CONNECT:
# just trim VER CMD RSV
data = data[3:]
else:
logging.error('unknown command %d', cmd)
self.destroy()
return
header_result = parse_header(data)
if header_result is None:
raise Exception('can not parse header')
addrtype, remote_addr, remote_port, header_length = header_result
logging.info('connecting %s:%d from %s:%d' %
(common.to_str(remote_addr), remote_port,
self._client_address[0], self._client_address[1]))
self._remote_address = (common.to_str(remote_addr), remote_port)
# pause reading
self._update_stream(STREAM_UP, WAIT_STATUS_WRITING)
self._stage = STAGE_DNS
if self._is_local:
# forward address to remote
self._write_to_sock((b'\x05\x00\x00\x01'
b'\x00\x00\x00\x00\x10\x10'),
self._local_sock)
data_to_send = self._encryptor.encrypt(data)
self._data_to_write_to_remote.append(data_to_send)
                # note: this may call _handle_dns_resolved directly (synchronously)
self._dns_resolver.resolve(self._chosen_server[0],
self._handle_dns_resolved)
else:
if len(data) > header_length:
self._data_to_write_to_remote.append(data[header_length:])
                # note: this may call _handle_dns_resolved directly (synchronously)
self._dns_resolver.resolve(remote_addr,
self._handle_dns_resolved)
except Exception as e:
self._log_error(e)
if self._config['verbose']:
traceback.print_exc()
self.destroy()
def _create_remote_socket(self, ip, port):
addrs = socket.getaddrinfo(ip, port, 0, socket.SOCK_STREAM,
socket.SOL_TCP)
if len(addrs) == 0:
raise Exception("getaddrinfo failed for %s:%d" % (ip, port))
af, socktype, proto, canonname, sa = addrs[0]
if self._forbidden_iplist:
if common.to_str(sa[0]) in self._forbidden_iplist:
raise Exception('IP %s is in forbidden list, reject' %
common.to_str(sa[0]))
remote_sock = socket.socket(af, socktype, proto)
self._remote_sock = remote_sock
self._fd_to_handlers[remote_sock.fileno()] = self
remote_sock.setblocking(False)
remote_sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
return remote_sock
def _handle_dns_resolved(self, result, error):
if error:
self._log_error(error)
self.destroy()
return
if result:
ip = result[1]
if ip:
try:
self._stage = STAGE_CONNECTING
remote_addr = ip
if self._is_local:
remote_port = self._chosen_server[1]
else:
remote_port = self._remote_address[1]
if self._is_local and self._config['fast_open']:
# for fastopen:
# wait for more data to arrive and send them in one SYN
self._stage = STAGE_CONNECTING
# we don't have to wait for remote since it's not
# created
self._update_stream(STREAM_UP, WAIT_STATUS_READING)
# TODO when there is already data in this packet
else:
# else do connect
remote_sock = self._create_remote_socket(remote_addr,
remote_port)
try:
remote_sock.connect((remote_addr, remote_port))
except (OSError, IOError) as e:
if eventloop.errno_from_exception(e) == \
errno.EINPROGRESS:
pass
self._loop.add(remote_sock,
eventloop.POLL_ERR | eventloop.POLL_OUT,
self._server)
self._stage = STAGE_CONNECTING
self._update_stream(STREAM_UP, WAIT_STATUS_READWRITING)
self._update_stream(STREAM_DOWN, WAIT_STATUS_READING)
return
except Exception as e:
shell.print_exception(e)
if self._config['verbose']:
traceback.print_exc()
self.destroy()
def _on_local_read(self):
# handle all local read events and dispatch them to methods for
# each stage
if not self._local_sock:
return
is_local = self._is_local
data = None
try:
data = self._local_sock.recv(BUF_SIZE)
except (OSError, IOError) as e:
if eventloop.errno_from_exception(e) in \
(errno.ETIMEDOUT, errno.EAGAIN, errno.EWOULDBLOCK):
return
if not data:
self.destroy()
return
self._update_activity(len(data))
if not is_local:
data = self._encryptor.decrypt(data)
if not data:
return
if self._stage == STAGE_STREAM:
if self._is_local:
data = self._encryptor.encrypt(data)
self._write_to_sock(data, self._remote_sock)
return
elif is_local and self._stage == STAGE_INIT:
# TODO check auth method
            self._write_to_sock(b'\x05\x00', self._local_sock)
self._stage = STAGE_ADDR
return
elif self._stage == STAGE_CONNECTING:
self._handle_stage_connecting(data)
elif (is_local and self._stage == STAGE_ADDR) or \
(not is_local and self._stage == STAGE_INIT):
self._handle_stage_addr(data)
def _on_remote_read(self):
# handle all remote read events
data = None
try:
data = self._remote_sock.recv(BUF_SIZE)
except (OSError, IOError) as e:
if eventloop.errno_from_exception(e) in \
(errno.ETIMEDOUT, errno.EAGAIN, errno.EWOULDBLOCK):
return
if not data:
self.destroy()
return
self._update_activity(len(data))
if self._is_local:
data = self._encryptor.decrypt(data)
else:
data = self._encryptor.encrypt(data)
try:
self._write_to_sock(data, self._local_sock)
except Exception as e:
shell.print_exception(e)
if self._config['verbose']:
traceback.print_exc()
# TODO use logging when debug completed
self.destroy()
def _on_local_write(self):
# handle local writable event
if self._data_to_write_to_local:
data = b''.join(self._data_to_write_to_local)
self._data_to_write_to_local = []
self._write_to_sock(data, self._local_sock)
else:
self._update_stream(STREAM_DOWN, WAIT_STATUS_READING)
def _on_remote_write(self):
# handle remote writable event
self._stage = STAGE_STREAM
if self._data_to_write_to_remote:
data = b''.join(self._data_to_write_to_remote)
self._data_to_write_to_remote = []
self._write_to_sock(data, self._remote_sock)
else:
self._update_stream(STREAM_UP, WAIT_STATUS_READING)
def _on_local_error(self):
logging.debug('got local error')
if self._local_sock:
logging.error(eventloop.get_sock_error(self._local_sock))
self.destroy()
def _on_remote_error(self):
logging.debug('got remote error')
if self._remote_sock:
logging.error(eventloop.get_sock_error(self._remote_sock))
self.destroy()
def handle_event(self, sock, event):
# handle all events in this handler and dispatch them to methods
if self._stage == STAGE_DESTROYED:
logging.debug('ignore handle_event: destroyed')
return
# order is important
if sock == self._remote_sock:
if event & eventloop.POLL_ERR:
self._on_remote_error()
if self._stage == STAGE_DESTROYED:
return
if event & (eventloop.POLL_IN | eventloop.POLL_HUP):
self._on_remote_read()
if self._stage == STAGE_DESTROYED:
return
if event & eventloop.POLL_OUT:
self._on_remote_write()
elif sock == self._local_sock:
if event & eventloop.POLL_ERR:
self._on_local_error()
if self._stage == STAGE_DESTROYED:
return
if event & (eventloop.POLL_IN | eventloop.POLL_HUP):
self._on_local_read()
if self._stage == STAGE_DESTROYED:
return
if event & eventloop.POLL_OUT:
self._on_local_write()
else:
logging.warn('unknown socket')
def _log_error(self, e):
logging.error('%s when handling connection from %s:%d' %
(e, self._client_address[0], self._client_address[1]))
def destroy(self):
# destroy the handler and release any resources
# promises:
# 1. destroy won't make another destroy() call inside
# 2. destroy releases resources so it prevents future call to destroy
# 3. destroy won't raise any exceptions
# if any of the promises are broken, it indicates a bug has been
        # introduced! most likely memory leaks, etc.
if self._stage == STAGE_DESTROYED:
            # this should never happen
logging.debug('already destroyed')
return
self._stage = STAGE_DESTROYED
if self._remote_address:
logging.debug('destroy: %s:%d' %
self._remote_address)
else:
logging.debug('destroy')
if self._remote_sock:
logging.debug('destroying remote')
self._loop.remove(self._remote_sock)
del self._fd_to_handlers[self._remote_sock.fileno()]
self._remote_sock.close()
self._remote_sock = None
if self._local_sock:
logging.debug('destroying local')
self._loop.remove(self._local_sock)
del self._fd_to_handlers[self._local_sock.fileno()]
self._local_sock.close()
self._local_sock = None
self._dns_resolver.remove_callback(self._handle_dns_resolved)
self._server.remove_handler(self)
class TCPRelay(object):
def __init__(self, config, dns_resolver, is_local, stat_callback=None):
self._config = config
self._is_local = is_local
self._dns_resolver = dns_resolver
self._closed = False
self._eventloop = None
self._fd_to_handlers = {}
self._timeout = config['timeout']
self._timeouts = [] # a list for all the handlers
# we trim the timeouts once a while
self._timeout_offset = 0 # last checked position for timeout
self._handler_to_timeouts = {} # key: handler value: index in timeouts
if is_local:
listen_addr = config['local_address']
listen_port = config['local_port']
else:
listen_addr = config['server']
listen_port = config['server_port']
self._listen_port = listen_port
addrs = socket.getaddrinfo(listen_addr, listen_port, 0,
socket.SOCK_STREAM, socket.SOL_TCP)
if len(addrs) == 0:
raise Exception("can't get addrinfo for %s:%d" %
(listen_addr, listen_port))
af, socktype, proto, canonname, sa = addrs[0]
server_socket = socket.socket(af, socktype, proto)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind(sa)
server_socket.setblocking(False)
if config['fast_open']:
try:
server_socket.setsockopt(socket.SOL_TCP, 23, 5)
except socket.error:
logging.error('warning: fast open is not available')
self._config['fast_open'] = False
server_socket.listen(1024)
self._server_socket = server_socket
self._stat_callback = stat_callback
def add_to_loop(self, loop):
if self._eventloop:
            raise Exception('already added to loop')
if self._closed:
raise Exception('already closed')
self._eventloop = loop
self._eventloop.add(self._server_socket,
eventloop.POLL_IN | eventloop.POLL_ERR, self)
self._eventloop.add_periodic(self.handle_periodic)
def remove_handler(self, handler):
index = self._handler_to_timeouts.get(hash(handler), -1)
if index >= 0:
# delete is O(n), so we just set it to None
self._timeouts[index] = None
del self._handler_to_timeouts[hash(handler)]
def update_activity(self, handler, data_len):
if data_len and self._stat_callback:
self._stat_callback(self._listen_port, data_len)
# set handler to active
now = int(time.time())
if now - handler.last_activity < eventloop.TIMEOUT_PRECISION:
# thus we can lower timeout modification frequency
return
handler.last_activity = now
index = self._handler_to_timeouts.get(hash(handler), -1)
if index >= 0:
# delete is O(n), so we just set it to None
self._timeouts[index] = None
length = len(self._timeouts)
self._timeouts.append(handler)
self._handler_to_timeouts[hash(handler)] = length
def _sweep_timeout(self):
        # tornado's timeout memory management is more flexible than we need;
        # we just need a queue sorted by last_activity, which is faster than
        # heapq because it allows O(1) insertion/removal, so we roll our own
if self._timeouts:
logging.log(shell.VERBOSE_LEVEL, 'sweeping timeouts')
now = time.time()
length = len(self._timeouts)
pos = self._timeout_offset
while pos < length:
handler = self._timeouts[pos]
if handler:
if now - handler.last_activity < self._timeout:
break
else:
if handler.remote_address:
logging.warn('timed out: %s:%d' %
handler.remote_address)
else:
logging.warn('timed out')
handler.destroy()
self._timeouts[pos] = None # free memory
pos += 1
else:
pos += 1
if pos > TIMEOUTS_CLEAN_SIZE and pos > length >> 1:
                    # clean up the timeout queue once the swept prefix grows
                    # beyond half of the queue
self._timeouts = self._timeouts[pos:]
for key in self._handler_to_timeouts:
self._handler_to_timeouts[key] -= pos
pos = 0
self._timeout_offset = pos
def handle_event(self, sock, fd, event):
# handle events and dispatch to handlers
if sock:
logging.log(shell.VERBOSE_LEVEL, 'fd %d %s', fd,
eventloop.EVENT_NAMES.get(event, event))
if sock == self._server_socket:
if event & eventloop.POLL_ERR:
# TODO
raise Exception('server_socket error')
try:
logging.debug('accept')
conn = self._server_socket.accept()
TCPRelayHandler(self, self._fd_to_handlers,
self._eventloop, conn[0], self._config,
self._dns_resolver, self._is_local)
except (OSError, IOError) as e:
error_no = eventloop.errno_from_exception(e)
if error_no in (errno.EAGAIN, errno.EINPROGRESS,
errno.EWOULDBLOCK):
return
else:
shell.print_exception(e)
if self._config['verbose']:
traceback.print_exc()
else:
if sock:
handler = self._fd_to_handlers.get(fd, None)
if handler:
handler.handle_event(sock, event)
else:
logging.warn('poll removed fd')
def handle_periodic(self):
if self._closed:
if self._server_socket:
self._eventloop.remove(self._server_socket)
self._server_socket.close()
self._server_socket = None
logging.info('closed TCP port %d', self._listen_port)
if not self._fd_to_handlers:
logging.info('stopping')
self._eventloop.stop()
self._sweep_timeout()
def close(self, next_tick=False):
logging.debug('TCP close')
self._closed = True
if not next_tick:
if self._eventloop:
self._eventloop.remove_periodic(self.handle_periodic)
self._eventloop.remove(self._server_socket)
self._server_socket.close()
for handler in list(self._fd_to_handlers.values()):
handler.destroy()
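
# ----------------------------------------------------------------------------
# Minimal usage sketch (a hedged illustration added here, not part of the
# original file; it assumes the sibling shadowsocks modules `asyncdns` and
# `eventloop` and a config dict of the shape produced by shell.get_config()):
#
#   from shadowsocks import asyncdns, eventloop
#
#   dns_resolver = asyncdns.DNSResolver()
#   tcp_server = TCPRelay(config, dns_resolver, is_local=False)
#   loop = eventloop.EventLoop()
#   dns_resolver.add_to_loop(loop)
#   tcp_server.add_to_loop(loop)
#   loop.run()
# ----------------------------------------------------------------------------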
|
don-github/edx-platform
|
refs/heads/master
|
common/djangoapps/cache_toolbox/core.py
|
136
|
"""
Core methods
------------
.. autofunction:: cache_toolbox.core.get_instance
.. autofunction:: cache_toolbox.core.delete_instance
.. autofunction:: cache_toolbox.core.instance_key
"""
from django.core.cache import cache
from django.db import DEFAULT_DB_ALIAS
from opaque_keys import InvalidKeyError
from . import app_settings
def get_instance(model, instance_or_pk, timeout=None, using=None):
"""
Returns the ``model`` instance with a primary key of ``instance_or_pk``.
If the data is cached it will be returned from there, otherwise the regular
Django ORM is queried for this instance and the data stored in the cache.
If omitted, the timeout value defaults to
``settings.CACHE_TOOLBOX_DEFAULT_TIMEOUT`` instead of 0 (zero).
Example::
>>> get_instance(User, 1) # Cache miss
<User: lamby>
>>> get_instance(User, 1) # Cache hit
<User: lamby>
>>> User.objects.get(pk=1) == get_instance(User, 1)
True
"""
pk = getattr(instance_or_pk, 'pk', instance_or_pk)
key = instance_key(model, instance_or_pk)
data = cache.get(key)
if data is not None:
try:
# Try and construct instance from dictionary
instance = model(pk=pk, **data)
# Ensure instance knows that it already exists in the database,
# otherwise we will fail any uniqueness checks when saving the
# instance.
instance._state.adding = False
# Specify database so that instance is setup correctly. We don't
# namespace cached objects by their origin database, however.
instance._state.db = using or DEFAULT_DB_ALIAS
return instance
except:
# Error when deserialising - remove from the cache; we will
# fallback and return the underlying instance
cache.delete(key)
# Use the default manager so we are never filtered by a .get_query_set()
# import logging
# log = logging.getLogger("tracking")
# log.info( str(pk) )
instance = model._default_manager.using(using).get(pk=pk)
data = {}
for field in instance._meta.fields:
# Harmless to save, but saves space in the dictionary - we already know
# the primary key when we lookup
if field.primary_key:
continue
if field.get_internal_type() == 'FileField':
# Avoid problems with serializing FileFields
# by only serializing the file name
file = getattr(instance, field.attname)
data[field.attname] = file.name
else:
data[field.attname] = getattr(instance, field.attname)
if timeout is None:
timeout = app_settings.CACHE_TOOLBOX_DEFAULT_TIMEOUT
cache.set(key, data, timeout)
return instance
def delete_instance(model, *instance_or_pk):
"""
Purges the cache keys for the instances of this model.
"""
cache.delete_many([instance_key(model, x) for x in instance_or_pk])
def instance_key(model, instance_or_pk):
"""
Returns the cache key for this (model, instance) pair.
"""
return '%s.%s:%d' % (
model._meta.app_label,
model._meta.module_name,
getattr(instance_or_pk, 'pk', instance_or_pk),
)
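
# Illustrative example (an added note, not in the original module): with the
# key format above, a model whose Meta has app_label 'auth' and module_name
# 'user' yields keys such as:
#
#   >>> instance_key(User, 1)
#   'auth.user:1'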
def set_cached_content(content):
cache.set(unicode(content.location).encode("utf-8"), content)
def get_cached_content(location):
return cache.get(unicode(location).encode("utf-8"))
def del_cached_content(location):
"""
delete content for the given location, as well as for content with run=None.
it's possible that the content could have been cached without knowing the
course_key - and so without having the run.
"""
def location_str(loc):
return unicode(loc).encode("utf-8")
locations = [location_str(location)]
try:
locations.append(location_str(location.replace(run=None)))
except InvalidKeyError:
# although deprecated keys allowed run=None, new keys don't if there is no version.
pass
cache.delete_many(locations)
|
Neetuj/softlayer-python
|
refs/heads/master
|
SoftLayer/CLI/iscsi/list.py
|
2
|
"""List iSCSI targets."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import formatting
from SoftLayer import utils
@click.command()
@environment.pass_env
def cli(env):
"""List iSCSI targets."""
iscsi_mgr = SoftLayer.ISCSIManager(env.client)
iscsi_list = iscsi_mgr.list_iscsi()
iscsi_list = [utils.NestedDict(n) for n in iscsi_list]
table = formatting.Table([
'id',
'datacenter',
'size',
'username',
'password',
'server'
])
for iscsi in iscsi_list:
table.add_row([
iscsi['id'],
utils.lookup(iscsi,
'serviceResource',
'datacenter',
'name') or formatting.blank(),
formatting.FormattedItem(iscsi.get('capacityGb',
formatting.blank()),
"%dGB" % iscsi.get('capacityGb', 0)),
iscsi.get('username', formatting.blank()),
iscsi.get('password', formatting.blank()),
iscsi.get('serviceResourceBackendIpAddress',
formatting.blank())])
env.fout(table)
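
# Usage note (an added comment; the console-script name is an assumption based
# on standard SoftLayer CLI packaging, not stated in this file): this Click
# command is normally reached through the `slcli` entry point, e.g.
#
#   slcli iscsi list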
|
JVillella/tensorflow
|
refs/heads/master
|
tensorflow/tools/common/public_api.py
|
71
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Visitor restricting traversal to only the public tensorflow API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.python.util import tf_inspect
class PublicAPIVisitor(object):
"""Visitor to use with `traverse` to visit exactly the public TF API."""
def __init__(self, visitor):
"""Constructor.
`visitor` should be a callable suitable as a visitor for `traverse`. It will
be called only for members of the public TensorFlow API.
Args:
visitor: A visitor to call for the public API.
"""
self._visitor = visitor
self._root_name = 'tf'
# Modules/classes we want to suppress entirely.
self._private_map = {
# Some implementations have this internal module that we shouldn't
# expose.
'tf.flags': ['cpp_flags'],
}
# Modules/classes we do not want to descend into if we hit them. Usually,
# system modules exposed through platforms for compatibility reasons.
# Each entry maps a module path to a name to ignore in traversal.
self._do_not_descend_map = {
'tf': [
'core',
'examples',
'flags', # Don't add flags
# TODO(drpng): This can be removed once sealed off.
'platform',
# TODO(drpng): This can be removed once sealed.
'pywrap_tensorflow',
# TODO(drpng): This can be removed once sealed.
'user_ops',
'python',
'tools',
'tensorboard',
],
## Everything below here is legitimate.
# It'll stay, but it's not officially part of the API.
'tf.app': ['flags'],
# Imported for compatibility between py2/3.
'tf.test': ['mock'],
}
@property
def private_map(self):
"""A map from parents to symbols that should not be included at all.
This map can be edited, but it should not be edited once traversal has
begun.
Returns:
The map marking symbols to not include.
"""
return self._private_map
@property
def do_not_descend_map(self):
"""A map from parents to symbols that should not be descended into.
This map can be edited, but it should not be edited once traversal has
begun.
Returns:
The map marking symbols to not explore.
"""
return self._do_not_descend_map
def set_root_name(self, root_name):
"""Override the default root name of 'tf'."""
self._root_name = root_name
def _is_private(self, path, name):
"""Return whether a name is private."""
# TODO(wicke): Find out what names to exclude.
return ((path in self._private_map and
name in self._private_map[path]) or
(name.startswith('_') and not re.match('__.*__$', name) or
name in ['__base__', '__class__']))
def _do_not_descend(self, path, name):
"""Safely queries if a specific fully qualified name should be excluded."""
return (path in self._do_not_descend_map and
name in self._do_not_descend_map[path])
def __call__(self, path, parent, children):
"""Visitor interface, see `traverse` for details."""
# Avoid long waits in cases of pretty unambiguous failure.
if tf_inspect.ismodule(parent) and len(path.split('.')) > 10:
raise RuntimeError('Modules nested too deep:\n%s.%s\n\nThis is likely a '
'problem with an accidental public import.' %
(self._root_name, path))
# Includes self._root_name
full_path = '.'.join([self._root_name, path]) if path else self._root_name
# Remove things that are not visible.
for name, child in list(children):
if self._is_private(full_path, name):
children.remove((name, child))
self._visitor(path, parent, children)
# Remove things that are visible, but which should not be descended into.
for name, child in list(children):
if self._do_not_descend(full_path, name):
children.remove((name, child))
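
# Minimal usage sketch (an added illustration, not part of the original file;
# it assumes the companion `traverse` module in tensorflow/tools/common, whose
# traverse(root, visitor) call drives visitors such as this one):
#
#   from tensorflow.tools.common import traverse
#
#   def print_visitor(path, parent, children):
#     print(path, sorted(name for name, _ in children))
#
#   import tensorflow as tf
#   traverse.traverse(tf, PublicAPIVisitor(print_visitor))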
|
blaggacao/odoo
|
refs/heads/master
|
addons/mail/__init__.py
|
382
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import ir_attachment
import mail_message_subtype
import mail_alias
import mail_followers
import mail_message
import mail_mail
import mail_thread
import mail_group
import res_partner
import res_users
import report
import wizard
import res_config
import mail_group_menu
import update
import controllers
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
etingof/pyasn1
|
refs/heads/master
|
pyasn1/type/char.py
|
2
|
#
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2020, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pyasn1/license.html
#
import sys
from pyasn1 import error
from pyasn1.type import tag
from pyasn1.type import univ
__all__ = ['NumericString', 'PrintableString', 'TeletexString', 'T61String', 'VideotexString',
'IA5String', 'GraphicString', 'VisibleString', 'ISO646String',
'GeneralString', 'UniversalString', 'BMPString', 'UTF8String']
NoValue = univ.NoValue
noValue = univ.noValue
class AbstractCharacterString(univ.OctetString):
"""Creates |ASN.1| schema or value object.
|ASN.1| class is based on :class:`~pyasn1.type.base.SimpleAsn1Type`,
its objects are immutable and duck-type Python 2 :class:`str` or Python 3
:class:`bytes`. When used in octet-stream context, |ASN.1| type assumes
"|encoding|" encoding.
Keyword Args
------------
value: :class:`unicode`, :class:`str`, :class:`bytes` or |ASN.1| object
:class:`unicode` object (Python 2) or :class:`str` (Python 3),
alternatively :class:`str` (Python 2) or :class:`bytes` (Python 3)
representing octet-stream of serialised unicode string
(note `encoding` parameter) or |ASN.1| class instance.
If `value` is not given, schema object will be created.
tagSet: :py:class:`~pyasn1.type.tag.TagSet`
Object representing non-default ASN.1 tag(s)
subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
Object representing non-default ASN.1 subtype constraint(s). Constraints
verification for |ASN.1| type occurs automatically on object
instantiation.
encoding: :py:class:`str`
Unicode codec ID to encode/decode :class:`unicode` (Python 2) or
:class:`str` (Python 3) the payload when |ASN.1| object is used
in octet-stream context.
Raises
------
~pyasn1.error.ValueConstraintError, ~pyasn1.error.PyAsn1Error
On constraint violation or bad initializer.
"""
if sys.version_info[0] <= 2:
def __str__(self):
try:
# `str` is Py2 text representation
return self._value.encode(self.encoding)
except UnicodeEncodeError:
exc = sys.exc_info()[1]
raise error.PyAsn1UnicodeEncodeError(
"Can't encode string '%s' with codec "
"%s" % (self._value, self.encoding), exc
)
def __unicode__(self):
return unicode(self._value)
def prettyIn(self, value):
try:
if isinstance(value, unicode):
return value
elif isinstance(value, str):
return value.decode(self.encoding)
elif isinstance(value, (tuple, list)):
return self.prettyIn(''.join([chr(x) for x in value]))
elif isinstance(value, univ.OctetString):
return value.asOctets().decode(self.encoding)
else:
return unicode(value)
except (UnicodeDecodeError, LookupError):
exc = sys.exc_info()[1]
raise error.PyAsn1UnicodeDecodeError(
"Can't decode string '%s' with codec "
"%s" % (value, self.encoding), exc
)
def asOctets(self, padding=True):
return str(self)
def asNumbers(self, padding=True):
return tuple([ord(x) for x in str(self)])
else:
def __str__(self):
# `unicode` is Py3 text representation
return str(self._value)
def __bytes__(self):
try:
return self._value.encode(self.encoding)
except UnicodeEncodeError:
exc = sys.exc_info()[1]
raise error.PyAsn1UnicodeEncodeError(
"Can't encode string '%s' with codec "
"%s" % (self._value, self.encoding), exc
)
def prettyIn(self, value):
try:
if isinstance(value, str):
return value
elif isinstance(value, bytes):
return value.decode(self.encoding)
elif isinstance(value, (tuple, list)):
return self.prettyIn(bytes(value))
elif isinstance(value, univ.OctetString):
return value.asOctets().decode(self.encoding)
else:
return str(value)
except (UnicodeDecodeError, LookupError):
exc = sys.exc_info()[1]
raise error.PyAsn1UnicodeDecodeError(
"Can't decode string '%s' with codec "
"%s" % (value, self.encoding), exc
)
def asOctets(self, padding=True):
return bytes(self)
def asNumbers(self, padding=True):
return tuple(bytes(self))
#
# See OctetString.prettyPrint() for the explanation
#
def prettyOut(self, value):
return value
def prettyPrint(self, scope=0):
# first see if subclass has its own .prettyOut()
value = self.prettyOut(self._value)
if value is not self._value:
return value
return AbstractCharacterString.__str__(self)
def __reversed__(self):
return reversed(self._value)
class NumericString(AbstractCharacterString):
__doc__ = AbstractCharacterString.__doc__
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
#: associated with |ASN.1| type.
tagSet = AbstractCharacterString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 18)
)
encoding = 'us-ascii'
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
class PrintableString(AbstractCharacterString):
__doc__ = AbstractCharacterString.__doc__
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
#: associated with |ASN.1| type.
tagSet = AbstractCharacterString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 19)
)
encoding = 'us-ascii'
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
class TeletexString(AbstractCharacterString):
__doc__ = AbstractCharacterString.__doc__
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
#: associated with |ASN.1| type.
tagSet = AbstractCharacterString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 20)
)
encoding = 'iso-8859-1'
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
class T61String(TeletexString):
__doc__ = TeletexString.__doc__
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
class VideotexString(AbstractCharacterString):
__doc__ = AbstractCharacterString.__doc__
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
#: associated with |ASN.1| type.
tagSet = AbstractCharacterString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 21)
)
encoding = 'iso-8859-1'
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
class IA5String(AbstractCharacterString):
__doc__ = AbstractCharacterString.__doc__
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
#: associated with |ASN.1| type.
tagSet = AbstractCharacterString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 22)
)
encoding = 'us-ascii'
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
class GraphicString(AbstractCharacterString):
__doc__ = AbstractCharacterString.__doc__
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
#: associated with |ASN.1| type.
tagSet = AbstractCharacterString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 25)
)
encoding = 'iso-8859-1'
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
class VisibleString(AbstractCharacterString):
__doc__ = AbstractCharacterString.__doc__
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
#: associated with |ASN.1| type.
tagSet = AbstractCharacterString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 26)
)
encoding = 'us-ascii'
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
class ISO646String(VisibleString):
__doc__ = VisibleString.__doc__
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
class GeneralString(AbstractCharacterString):
__doc__ = AbstractCharacterString.__doc__
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
#: associated with |ASN.1| type.
tagSet = AbstractCharacterString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 27)
)
encoding = 'iso-8859-1'
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
class UniversalString(AbstractCharacterString):
__doc__ = AbstractCharacterString.__doc__
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
#: associated with |ASN.1| type.
tagSet = AbstractCharacterString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 28)
)
encoding = "utf-32-be"
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
class BMPString(AbstractCharacterString):
__doc__ = AbstractCharacterString.__doc__
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
#: associated with |ASN.1| type.
tagSet = AbstractCharacterString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 30)
)
encoding = "utf-16-be"
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
class UTF8String(AbstractCharacterString):
__doc__ = AbstractCharacterString.__doc__
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
#: associated with |ASN.1| type.
tagSet = AbstractCharacterString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 12)
)
encoding = "utf-8"
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
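
# Illustrative doctest-style example (an added note, not part of the original
# module; shown for Python 3): character string objects encode and decode their
# payload with the class-level `encoding` attribute defined above.
#
#   >>> s = UTF8String('abc')
#   >>> str(s)
#   'abc'
#   >>> s.asOctets()
#   b'abc'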
|
lesteve/sphinx-gallery
|
refs/heads/master
|
mayavi_examples/plot_3d.py
|
4
|
# -*- coding: utf-8 -*-
"""
Plotting simple 3D graph with Mayavi
====================================
A simple example of the plot of a 3D graph with Mayavi
in order to test the autonomy of the gallery.
"""
# Code source: Alex Gramfort
# License: BSD 3 clause
# This will show the mlab.test_mesh figure in the gallery
# sphinx_gallery_thumbnail_number = 4
from mayavi import mlab
mlab.test_plot3d()
mlab.figure()
mlab.test_contour3d()
#####################################################################
# Note: this shows that inside a cell matplotlib figures are always
# put before mayavi in the example HTML. In other words, the order of
# plotting is not respected between matplotlib and mayavi figures
mlab.figure()
mlab.test_mesh()
mlab.figure()
mlab.test_flow()
import matplotlib.pyplot as plt
plt.plot([1, 2, 3], [1, 2, 3])
|
bq/bitbloq-offline
|
refs/heads/master
|
app/res/web2board/darwin/Web2Board.app/Contents/Resources/platformio/platforms/linux_arm.py
|
5
|
# Copyright 2014-2015 Ivan Kravets <me@ikravets.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from platformio import exception, util
from platformio.platforms.base import BasePlatform
class Linux_armPlatform(BasePlatform):
"""
Linux ARM is a Unix-like and mostly POSIX-compliant computer
operating system (OS) assembled under the model of free and open-source
software development and distribution.
    Using a host OS (Mac OS X, Linux ARM) you can build native applications
    for the Linux ARM platform.
http://platformio.org/#!/platforms/linux_arm
"""
PACKAGES = {
"toolchain-gccarmlinuxgnueabi": {
"alias": "toolchain",
"default": True
},
"framework-wiringpi": {
"alias": "framework"
}
}
def __init__(self):
if "linux_arm" in util.get_systype():
del self.PACKAGES['toolchain-gccarmlinuxgnueabi']
BasePlatform.__init__(self)
def configure_default_packages(self, envoptions, targets):
if (envoptions.get("framework") == "wiringpi" and
"linux_arm" not in util.get_systype()):
raise exception.PlatformioException(
"PlatformIO does not support temporary cross-compilation "
"for WiringPi framework. Please run PlatformIO directly on "
"Raspberry Pi"
)
return BasePlatform.configure_default_packages(
self, envoptions, targets)
|
dbckz/ansible
|
refs/heads/devel
|
lib/ansible/modules/windows/win_acl.py
|
72
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2015, Phil Schwartz <schwartzmx@gmail.com>
# Copyright 2015, Trond Hindenes
# Copyright 2015, Hans-Joachim Kliemeck <git@kliemeck.de>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: win_acl
version_added: "2.0"
short_description: Set file/directory permissions for a system user or group.
description:
    - Add or remove rights/permissions for a given user or group for the specified src file or folder.
    - If adding ACLs for AppPool identities (available since 2.3), the Windows Feature "Web-Scripting-Tools" must be enabled.
options:
path:
description:
- File or Directory
required: yes
user:
description:
- User or Group to add specified rights to act on src file/folder
required: yes
default: none
state:
description:
- Specify whether to add C(present) or remove C(absent) the specified access rule
required: no
choices:
- present
- absent
default: present
type:
description:
- Specify whether to allow or deny the rights specified
required: yes
choices:
- allow
- deny
default: none
rights:
description:
      - The rights/permissions that are to be allowed/denied for the specified user or group for the given src file or directory.
        Can be entered as a comma separated list (Ex. "Modify, Delete, ExecuteFile"). For more information on the choices see MSDN FileSystemRights
        Enumeration.
required: yes
choices:
- AppendData
- ChangePermissions
- Delete
- DeleteSubdirectoriesAndFiles
- ExecuteFile
- FullControl
- ListDirectory
- Modify
- Read
- ReadAndExecute
- ReadAttributes
- ReadData
- ReadExtendedAttributes
- ReadPermissions
- Synchronize
- TakeOwnership
- Traverse
- Write
- WriteAttributes
- WriteData
- WriteExtendedAttributes
default: none
inherit:
description:
- Inherit flags on the ACL rules. Can be specified as a comma separated list (Ex. "ContainerInherit, ObjectInherit"). For more information on
the choices see MSDN InheritanceFlags Enumeration.
required: no
choices:
- ContainerInherit
- ObjectInherit
- None
default: For Leaf File, None; For Directory, ContainerInherit, ObjectInherit;
propagation:
description:
- Propagation flag on the ACL rules. For more information on the choices see MSDN PropagationFlags Enumeration.
required: no
choices:
- None
- NoPropagateInherit
- InheritOnly
default: "None"
author: Phil Schwartz (@schwartzmx), Trond Hindenes (@trondhindenes), Hans-Joachim Kliemeck (@h0nIg)
'''
EXAMPLES = r'''
- name: Restrict write and execute access to User Fed-Phil
win_acl:
user: Fed-Phil
path: C:\Important\Executable.exe
type: deny
rights: ExecuteFile,Write
- name: Add IIS_IUSRS allow rights
win_acl:
path: C:\inetpub\wwwroot\MySite
user: IIS_IUSRS
rights: FullControl
type: allow
state: present
inherit: ContainerInherit, ObjectInherit
propagation: 'None'
# Remove previously added rule for IIS_IUSRS
- name: Remove FullControl AccessRule for IIS_IUSRS
win_acl:
path: C:\inetpub\wwwroot\MySite
user: IIS_IUSRS
rights: FullControl
type: allow
state: absent
inherit: ContainerInherit, ObjectInherit
propagation: 'None'
# Deny Intern
- name: Deny Deny
win_acl:
path: C:\Administrator\Documents
user: Intern
rights: Read,Write,Modify,FullControl,Delete
type: deny
state: present
'''
|
rhuitl/uClinux
|
refs/heads/master
|
user/samba/examples/scripts/shares/python/generate_parm_table.py
|
52
|
#!/usr/bin/env python
######################################################################
##
## Generate parameter dictionary from param/loadparm.c
##
## Copyright (C) Gerald Carter 2004.
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
######################################################################
import re, string, sys, commands
HEADER = """######################################################################
##
## autogenerated file of smb.conf parameters
## generate_parm_table <..../param/loadparm.c>
##
## Copyright (C) Gerald Carter 2004.
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
######################################################################
from SambaParm import SambaParmString, SambaParmBool, SambaParmBoolRev
## boolean defines for parm_table
P_LOCAL = 0
P_GLOBAL = 1
"""
FOOTER = """##### end of smbparm.py #########################################
#################################################################"""
TESTPARM = "/usr/bin/testparm"
## fields in Samba's parameter table
displayName = 0
type = 1
scope = 2
variable = 3
flags = 6
parm_table = {}
var_table = {}
def_values = {}
obj_table = {
'P_BOOL' : 'SambaParmBool',
'P_BOOLREV' : 'SambaParmBoolRev',
'P_STRING' : 'SambaParmString',
'P_USTRING' : 'SambaParmString',
'P_GSTRING' : 'SambaParmString',
'P_LIST' : 'SambaParmString',
'P_ENUM' : 'SambaParmString',
'P_CHAR' : 'SambaParmString',
'P_OCTAL' : 'SambaParmString',
'P_INTEGER' : 'SambaParmString',
}
######################################################################
## BEGIN MAIN CODE ##
######################################################################
## First thing is to build the dictionary of parameter names ##
## based on the output from testparm ##
cmd = "/usr/bin/testparm -s -v /dev/null"
( status, testparm_output ) = commands.getstatusoutput( cmd )
if status:
    sys.stderr.write( "Failed to execute testparm!\n%s\n" % testparm_output )
    sys.exit( 1 )   ## cannot continue without testparm output
## break the output into a list ##
lines = string.split( testparm_output, "\n" )
## loop through list -- parameters in testparm output have ##
## whitespace at the beginning of the line ##
pattern = re.compile( "^\s+" )
for input_str in lines:
if not pattern.search( input_str ):
continue
input_str = string.strip( input_str )
parts = string.split( input_str, "=" )
parts[0] = string.strip( parts[0] )
parts[1] = string.strip( parts[1] )
key = string.upper( string.join(string.split(parts[0]), "") )
new = parts[1].replace('\\', '\\\\')
def_values[key] = new
## open loadparm.c and get the entire list of parameters ##
## including synonums ##
if len(sys.argv) != 2:
print "Usage: %s <.../param/loadparm.c>" % ( sys.argv[0] )
sys.exit( 1 )
try:
fconfig = open( sys.argv[1], "r" )
except IOError:
print "%s does not exist!" % sys.argv[1]
sys.exit (1)
## Loop through loadparm.c -- all parameters are either ##
## P_LOCAL or P_GLOBAL ##
synonyms = []
pattern = re.compile( '{".*P_[GL]' )
while True:
input_str= fconfig.readline()
if len(input_str) == 0 :
break
input_str= string.strip(input_str)
    ## see if we have a match for a parameter definition ##
parm = []
if pattern.search( input_str) :
## strip the surrounding '{.*},' ##
input_str= input_str[1:-2]
parm = string.split(input_str, ",")
## strip the ""'s and upper case ##
name = (string.strip(parm[displayName])[1:-1])
key = string.upper( string.join(string.split(name), "") )
var_name = string.strip( parm[variable] )
## try to catch synonyms -- if the parameter was not reported ##
        ## by testparm, then save it and come back after we fill out ##
## the variable list ##
if not def_values.has_key( key ):
synonyms.append( input_str)
continue
var_table[var_name] = key
parmType = string.strip(parm[type])
parm_table[key] = [ name , string.strip(parm[type]), string.strip(parm[scope]), def_values[key] ]
## Deal with any synonyms ##
for input_str in synonyms:
parm = string.split(input_str, ",")
name = (string.strip(parm[displayName])[1:-1])
key = string.upper( string.join(string.split(name), "") )
var_name = string.strip( parm[variable] )
## if there's no pre-existing key, then testparm doesn't know about it
if not var_table.has_key( var_name ):
continue
## just make a copy
parm_table[key] = parm_table[var_table[var_name]][:]
# parm_table[key][1] = parm[1]
parm_table[key][1] = string.strip(parm[1])
## ##
## print out smbparm.py ##
## ##
try:
smbparm = open ( "smbparm.py", "w" )
except IOError:
print "Cannot write to smbparm.py"
sys.exit( 1 )
smbparm.write( HEADER )
smbparm.write( "parm_table = {\n" )
for x in parm_table.keys():
key = "\"%s\"" % x
smbparm.write("\t%-25s: (\"%s\", %s, %s, \"%s\"),\n" % ( key, parm_table[x][0],
obj_table[parm_table[x][1]], parm_table[x][2], parm_table[x][3] ))
smbparm.write( "}\n" )
smbparm.write( FOOTER )
smbparm.write( "\n" )
sys.exit(0)
## ##
## cut-n-paste area ##
## ##
for x in parm_table.keys():
if def_values.has_key( x ):
parm_table[x].append( def_values[x] )
else:
parm_table[x].append( "" )
|
djangraw/PsychoPyParadigms
|
refs/heads/master
|
BasicExperiments/SingingTask_audio.py
|
1
|
#!/usr/bin/env python2
"""Present blocks of scale-singing, speaking, and singing trials, each preceded
by an instruction message and followed by a rest period, with optional audio playback."""
# SingingTask_audio.py
#
# Created 4/12/17 by DJ based on SingingTask.py.
from psychopy import core, gui, data, event, sound, logging #, visual # visual causes a bug in the guis, so I moved it down.
from psychopy.tools.filetools import fromFile, toFile
from random import shuffle
import time, numpy as np
import AppKit, os # for monitor size detection, files
import PromptTools
# ====================== #
# ===== PARAMETERS ===== #
# ====================== #
# Save the parameters declared below?
saveParams = True;
newParamsFilename = 'SingingTaskParams.pickle'
# Declare primary task parameters.
params = {
'skipPrompts': True, # at the beginning
'tStartup': 10, # time after beginning of scan before starting first pre-trial message
'tempo_bpm': 90, # beats per minute (beat time = 60.0/tempo_bpm)
'trialTime': 20,#20.0,#20, # duration of song/exercise (in sec)... will be rounded down to nearest multiple of beat time
'msgTime': 6.0, # duration of pre-trial message (in sec)... will be rounded down to nearest multiple of beat time
'restTime': 14.0,#17, # duration of post-trial rest (in sec)... ITI = msgTime+restTime
'IBI': 14.0,#17, # time between end of block/probe and beginning of next block (in seconds)
'nBlocks': 3, # blocks for this run
'trialTypes': ['Scales','Speak','Sing'], # currently used only for instructions
'playSound': [False, True, True], # whether to play the audio file, one flag per trial type above
'randomizeOrder': False, # randomize each block
'advanceKey': 'space', # key to skip block
'triggerKey': 't', # key from scanner that says scan is starting
'promptType': 'Default', # must correspond to keyword in PromptTools.py
# declare other stimulus parameters
'fullScreen': True, # run in full screen mode?
'screenToShow': 0, # display on primary screen (0) or secondary (1)?
'fixCrossSize': 50, # size of cross, in pixels
'fixCrossPos': (0,0), # (x,y) pos of fixation cross displayed before each page (for drift correction)
'usePhotodiode': False, # add sync square in corner of screen
#'textBoxSize': [800,600] # [640,360]# [700, 500] # width, height of text box (in pixels)
# declare sound params
'soundFile': 'music/Major_Chords_Low/Grand Piano - Fazioli - major A',
'soundVolume': 1,
'tSoundStart': 0,
'tSoundStop': 0 # will be edited to match specified trial time
}
params['tSoundStop'] = params['tSoundStart']+params['trialTime']
# save parameters
if saveParams:
dlgResult = gui.fileSaveDlg(prompt='Save Params...',initFilePath = os.getcwd() + '/Params', initFileName = newParamsFilename,
allowed="PICKLE files (.pickle)|.pickle|All files (.*)|")
newParamsFilename = dlgResult
if newParamsFilename is None: # keep going, but don't save
saveParams = False
else:
toFile(newParamsFilename, params)# save it!
# ========================== #
# ===== SET UP LOGGING ===== #
# ========================== #
try:#try to get a previous parameters file
expInfo = fromFile('lastSingInfo.pickle')
expInfo['session'] +=1 # automatically increment session number
expInfo['paramsFile'] = [expInfo['paramsFile'],'Load...']
except:#if not there then use a default set
expInfo = {'subject':'1', 'session':1, 'paramsFile':['DEFAULT','Load...']}
# overwrite if you just saved a new parameter set
if saveParams:
expInfo['paramsFile'] = [newParamsFilename,'Load...']
dateStr = time.strftime("%b_%d_%H%M", time.localtime()) # add the current time
#present a dialogue to change params
dlg = gui.DlgFromDict(expInfo, title='Singing task', order=['subject','session','paramsFile'])
if not dlg.OK:
core.quit()#the user hit cancel so exit
# find parameter file
if expInfo['paramsFile'] == 'Load...':
dlgResult = gui.fileOpenDlg(prompt='Select parameters file',tryFilePath=os.getcwd(),
allowed="PICKLE files (.pickle)|.pickle|All files (.*)|")
expInfo['paramsFile'] = dlgResult[0]
# load parameter file
if expInfo['paramsFile'] not in ['DEFAULT', None]: # otherwise, just use defaults.
# load params file
params = fromFile(expInfo['paramsFile'])
# print params to Output
print 'params = {'
for key in sorted(params.keys()):
print " '%s': %s"%(key,params[key]) # print each value as-is (no quotes)
print '}'
# save experimental info
toFile('lastSingInfo.pickle', expInfo)#save expInfo to file for next time (must match the filename loaded above)
#make a log file to save parameter/event data
filename = 'Singing-%s-%d-%s'%(expInfo['subject'], expInfo['session'], dateStr) #'Sart-' + expInfo['subject'] + '-' + expInfo['session'] + '-' + dateStr
logging.LogFile((filename+'.log'), level=logging.INFO)#, mode='w') # w=overwrite
logging.log(level=logging.INFO, msg='---START PARAMETERS---')
logging.log(level=logging.INFO, msg='filename: %s'%filename)
logging.log(level=logging.INFO, msg='subject: %s'%expInfo['subject'])
logging.log(level=logging.INFO, msg='session: %s'%expInfo['session'])
logging.log(level=logging.INFO, msg='date: %s'%dateStr)
for key in sorted(params.keys()): # in alphabetical order
logging.log(level=logging.INFO, msg='%s: %s'%(key,params[key]))
logging.log(level=logging.INFO, msg='---END PARAMETERS---')
# ========================== #
# ===== SET UP STIMULI ===== #
# ========================== #
from psychopy import visual
# kluge for secondary monitor
if params['fullScreen'] and params['screenToShow']>0:
screens = AppKit.NSScreen.screens()
screenRes = screens[params['screenToShow']].frame().size.width, screens[params['screenToShow']].frame().size.height
# screenRes = [1920, 1200]
params['fullScreen'] = False
else:
screenRes = [800,600]
# Initialize deadline for displaying next frame
tNextFlip = [0.0] # single-element list so AddToFlipTime (defined below) can modify it in place
#create window and stimuli
globalClock = core.Clock()#to keep track of time
trialClock = core.Clock()#to keep track of time
win = visual.Window(screenRes, fullscr=params['fullScreen'], allowGUI=False, monitor='testMonitor', screen=params['screenToShow'], units='deg', name='win')
#fixation = visual.GratingStim(win, color='black', tex=None, mask='circle',size=0.2)
fCS = params['fixCrossSize'] # rename for brevity
fcX = params['fixCrossPos'][0] # rename for brevity
fcY = params['fixCrossPos'][1] # rename for brevity
fCS_vertices = ((-fCS/2 + fcX, fcY),(fCS/2 + fcX, fcY),(fcX, fcY),(fcX, fCS/2 + fcY),(fcX, -fCS/2 + fcY))
fixation = visual.ShapeStim(win,lineColor='#000000',lineWidth=3.0,vertices=fCS_vertices,units='pix',closeShape=False);
message1 = visual.TextStim(win, pos=[0, 0], wrapWidth=50, color='#000000', alignHoriz='center', name='topMsg', text="aaa", height=3)
message2 = visual.TextStim(win, pos=[0,-10], wrapWidth=50, color='#000000', alignHoriz='center', name='bottomMsg', text="bbb", height=3)
# initialize photodiode stimulus
squareSize = 0.4
diodeSquare = visual.Rect(win,pos=[squareSize/4-1,squareSize/4-1],lineColor='white',fillColor='white',size=[squareSize,squareSize],units='norm')
# Look up prompts
[topPrompts,bottomPrompts] = PromptTools.GetPrompts(os.path.basename(__file__),params['promptType'],params)
print('%d prompts loaded from %s'%(len(topPrompts),'PromptTools.py'))
# Load sound file
mySound = sound.Sound(value=params['soundFile'], volume=params['soundVolume'], start=params['tSoundStart'], stop=params['tSoundStop'], name='mySound')
# ============================ #
# ======= SUBFUNCTIONS ======= #
# ============================ #
# increment time of next window flip
def AddToFlipTime(tIncrement=1.0):
tNextFlip[0] += tIncrement
# print("%1.3f --> %1.3f"%(globalClock.getTime(),tNextFlip[0]))
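# RunTrial presents one trial: a beat-locked countdown message (preTrialTime),
# the trial itself (trialTime) -- either a beat counter or the audio file,
# depending on playSound -- and then a fixation-cross rest (restTime).
# Timing works by keeping an absolute deadline in tNextFlip[0]: each frame is
# drawn, AddToFlipTime is registered via win.callOnFlip to push the deadline
# forward, and the code busy-waits on globalClock until the deadline is
# reached before flipping the window.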
def RunTrial(preTrialTime, trialTime, restTime, condition,playSound):
# adjust pre-trial time
timePerBeat = 60.0/params['tempo_bpm'];
nPreTrialBeats = int(preTrialTime/timePerBeat)
nTrialBeats = int(trialTime/timePerBeat)
# Display pre-trial message
win.clearBuffer()
for iBeat in range(0, nPreTrialBeats):
# set up frame
message1.setText('%s in %d...'%(condition,nPreTrialBeats-iBeat))
message1.draw()
win.logOnFlip(level=logging.EXP, msg='Display %sIn%d'%(condition,nPreTrialBeats-iBeat))
win.callOnFlip(AddToFlipTime,timePerBeat)
# wait until it's time
while (globalClock.getTime()<tNextFlip[0]):
pass
# flash photodiode
if params['usePhotodiode']:
diodeSquare.draw()
win.flip()
# erase diode square and re-draw
message1.draw()
# FLIP DISPLAY!
win.flip()
# check for escape characters
thisKey = event.getKeys()
if thisKey!=None and len(thisKey)>0 and thisKey[0] in ['q','escape']:
core.quit()
# Display trial message
if playSound:
message1.setText('%s'%(condition))
message1.draw()
win.logOnFlip(level=logging.EXP, msg='Display %s'%(condition))
win.callOnFlip(AddToFlipTime,params['tSoundStop']-params['tSoundStart'])
win.flip()
mySound.play()
while (globalClock.getTime()<tNextFlip[0]):
# check for escape characters
thisKey = event.getKeys()
if thisKey!=None and len(thisKey)>0 and thisKey[0] in ['q','escape']:
core.quit()
# mySound.stop()
# logging.log(level=logging.EXP, msg='here')
else:
for iBeat in range(0, nTrialBeats):
message1.setText('%s (%d/%d)'%(condition,iBeat+1,nTrialBeats))
message1.draw()
win.logOnFlip(level=logging.EXP, msg='Display %s(%d/%d)'%(condition,iBeat+1,nTrialBeats))
win.callOnFlip(AddToFlipTime,timePerBeat)
# logging.log(level=logging.EXP, msg='here')
# wait until it's time
while (globalClock.getTime()<tNextFlip[0]):
pass
# flash photodiode
if params['usePhotodiode']:
diodeSquare.draw()
win.flip()
# erase diode square and re-draw
message1.draw()
# FLIP DISPLAY!
win.flip()
# check for escape characters
thisKey = event.getKeys()
if thisKey!=None and len(thisKey)>0 and thisKey[0] in ['q','escape']:
core.quit()
# # Flush the key buffer and mouse movements
# event.clearEvents()
# # Wait for relevant key press or 'maxPageTime' seconds
# thisKey = event.waitKeys(maxWait=trialTime-0.5,keyList=[params['advanceKey'],'q','escape'])
# Process key press
# if thisKey!=None and len(thisKey)>0 and thisKey[0] in ['q','escape']:
# core.quit()
#allow the screen to update immediately
# tNextFlip[0]=globalClock.getTime()
if restTime>0:
# draw fixation cross
fixation.draw()
win.logOnFlip(level=logging.EXP, msg='Display Fixation')
win.callOnFlip(AddToFlipTime,restTime)
# wait until it's time
while (globalClock.getTime()<tNextFlip[0]):
pass
# flash photodiode
if params['usePhotodiode']:
diodeSquare.draw()
win.flip()
# erase diode square and re-draw
fixation.draw()
# FLIP DISPLAY!
win.flip()
# =========================== #
# ======= RUN PROMPTS ======= #
# =========================== #
# display prompts
if not params['skipPrompts']:
PromptTools.RunPrompts(topPrompts,bottomPrompts,win,message1,message2)
# wait for scanner
message1.setText("Waiting for scanner to start...")
message2.setText("(Press '%c' to override.)"%params['triggerKey'].upper())
message1.draw()
message2.draw()
win.logOnFlip(level=logging.EXP, msg='Display WaitingForScanner')
win.flip()
event.waitKeys(keyList=params['triggerKey'])
tStartSession = globalClock.getTime()
AddToFlipTime(tStartSession+params['tStartup'])
# wait before first stimulus
fixation.draw()
win.logOnFlip(level=logging.EXP, msg='Display Fixation')
win.flip()
# =========================== #
# ===== MAIN EXPERIMENT ===== #
# =========================== #
# set up other stuff
logging.log(level=logging.EXP, msg='---START EXPERIMENT---')
# Run trials
for iBlock in range(0,params['nBlocks']): # for each block of pages
# log new block
logging.log(level=logging.EXP, msg='Start Block %d'%iBlock)
# trial loop
trialTypes = params['trialTypes']
if params['randomizeOrder']:
shuffle(trialTypes)
# TO DO: shuffle playSound in same way
for iTrial in range(0,len(trialTypes)):
# display text
logging.log(level=logging.EXP, msg='Block %d, Trial %d'%(iBlock,iTrial))
if iTrial < (len(trialTypes)-1):
RunTrial(params['msgTime'],params['trialTime'],params['restTime'],trialTypes[iTrial],params['playSound'][iTrial])
elif iBlock < (params['nBlocks']-1): # last trial of a non-final block: use the inter-block interval
RunTrial(params['msgTime'],params['trialTime'],params['IBI'],trialTypes[iTrial],params['playSound'][iTrial])
else:
RunTrial(params['msgTime'],params['trialTime'],0,trialTypes[iTrial],params['playSound'][iTrial])
# handle end of block
if iBlock == (params['nBlocks']-1):
message1.setText("That's the end of this run!")
message2.setText("Please stay still until the scanner noise stops.")
win.logOnFlip(level=logging.EXP, msg='Display TheEnd')
message1.draw()
message2.draw()
# change the screen
win.flip()
# wait until a button is pressed to exit
thisKey = event.waitKeys(keyList=['q','escape'])
# exit experiment
core.quit()
|
jsteemann/arangodb
|
refs/heads/devel
|
3rdParty/V8-4.3.61/third_party/python_26/Lib/ctypes/test/test_refcounts.py
|
68
|
import unittest
import ctypes
import gc
MyCallback = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_int)
OtherCallback = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_int, ctypes.c_ulonglong)
import _ctypes_test
dll = ctypes.CDLL(_ctypes_test.__file__)
class RefcountTestCase(unittest.TestCase):
def test_1(self):
from sys import getrefcount as grc
f = dll._testfunc_callback_i_if
f.restype = ctypes.c_int
f.argtypes = [ctypes.c_int, MyCallback]
def callback(value):
#print "called back with", value
return value
self.failUnlessEqual(grc(callback), 2)
cb = MyCallback(callback)
self.failUnless(grc(callback) > 2)
result = f(-10, cb)
self.failUnlessEqual(result, -18)
cb = None
gc.collect()
self.failUnlessEqual(grc(callback), 2)
def test_refcount(self):
from sys import getrefcount as grc
def func(*args):
pass
# this is the standard refcount for func
self.failUnlessEqual(grc(func), 2)
# the CFuncPtr instance holds at least one refcount on func:
f = OtherCallback(func)
self.failUnless(grc(func) > 2)
# and may release it again
del f
self.failUnless(grc(func) >= 2)
# but now it must be gone
gc.collect()
self.failUnless(grc(func) == 2)
class X(ctypes.Structure):
_fields_ = [("a", OtherCallback)]
x = X()
x.a = OtherCallback(func)
# the CFuncPtr instance holds at least one refcount on func:
self.failUnless(grc(func) > 2)
# and may release it again
del x
self.failUnless(grc(func) >= 2)
# and now it must be gone again
gc.collect()
self.failUnlessEqual(grc(func), 2)
f = OtherCallback(func)
# the CFuncPtr instance holds at least one refcount on func:
self.failUnless(grc(func) > 2)
# create a cycle
f.cycle = f
del f
gc.collect()
self.failUnlessEqual(grc(func), 2)
class AnotherLeak(unittest.TestCase):
def test_callback(self):
import sys
proto = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_int, ctypes.c_int)
def func(a, b):
return a * b * 2
f = proto(func)
a = sys.getrefcount(ctypes.c_int)
f(1, 2)
self.failUnlessEqual(sys.getrefcount(ctypes.c_int), a)
if __name__ == '__main__':
unittest.main()
|
pythonization/reminder2standBy
|
refs/heads/master
|
reminder2standBy/common_qt_app/helpers.py
|
1
|
"""Various helper functions."""
try:
import prctl
# to install run:
# sudo apt install build-essential libcap-dev
# pip3 install --user python-prctl
except ImportError:
prctl = None
def give_name2thread(name, thread_obj):
"""Give name to current thread.
if prctl installed, then also set name via this module. (Then you can see
thread name in htop .)
Do not delete calls of this function. Thread name useful for searching
bugs.
:param name: thread name
:type name: str
:param thread_obj: pass thread object to legacy python function
:type thread_obj: Thread
"""
thread_obj.name = name
if prctl:
prctl.set_name(name)
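# Illustrative usage (not part of the original module; the module path and
# worker function below are assumptions made for this example):
#
#     import threading
#     from common_qt_app.helpers import give_name2thread
#
#     def worker():
#         give_name2thread('db-sync', threading.current_thread())
#         ...  # the name is now visible in htop when prctl is available
#
#     threading.Thread(target=worker).start()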
|
heiko-r/paparazzi
|
refs/heads/master
|
sw/tools/px4/set_target.py
|
19
|
#!/usr/bin/env python
import os
import sys
import serial
import glob
import time
target = sys.argv[1]
firmware_file = sys.argv[2]
print "Target: " + target
print "Firmware file: " + firmware_file
# test if a Paparazzi (pprz) CDC serial device is connected
mode = -1
port = ""
try:
port = "/dev/serial/by-id/usb-Paparazzi_UAV_CDC_Serial_STM32_*"
if len(glob.glob(port)) > 1:
print("Warning: multiple Paparazzi cdc devices found. Selecting the first one.")
port = glob.glob(port)[0]
ser = serial.Serial(port, timeout=0.5)
mode = 1
print ("Paparazzi CDC device found at port: " + port)
except (serial.serialutil.SerialException, IndexError):
print("No Paparazzi CDC device found, looking further.")
if mode == 1:
if target == "fbw":
line = "pprz0"
else:
line = "pprz1"
print ("Sending target command to Paparazzi firmware...")
ser.flush()
ser.write(line)
if target == "fbw":
try:
c = ser.read(7)
print ("AP responded with: " + c)
if c == "TIMEOUT":
print(
"Error: FBW bootloader TIMEOUT. Power cycle the board and wait between 5 seconds to 20 seconds to retry.")
sys.exit(1)
elif c != "FBWOKOK":
print(
"Error: unknown error. Power cycle the board and wait between 5 seconds to 20 seconds to retry.")
sys.exit(1)
except serial.serialutil.SerialException:
pass
print("Uploading using Paparazzi firmware...")
if target == "ap":
print("If the uploading does not start within a few seconds, please replug the usb (power cycle the board).")
sys.exit(0)
if mode == -1: # no pprz cdc was found, look for PX4
ports = glob.glob("/dev/serial/by-id/usb-3D_Robotics*")
ports.extend(glob.glob("/dev/serial/by-id/pci-3D_Robotics*"))  # extend, not append: keep a flat list of device paths
for p in ports:
if len(p) > 0:
try:
ser = serial.Serial(p, timeout=0.5)
port = p
mode = 2
print ("Original PX4 firmware CDC device found at port: " + port)
except serial.serialutil.SerialException:
print("Non working PX4 port found, continuing...")
if mode == -1:
print("No original PX4 CDC firmware found either.")
print("Error: no compatible usb device found...")
sys.exit(1)
if target == "fbw":
print("Error: original firmware cannot be used to upload the fbw code. Wait for the PX4 bootloader to exit (takes 5 seconds), or in case this is the first upload; first upload the Paparazzi ap target.")
sys.exit(1)
else:
print("Uploading AP using original PX4 firmware...")
print("If the uploading does not start within a few seconds, please replug the usb (power cycle the board).")
|
ademuk/django-oscar
|
refs/heads/master
|
sites/sandbox/apps/gateway/forms.py
|
60
|
from django import forms
from django.contrib.auth.models import User
from oscar.apps.customer.utils import normalise_email
class GatewayForm(forms.Form):
email = forms.EmailField()
def clean_email(self):
email = normalise_email(self.cleaned_data['email'])
if User.objects.filter(email__iexact=email).exists():
raise forms.ValidationError(
"A user already exists with email %s" % email
)
return email
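# Illustrative usage (not part of the original module): standard Django form
# validation drives the duplicate-email check above.
#
#     form = GatewayForm(data={'email': 'someone@example.com'})
#     if form.is_valid():
#         email = form.cleaned_data['email']   # normalised address, not yet registered
#     else:
#         errors = form.errors['email']        # e.g. "A user already exists with email ..."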
|
sgzsh269/django
|
refs/heads/master
|
tests/null_queries/tests.py
|
55
|
from __future__ import unicode_literals
from django.core.exceptions import FieldError
from django.test import TestCase
from .models import Choice, Inner, OuterA, OuterB, Poll
class NullQueriesTests(TestCase):
def test_none_as_null(self):
"""
Regression test for the use of None as a query value.
None is interpreted as an SQL NULL, but only in __exact and __iexact
queries.
Set up some initial polls and choices
"""
p1 = Poll(question='Why?')
p1.save()
c1 = Choice(poll=p1, choice='Because.')
c1.save()
c2 = Choice(poll=p1, choice='Why Not?')
c2.save()
# Exact query with value None returns nothing ("is NULL" in sql,
# but every 'id' field has a value).
self.assertQuerysetEqual(Choice.objects.filter(choice__exact=None), [])
# The same behavior for iexact query.
self.assertQuerysetEqual(Choice.objects.filter(choice__iexact=None), [])
# Excluding the previous result returns everything.
self.assertQuerysetEqual(
Choice.objects.exclude(choice=None).order_by('id'),
[
'<Choice: Choice: Because. in poll Q: Why? >',
'<Choice: Choice: Why Not? in poll Q: Why? >'
]
)
# Valid query, but fails because foo isn't a keyword
with self.assertRaises(FieldError):
Choice.objects.filter(foo__exact=None)
# Can't use None on anything other than __exact and __iexact
with self.assertRaises(ValueError):
Choice.objects.filter(id__gt=None)
# Related managers use __exact=None implicitly if the object hasn't been saved.
p2 = Poll(question="How?")
self.assertEqual(repr(p2.choice_set.all()), '<QuerySet []>')
def test_reverse_relations(self):
"""
Querying across reverse relations and then another relation should
insert outer joins correctly so as not to exclude results.
"""
obj = OuterA.objects.create()
self.assertQuerysetEqual(
OuterA.objects.filter(inner__third=None),
['<OuterA: OuterA object>']
)
self.assertQuerysetEqual(
OuterA.objects.filter(inner__third__data=None),
['<OuterA: OuterA object>']
)
Inner.objects.create(first=obj)
self.assertQuerysetEqual(
Inner.objects.filter(first__inner__third=None),
['<Inner: Inner object>']
)
# Ticket #13815: check if <reverse>_isnull=False does not produce
# faulty empty lists
OuterB.objects.create(data="reverse")
self.assertQuerysetEqual(
OuterB.objects.filter(inner__isnull=False),
[]
)
Inner.objects.create(first=obj)
self.assertQuerysetEqual(
OuterB.objects.exclude(inner__isnull=False),
['<OuterB: OuterB object>']
)
|
fake-name/ReadableWebProxy
|
refs/heads/master
|
WebMirror/management/rss_parser_funcs/feed_parse_extractBoredtrans95440947WordpressCom.py
|
1
|
def extractBoredtrans95440947WordpressCom(item):
'''
Parser for 'boredtrans95440947.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
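# Note (not part of the original file): the parser above assumes `item` is a
# feed entry dict with at least 'title' and 'tags' keys, e.g. (made-up values)
#     {'title': 'Some Novel - Chapter 12 part 3', 'tags': ['PRC']}
# extractVolChapterFragmentPostfix and buildReleaseMessageWithType are helpers
# provided elsewhere in WebMirror and are not defined in this file.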
|
takeshineshiro/nova
|
refs/heads/master
|
nova/api/openstack/compute/floating_ips.py
|
3
|
# Copyright 2011 OpenStack Foundation
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2011 Grid Dynamics
# Copyright 2011 Eldar Nugaev, Kirill Shileev, Ilya Alekseyev
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from oslo_log import log as logging
from oslo_utils import uuidutils
import webob
from nova.api.openstack import common
from nova.api.openstack.compute.schemas import floating_ips
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova import compute
from nova.compute import utils as compute_utils
from nova import exception
from nova.i18n import _
from nova.i18n import _LW
from nova import network
LOG = logging.getLogger(__name__)
ALIAS = 'os-floating-ips'
authorize = extensions.os_compute_authorizer(ALIAS)
def _translate_floating_ip_view(floating_ip):
result = {
'id': floating_ip['id'],
'ip': floating_ip['address'],
'pool': floating_ip['pool'],
}
try:
result['fixed_ip'] = floating_ip['fixed_ip']['address']
except (TypeError, KeyError, AttributeError):
result['fixed_ip'] = None
try:
result['instance_id'] = floating_ip['fixed_ip']['instance_uuid']
except (TypeError, KeyError, AttributeError):
result['instance_id'] = None
return {'floating_ip': result}
def _translate_floating_ips_view(floating_ips):
return {'floating_ips': [_translate_floating_ip_view(ip)['floating_ip']
for ip in floating_ips]}
def get_instance_by_floating_ip_addr(self, context, address):
try:
instance_id =\
self.network_api.get_instance_id_by_floating_address(
context, address)
except exception.FloatingIpNotFoundForAddress as ex:
raise webob.exc.HTTPNotFound(explanation=ex.format_message())
except exception.FloatingIpMultipleFoundForAddress as ex:
raise webob.exc.HTTPConflict(explanation=ex.format_message())
if instance_id:
return common.get_instance(self.compute_api, context, instance_id)
def disassociate_floating_ip(self, context, instance, address):
try:
self.network_api.disassociate_floating_ip(context, instance, address)
except exception.Forbidden:
raise webob.exc.HTTPForbidden()
except exception.CannotDisassociateAutoAssignedFloatingIP:
msg = _('Cannot disassociate auto assigned floating ip')
raise webob.exc.HTTPForbidden(explanation=msg)
class FloatingIPController(object):
"""The Floating IPs API controller for the OpenStack API."""
def __init__(self):
self.compute_api = compute.API(skip_policy_check=True)
self.network_api = network.API(skip_policy_check=True)
super(FloatingIPController, self).__init__()
@extensions.expected_errors((400, 404))
def show(self, req, id):
"""Return data about the given floating ip."""
context = req.environ['nova.context']
authorize(context)
try:
floating_ip = self.network_api.get_floating_ip(context, id)
except (exception.NotFound, exception.FloatingIpNotFound):
msg = _("Floating ip not found for id %s") % id
raise webob.exc.HTTPNotFound(explanation=msg)
except exception.InvalidID as e:
raise webob.exc.HTTPBadRequest(explanation=e.format_message())
return _translate_floating_ip_view(floating_ip)
@extensions.expected_errors(())
def index(self, req):
"""Return a list of floating ips allocated to a project."""
context = req.environ['nova.context']
authorize(context)
floating_ips = self.network_api.get_floating_ips_by_project(context)
return _translate_floating_ips_view(floating_ips)
@extensions.expected_errors((403, 404))
def create(self, req, body=None):
context = req.environ['nova.context']
authorize(context)
pool = None
if body and 'pool' in body:
pool = body['pool']
try:
address = self.network_api.allocate_floating_ip(context, pool)
ip = self.network_api.get_floating_ip_by_address(context, address)
except exception.NoMoreFloatingIps:
if pool:
msg = _("No more floating ips in pool %s.") % pool
else:
msg = _("No more floating ips available.")
raise webob.exc.HTTPNotFound(explanation=msg)
except exception.FloatingIpLimitExceeded:
if pool:
msg = _("IP allocation over quota in pool %s.") % pool
else:
msg = _("IP allocation over quota.")
raise webob.exc.HTTPForbidden(explanation=msg)
except exception.FloatingIpPoolNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
return _translate_floating_ip_view(ip)
@wsgi.response(202)
@extensions.expected_errors((400, 403, 404, 409))
def delete(self, req, id):
context = req.environ['nova.context']
authorize(context)
# get the floating ip object
try:
floating_ip = self.network_api.get_floating_ip(context, id)
except (exception.NotFound, exception.FloatingIpNotFound):
msg = _("Floating ip not found for id %s") % id
raise webob.exc.HTTPNotFound(explanation=msg)
except exception.InvalidID as e:
raise webob.exc.HTTPBadRequest(explanation=e.format_message())
address = floating_ip['address']
# get the associated instance object (if any)
instance = get_instance_by_floating_ip_addr(self, context, address)
try:
self.network_api.disassociate_and_release_floating_ip(
context, instance, floating_ip)
except exception.Forbidden:
raise webob.exc.HTTPForbidden()
except exception.CannotDisassociateAutoAssignedFloatingIP:
msg = _('Cannot disassociate auto assigned floating ip')
raise webob.exc.HTTPForbidden(explanation=msg)
class FloatingIPActionController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(FloatingIPActionController, self).__init__(*args, **kwargs)
self.compute_api = compute.API(skip_policy_check=True)
self.network_api = network.API(skip_policy_check=True)
@extensions.expected_errors((400, 403, 404))
@wsgi.action('addFloatingIp')
@validation.schema(floating_ips.add_floating_ip)
def _add_floating_ip(self, req, id, body):
"""Associate floating_ip to an instance."""
context = req.environ['nova.context']
authorize(context)
address = body['addFloatingIp']['address']
instance = common.get_instance(self.compute_api, context, id)
cached_nwinfo = compute_utils.get_nw_info_for_instance(instance)
if not cached_nwinfo:
LOG.warning(
_LW('Info cache is %r during associate') % instance.info_cache,
instance=instance)
msg = _('No nw_info cache associated with instance')
raise webob.exc.HTTPBadRequest(explanation=msg)
fixed_ips = cached_nwinfo.fixed_ips()
if not fixed_ips:
msg = _('No fixed ips associated to instance')
raise webob.exc.HTTPBadRequest(explanation=msg)
fixed_address = None
if 'fixed_address' in body['addFloatingIp']:
fixed_address = body['addFloatingIp']['fixed_address']
for fixed in fixed_ips:
if fixed['address'] == fixed_address:
break
else:
msg = _('Specified fixed address not assigned to instance')
raise webob.exc.HTTPBadRequest(explanation=msg)
if not fixed_address:
try:
fixed_address = next(ip['address'] for ip in fixed_ips
if netaddr.valid_ipv4(ip['address']))
except StopIteration:
msg = _('Unable to associate floating ip %(address)s '
'to any fixed IPs for instance %(id)s. '
'Instance has no fixed IPv4 addresses to '
'associate.') % (
{'address': address, 'id': id})
raise webob.exc.HTTPBadRequest(explanation=msg)
if len(fixed_ips) > 1:
LOG.warning(_LW('multiple fixed_ips exist, using the first '
'IPv4 fixed_ip: %s'), fixed_address)
try:
self.network_api.associate_floating_ip(context, instance,
floating_address=address,
fixed_address=fixed_address)
except exception.FloatingIpAssociated:
msg = _('floating ip is already associated')
raise webob.exc.HTTPBadRequest(explanation=msg)
except exception.NoFloatingIpInterface:
msg = _('l3driver call to add floating ip failed')
raise webob.exc.HTTPBadRequest(explanation=msg)
except exception.FloatingIpNotFoundForAddress:
msg = _('floating ip not found')
raise webob.exc.HTTPNotFound(explanation=msg)
except exception.Forbidden as e:
raise webob.exc.HTTPForbidden(explanation=e.format_message())
except Exception as e:
msg = _('Unable to associate floating ip %(address)s to '
'fixed ip %(fixed_address)s for instance %(id)s. '
'Error: %(error)s') % (
{'address': address, 'fixed_address': fixed_address,
'id': id, 'error': e})
LOG.exception(msg)
raise webob.exc.HTTPBadRequest(explanation=msg)
return webob.Response(status_int=202)
@extensions.expected_errors((400, 403, 404, 409))
@wsgi.action('removeFloatingIp')
@validation.schema(floating_ips.remove_floating_ip)
def _remove_floating_ip(self, req, id, body):
"""Dissociate floating_ip from an instance."""
context = req.environ['nova.context']
authorize(context)
address = body['removeFloatingIp']['address']
# get the floating ip object
try:
floating_ip = self.network_api.get_floating_ip_by_address(context,
address)
except exception.FloatingIpNotFoundForAddress:
msg = _("floating ip not found")
raise webob.exc.HTTPNotFound(explanation=msg)
# get the associated instance object (if any)
instance = get_instance_by_floating_ip_addr(self, context, address)
# disassociate if associated
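# The `[...] and [...] or [...]` construct below is an old-style conditional
# expression: compare against instance.uuid when `id` looks like a UUID,
# otherwise against the integer instance.id.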
if (instance and
floating_ip.get('fixed_ip_id') and
(uuidutils.is_uuid_like(id) and
[instance.uuid == id] or
[instance.id == id])[0]):
try:
disassociate_floating_ip(self, context, instance, address)
except exception.FloatingIpNotAssociated:
msg = _('Floating ip is not associated')
raise webob.exc.HTTPBadRequest(explanation=msg)
return webob.Response(status_int=202)
else:
msg = _("Floating ip %(address)s is not associated with instance "
"%(id)s.") % {'address': address, 'id': id}
raise webob.exc.HTTPConflict(explanation=msg)
class FloatingIps(extensions.V3APIExtensionBase):
"""Floating IPs support."""
name = "FloatingIps"
alias = ALIAS
version = 1
def get_resources(self):
resource = [extensions.ResourceExtension(ALIAS,
FloatingIPController())]
return resource
def get_controller_extensions(self):
controller = FloatingIPActionController()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
|
spradeepv/dive-into-python
|
refs/heads/master
|
hackerrank/domain/artificial_intelligence/alpha_beta_pruning/tic_tac_toe.py
|
1
|
"""
Tic-tac-toe is a pencil-and-paper game for two players, X (ascii value 88)
and O (ascii value 79), who take turns marking the spaces in a 3*3 grid. The
player who succeeds in placing three respective marks in a horizontal,
vertical, or diagonal row wins the game. Empty space is represented by _ (
ascii value 95), and the X player goes first.
Here is an example game won by the first player, X:
(example board images omitted from this text version)
The function nextMove takes in a char player, and the 3x3 board as an array.
Complete the function to print 2 space separated integers r and c which
denote the row and column that will be marked in your next move. The top
left position is denoted by (0,0).
How does it work?
Your code is run alternately with the opponent bot for every move.
Example input:
X
___
___
_XO
Example output:
1 0
Explanation:
The board results in the following state after the above move
___
X__
_XO
"""
# !/bin/python
import random
# Complete the function below to print 2 integers separated by a single
# space which will be your next move
def nextMove(player, board):
    # Minimal legal move: first empty cell (see the alpha-beta sketch at the end of this file).
    r, c = [(i, j) for i in range(3) for j in range(3) if board[i][j] == '_'][0]
    print r, c
# If player is X, I'm the first player.
# If player is O, I'm the second player.
player = raw_input()
# Read the board now. The board is a 3x3 array filled with X, O or _.
board = []
for i in xrange(0, 3):
board.append([x for x in raw_input().strip()])
nextMove(player, board)
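# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original submission): a minimax search
# with alpha-beta pruning for the 3x3 board described in the docstring above.
# All names here (_winner, _score, _alphabeta, best_move) are made up for this
# example; to actually drive nextMove with best_move, these definitions would
# have to be moved above the call to nextMove.
def _winner(board):
    """Return 'X' or 'O' if that player has three in a row, else None."""
    lines = [[(r, c) for c in range(3)] for r in range(3)]                 # rows
    lines += [[(r, c) for r in range(3)] for c in range(3)]                # columns
    lines += [[(i, i) for i in range(3)], [(i, 2 - i) for i in range(3)]]  # diagonals
    for line in lines:
        marks = set(board[r][c] for r, c in line)
        if len(marks) == 1 and '_' not in marks:
            return marks.pop()
    return None

def _score(board, me, depth):
    """Positive for wins of `me`, negative for losses; prefer faster wins."""
    w = _winner(board)
    if w == me:
        return 10 - depth
    if w is not None:
        return depth - 10
    return 0

def _alphabeta(board, me, turn, depth, alpha, beta):
    """Return (value, move) for the player `turn`; `me` is the maximizing player."""
    empty = [(r, c) for r in range(3) for c in range(3) if board[r][c] == '_']
    if _winner(board) is not None or not empty:
        return _score(board, me, depth), None
    best_val = -100 if turn == me else 100
    best_mv = empty[0]
    nxt = 'O' if turn == 'X' else 'X'
    for r, c in empty:
        board[r][c] = turn
        val, _ = _alphabeta(board, me, nxt, depth + 1, alpha, beta)
        board[r][c] = '_'
        if turn == me and val > best_val:
            best_val, best_mv = val, (r, c)
            alpha = max(alpha, best_val)
        elif turn != me and val < best_val:
            best_val, best_mv = val, (r, c)
            beta = min(beta, best_val)
        if alpha >= beta:        # remaining siblings cannot change the result
            break
    return best_val, best_mv

def best_move(player, board):
    """Row/column of an optimal move for `player` ('X' or 'O') on `board`."""
    _, move = _alphabeta(board, player, player, 0, -100, 100)
    return move
# --------------------------------------------------------------------------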
|
christiantroy/xbmc
|
refs/heads/master
|
lib/libUPnP/Platinum/Build/Tools/Scripts/MakeAllVs.py
|
262
|
#! /usr/bin/env python
import os
import sys
import getopt
import subprocess
configs = ['Debug', 'Release']
solutions = ['../../../Build/Targets/x86-microsoft-win32-vs2008/Platinum.sln']
try:
opts, args = getopt.getopt(sys.argv[1:], "b:rc")
except getopt.GetoptError, (msg, opt):
print 'No build_config, defaulting to build all'
for opt, arg in opts:
if opt == '-b':
config = arg
def CallVsMake(sln, cfg):
cmd = 'python VsMake.py -s %s -b %s' % (sln, cfg)
print cmd
retVal = subprocess.call(cmd.split())
if retVal != 0:
sys.exit(retVal)
for sln in solutions:
if 'config' not in locals() and 'config' not in globals():
print '************ Building all configurations **************'
for cfg in configs:
CallVsMake(sln, cfg)
else:
print '************ Building configuration=' + config + ' ****************'
CallVsMake(sln, config)
|
bratsche/Neutron-Drive
|
refs/heads/master
|
google_appengine/lib/django_0_96/django/utils/tzinfo.py
|
34
|
"Implementation of tzinfo classes for use with datetime.datetime."
import time
from datetime import timedelta, tzinfo
class FixedOffset(tzinfo):
"Fixed offset in minutes east from UTC."
def __init__(self, offset):
self.__offset = timedelta(minutes=offset)
self.__name = "%+03d%02d" % (offset // 60, offset % 60)
def __repr__(self):
return self.__name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return timedelta(0)
class LocalTimezone(tzinfo):
"Proxy timezone information from time module."
def __init__(self, dt):
tzinfo.__init__(self, dt)
self._tzname = time.tzname[self._isdst(dt)]
def __repr__(self):
return self._tzname
def utcoffset(self, dt):
if self._isdst(dt):
return timedelta(seconds=-time.altzone)
else:
return timedelta(seconds=-time.timezone)
def dst(self, dt):
if self._isdst(dt):
return timedelta(seconds=-time.altzone) - timedelta(seconds=-time.timezone)
else:
return timedelta(0)
def tzname(self, dt):
return time.tzname[self._isdst(dt)]
def _isdst(self, dt):
tt = (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.weekday(), 0, -1)
stamp = time.mktime(tt)
tt = time.localtime(stamp)
return tt.tm_isdst > 0
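# Illustrative usage (not part of the original module):
#
#     from datetime import datetime
#     dt = datetime(2014, 7, 1, 12, 0, tzinfo=FixedOffset(120))  # UTC+02:00
#     dt.utcoffset()          # -> timedelta of 2 hours
#     LocalTimezone(dt)       # proxies the local zone via time.timezone/altzone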
|
georgemarshall/django
|
refs/heads/master
|
django/template/loaders/app_directories.py
|
635
|
"""
Wrapper for loading templates from "templates" directories in INSTALLED_APPS
packages.
"""
from django.template.utils import get_app_template_dirs
from .filesystem import Loader as FilesystemLoader
class Loader(FilesystemLoader):
def get_dirs(self):
return get_app_template_dirs('templates')
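# Illustrative configuration (not part of this module): this loader is usually
# enabled through the TEMPLATES setting, either implicitly via APP_DIRS=True or
# explicitly by dotted path, e.g.
#
#     TEMPLATES = [{
#         'BACKEND': 'django.template.backends.django.DjangoTemplates',
#         'OPTIONS': {
#             'loaders': ['django.template.loaders.app_directories.Loader'],
#         },
#     }]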
|
NaturalSolutions/ecoReleve-Server
|
refs/heads/master
|
ecorelevesensor/models/monitored_site.py
|
1
|
"""
Created on Mon Sep 1 14:38:09 2014
@author: Natural Solutions (Thomas)
"""
from sqlalchemy import (
Boolean,
Column,
DateTime,
func,
Index,
Integer,
Sequence,
String,
UniqueConstraint
)
from sqlalchemy.orm import relationship
from ..models import Base, dbConfig
dialect = dbConfig['dialect']
class MonitoredSite(Base):
__tablename__ = 'TMonitoredStations'
id = Column('TGeo_pk_id', Integer, Sequence('seq_monitoredsite_pk_id'),
primary_key=True)
creator = Column(Integer)
type_ = Column('name_Type', String(200))
name = Column(String(50))
id_type = Column('id_Type', Integer)
creation_date = Column(DateTime, server_default=func.now(), nullable=False)
active = Column(Boolean)
positions = relationship('MonitoredSitePosition', lazy='joined',
order_by='desc(MonitoredSitePosition.begin_date)')
__table_args__ = (
Index('idx_Tmonitoredsite_name', name),
UniqueConstraint(type_, name),
)
def __json__(self, request):
return {
'id':self.id,
'name':self.name,
'type':self.type_,
'positions':self.positions
}
"""
To be used once eReleve is gone ...
__tablename__ = 'T_MonitoredSite'
id = Column('PK_id', Integer, Sequence('seq_monitoredsite_pk_id'),
primary_key=True)
creator = Column('FK_creator', Integer, ForeignKey(User.id))
type_ = Column('FK_type', String(256), ForeignKey(Thesaurus.topic_en))
name = Column(String(50))
creation_date = Column(DateTime, server_default=func.now(), nullable=False)
active = Column(Boolean)
__table_args__ = (
Index('idx_Tmonitoredsite_name', name),
{'schema': schema}
)
"""
|
StefanRijnhart/odoo
|
refs/heads/master
|
addons/hr/res_users.py
|
27
|
from openerp import api
from openerp.osv import fields, osv
class res_users(osv.Model):
""" Update of res.users class
- add field for the related employee of the user
- when adding groups to a user, check whether base.group_user (member of
'Employee') is among them and, if so, create an employee form linked to the user. """
_name = 'res.users'
_inherit = ['res.users']
_columns = {
'employee_ids': fields.one2many('hr.employee', 'user_id', 'Related employees'),
'display_employees_suggestions': fields.boolean("Display Employees Suggestions"),
}
_defaults = {
'display_employees_suggestions': True,
}
def __init__(self, pool, cr):
""" Override of __init__ to add access rights on
display_employees_suggestions fields. Access rights are disabled by
default, but allowed on some specific fields defined in
self.SELF_{READ/WRITE}ABLE_FIELDS.
"""
init_res = super(res_users, self).__init__(pool, cr)
# duplicate list to avoid modifying the original reference
self.SELF_WRITEABLE_FIELDS = list(self.SELF_WRITEABLE_FIELDS)
self.SELF_WRITEABLE_FIELDS.append('display_employees_suggestions')
# duplicate list to avoid modifying the original reference
self.SELF_READABLE_FIELDS = list(self.SELF_READABLE_FIELDS)
self.SELF_READABLE_FIELDS.append('display_employees_suggestions')
return init_res
def stop_showing_employees_suggestions(self, cr, uid, user_id, context=None):
"""Update display_employees_suggestions value to False"""
if context is None:
context = {}
self.write(cr, uid, user_id, {"display_employees_suggestions": False}, context)
def _create_welcome_message(self, cr, uid, user, context=None):
"""Do not welcome new users anymore, welcome new employees instead"""
return True
def _message_post_get_eid(self, cr, uid, thread_id, context=None):
assert thread_id, "res.users does not support posting global messages"
if context and 'thread_model' in context:
context = dict(context or {})
context['thread_model'] = 'hr.employee'
if isinstance(thread_id, (list, tuple)):
thread_id = thread_id[0]
return self.pool.get('hr.employee').search(cr, uid, [('user_id', '=', thread_id)], context=context)
@api.cr_uid_ids_context
def message_post(self, cr, uid, thread_id, context=None, **kwargs):
""" Redirect the posting of message on res.users to the related employee.
This is done because when giving the context of Chatter on the
various mailboxes, we do not have access to the current partner_id. """
if kwargs.get('type') == 'email':
return super(res_users, self).message_post(cr, uid, thread_id, context=context, **kwargs)
res = None
employee_ids = self._message_post_get_eid(cr, uid, thread_id, context=context)
if not employee_ids: # no employee: fall back on previous behavior
return super(res_users, self).message_post(cr, uid, thread_id, context=context, **kwargs)
for employee_id in employee_ids:
res = self.pool.get('hr.employee').message_post(cr, uid, employee_id, context=context, **kwargs)
return res
|
USC-ACTLab/crazyswarm
|
refs/heads/master
|
ros_ws/src/crazyswarm/scripts/figure8_csv.py
|
1
|
#!/usr/bin/env python
import numpy as np
from pycrazyswarm import *
import uav_trajectory
if __name__ == "__main__":
swarm = Crazyswarm()
timeHelper = swarm.timeHelper
allcfs = swarm.allcfs
traj1 = uav_trajectory.Trajectory()
traj1.loadcsv("figure8.csv")
TRIALS = 1
TIMESCALE = 1.0
for i in range(TRIALS):
for cf in allcfs.crazyflies:
cf.uploadTrajectory(0, 0, traj1)
allcfs.takeoff(targetHeight=1.0, duration=2.0)
timeHelper.sleep(2.5)
for cf in allcfs.crazyflies:
pos = np.array(cf.initialPosition) + np.array([0, 0, 1.0])
cf.goTo(pos, 0, 2.0)
timeHelper.sleep(2.5)
allcfs.startTrajectory(0, timescale=TIMESCALE)
timeHelper.sleep(traj1.duration * TIMESCALE + 2.0)
allcfs.startTrajectory(0, timescale=TIMESCALE, reverse=True)
timeHelper.sleep(traj1.duration * TIMESCALE + 2.0)
allcfs.land(targetHeight=0.06, duration=2.0)
timeHelper.sleep(3.0)
|
mariianna/kodi
|
refs/heads/master
|
core/ClientCookie/_Opener.py
|
17
|
"""Integration with Python standard library module urllib2: OpenerDirector
class.
Copyright 2004 John J Lee <jjl@pobox.com>
This code is free software; you can redistribute it and/or modify it under
the terms of the BSD License (see the file COPYING included with the
distribution).
"""
try: True
except NameError:
True = 1
False = 0
import os, tempfile, urllib2, string, bisect, urlparse
from urllib import url2pathname
from _Util import startswith, isstringlike
from _Request import Request
def methnames(obj):
"""Return method names of class instance.
dir(obj) doesn't work across Python versions, this does.
"""
return methnames_of_instance_as_dict(obj).keys()
def methnames_of_instance_as_dict(inst):
names = {}
names.update(methnames_of_class_as_dict(inst.__class__))
for methname in dir(inst):
candidate = getattr(inst, methname)
if callable(candidate):
names[methname] = None
return names
def methnames_of_class_as_dict(klass):
names = {}
for methname in dir(klass):
candidate = getattr(klass, methname)
if callable(candidate):
names[methname] = None
for baseclass in klass.__bases__:
names.update(methnames_of_class_as_dict(baseclass))
return names
class OpenerMixin:
def _request(self, url_or_req, data):
if isstringlike(url_or_req):
req = Request(url_or_req, data)
else:
# already a urllib2.Request or ClientCookie.Request instance
req = url_or_req
if data is not None:
req.add_data(data)
return req
def retrieve(self, fullurl, filename=None, reporthook=None, data=None):
"""Returns (filename, headers).
For remote objects, the default filename will refer to a temporary
file.
"""
req = self._request(fullurl, data)
type_ = req.get_type()
fp = self.open(req)
headers = fp.info()
if filename is None and type_ == 'file':
return url2pathname(req.get_selector()), headers
if filename:
tfp = open(filename, 'wb')
else:
path = urlparse.urlparse(fullurl)[2]
suffix = os.path.splitext(path)[1]
tfp = tempfile.TemporaryFile("wb", suffix=suffix)
result = filename, headers
bs = 1024*8
size = -1
read = 0
blocknum = 1
if reporthook:
if headers.has_key("content-length"):
size = int(headers["Content-Length"])
reporthook(0, bs, size)
while 1:
block = fp.read(bs)
read += len(block)
if reporthook:
reporthook(blocknum, bs, size)
blocknum = blocknum + 1
if not block:
break
tfp.write(block)
fp.close()
tfp.close()
del fp
del tfp
if size>=0 and read<size:
raise IOError("incomplete retrieval error",
"got only %d bytes out of %d" % (read,size))
return result
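# Illustrative usage (not part of the original module): retrieve() mirrors
# urllib.urlretrieve for an opener built from this module, e.g.
#
#     opener = OpenerDirector()        # with handlers added via add_handler()
#     filename, headers = opener.retrieve("http://example.com/file.bin",
#                                         filename="file.bin")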
class OpenerDirector(urllib2.OpenerDirector, OpenerMixin):
def __init__(self):
urllib2.OpenerDirector.__init__(self)
self.process_response = {}
self.process_request = {}
def add_handler(self, handler):
added = False
for meth in methnames(handler):
i = string.find(meth, "_")
protocol = meth[:i]
condition = meth[i+1:]
if startswith(condition, "error"):
j = string.find(meth[i+1:], "_") + i + 1
kind = meth[j+1:]
try:
kind = int(kind)
except ValueError:
pass
lookup = self.handle_error.get(protocol, {})
self.handle_error[protocol] = lookup
elif (condition == "open" and
protocol not in ["do", "proxy"]): # hack -- see below
kind = protocol
lookup = self.handle_open
elif (condition in ["response", "request"] and
protocol != "redirect"): # yucky hack
# the hack above works around HTTPRedirectHandler, which the test on the
# previous line would otherwise mistake for a processor because it
# defines a redirect_request method :-((
kind = protocol
lookup = getattr(self, "process_"+condition)
else:
continue
if lookup.has_key(kind):
bisect.insort(lookup[kind], handler)
else:
lookup[kind] = [handler]
added = True
continue
if added:
# XXX why does self.handlers need to be sorted?
bisect.insort(self.handlers, handler)
handler.add_parent(self)
def open(self, fullurl, data=None):
req = self._request(fullurl, data)
type_ = req.get_type()
# pre-process request
# XXX should we allow a Processor to change the type (URL
# scheme) of the request?
meth_name = type_+"_request"
for processor in self.process_request.get(type_, []):
meth = getattr(processor, meth_name)
req = meth(req)
response = urllib2.OpenerDirector.open(self, req, data)
# post-process response
meth_name = type_+"_response"
for processor in self.process_response.get(type_, []):
meth = getattr(processor, meth_name)
response = meth(req, response)
return response
def error(self, proto, *args):
if proto in ['http', 'https']:
# XXX http[s] protocols are special-cased
dict = self.handle_error['http'] # https is handled the same way as http here
proto = args[2] # YUCK!
meth_name = 'http_error_%s' % proto
http_err = 1
orig_args = args
else:
dict = self.handle_error
meth_name = proto + '_error'
http_err = 0
args = (dict, proto, meth_name) + args
result = apply(self._call_chain, args)
if result:
return result
if http_err:
args = (dict, 'default', 'http_error_default') + orig_args
return apply(self._call_chain, args)
|
xavierwu/scikit-learn
|
refs/heads/master
|
sklearn/tests/test_base.py
|
216
|
# Author: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.utils import deprecated
#############################################################################
# A few test classes
class MyEstimator(BaseEstimator):
def __init__(self, l1=0, empty=None):
self.l1 = l1
self.empty = empty
class K(BaseEstimator):
def __init__(self, c=None, d=None):
self.c = c
self.d = d
class T(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class DeprecatedAttributeEstimator(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
if b is not None:
DeprecationWarning("b is deprecated and renamed 'a'")
self.a = b
@property
@deprecated("Parameter 'b' is deprecated and renamed to 'a'")
def b(self):
return self._b
class Buggy(BaseEstimator):
" A buggy estimator that does not set its parameters right. "
def __init__(self, a=None):
self.a = 1
class NoEstimator(object):
def __init__(self):
pass
def fit(self, X=None, y=None):
return self
def predict(self, X=None):
return None
class VargEstimator(BaseEstimator):
"""Sklearn estimators shouldn't have vargs."""
def __init__(self, *vargs):
pass
#############################################################################
# The tests
def test_clone():
# Tests that clone creates a correct deep copy.
# We create an estimator, make a copy of its original state
# (which, in this case, is the current state of the estimator),
# and check that the obtained copy is a correct deep copy.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
new_selector = clone(selector)
assert_true(selector is not new_selector)
assert_equal(selector.get_params(), new_selector.get_params())
selector = SelectFpr(f_classif, alpha=np.zeros((10, 2)))
new_selector = clone(selector)
assert_true(selector is not new_selector)
def test_clone_2():
# Tests that clone doesn't copy everything.
# We first create an estimator, give it an own attribute, and
# make a copy of its original state. Then we check that the copy doesn't
# have the specific attribute we manually added to the initial estimator.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
selector.own_attribute = "test"
new_selector = clone(selector)
assert_false(hasattr(new_selector, "own_attribute"))
def test_clone_buggy():
# Check that clone raises an error on buggy estimators.
buggy = Buggy()
buggy.a = 2
assert_raises(RuntimeError, clone, buggy)
no_estimator = NoEstimator()
assert_raises(TypeError, clone, no_estimator)
varg_est = VargEstimator()
assert_raises(RuntimeError, clone, varg_est)
def test_clone_empty_array():
# Regression test for cloning estimators with empty arrays
clf = MyEstimator(empty=np.array([]))
clf2 = clone(clf)
assert_array_equal(clf.empty, clf2.empty)
clf = MyEstimator(empty=sp.csr_matrix(np.array([[0]])))
clf2 = clone(clf)
assert_array_equal(clf.empty.data, clf2.empty.data)
def test_clone_nan():
# Regression test for cloning estimators with default parameter as np.nan
clf = MyEstimator(empty=np.nan)
clf2 = clone(clf)
assert_true(clf.empty is clf2.empty)
def test_repr():
# Smoke test the repr of the base estimator.
my_estimator = MyEstimator()
repr(my_estimator)
test = T(K(), K())
assert_equal(
repr(test),
"T(a=K(c=None, d=None), b=K(c=None, d=None))"
)
some_est = T(a=["long_params"] * 1000)
assert_equal(len(repr(some_est)), 415)
def test_str():
# Smoke test the str of the base estimator
my_estimator = MyEstimator()
str(my_estimator)
def test_get_params():
test = T(K(), K())
assert_true('a__d' in test.get_params(deep=True))
assert_true('a__d' not in test.get_params(deep=False))
test.set_params(a__d=2)
assert_true(test.a.d == 2)
assert_raises(ValueError, test.set_params, a__a=2)
def test_get_params_deprecated():
# deprecated attribute should not show up as params
est = DeprecatedAttributeEstimator(a=1)
assert_true('a' in est.get_params())
assert_true('a' in est.get_params(deep=True))
assert_true('a' in est.get_params(deep=False))
assert_true('b' not in est.get_params())
assert_true('b' not in est.get_params(deep=True))
assert_true('b' not in est.get_params(deep=False))
def test_is_classifier():
svc = SVC()
assert_true(is_classifier(svc))
assert_true(is_classifier(GridSearchCV(svc, {'C': [0.1, 1]})))
assert_true(is_classifier(Pipeline([('svc', svc)])))
assert_true(is_classifier(Pipeline([('svc_cv',
GridSearchCV(svc, {'C': [0.1, 1]}))])))
def test_set_params():
# test nested estimator parameter setting
clf = Pipeline([("svc", SVC())])
# non-existing parameter in svc
assert_raises(ValueError, clf.set_params, svc__stupid_param=True)
# non-existing parameter of pipeline
assert_raises(ValueError, clf.set_params, svm__stupid_param=True)
# we don't currently catch if the things in pipeline are estimators
# bad_pipeline = Pipeline([("bad", NoEstimator())])
# assert_raises(AttributeError, bad_pipeline.set_params,
# bad__stupid_param=True)
def test_score_sample_weight():
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn import datasets
rng = np.random.RandomState(0)
# test both ClassifierMixin and RegressorMixin
estimators = [DecisionTreeClassifier(max_depth=2),
DecisionTreeRegressor(max_depth=2)]
sets = [datasets.load_iris(),
datasets.load_boston()]
for est, ds in zip(estimators, sets):
est.fit(ds.data, ds.target)
# generate random sample weights
sample_weight = rng.randint(1, 10, size=len(ds.target))
# check that the score with and without sample weights are different
assert_not_equal(est.score(ds.data, ds.target),
est.score(ds.data, ds.target,
sample_weight=sample_weight),
msg="Unweighted and weighted scores "
"are unexpectedly equal")
|
rochacbruno/flasgger
|
refs/heads/master
|
examples/swagger_config_2.py
|
1
|
"""
In this example `openapi` version is used instead of `swagger` version.
"""
from flask import Flask, jsonify
from flasgger import Swagger, swag_from
app = Flask(__name__)
swagger_config = {
"headers": [],
"specs": [
{
"endpoint": "swagger",
"route": "/characteristics/swagger.json",
"rule_filter": lambda rule: True, # all in
"model_filter": lambda tag: True, # all in
}
],
"title": "Product Characteristics APIs",
"version": '',
"termsOfService": "",
"static_url_path": "/characteristics/static",
"swagger_ui": True,
"specs_route": "/characteristics/swagger/",
"description": "",
"securityDefinitions": {
"oAuthSample": {
"type": "oauth2",
"flow": "application",
"tokenUrl": "https://api.pgsmartshopassistant.com/o/token/",
}
}
}
colors_spec = {
"tags": [
"colors"
],
"parameters": [
{
"name": "palette",
"in": "path",
"type": "string",
"enum": [
"all",
"rgb",
"cmyk"
],
"required": True,
"default": "all",
"description": "Which palette to filter?"
}
],
"operationId": "get_colors",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"security": {
"colors_oauth": {
"$ref": "#/securityDefinitions/oAuthSample"
}
},
"schemes": [
"http",
"https"
],
"externalDocs": {
"description": "Project repository",
"url": "http://github.com/rochacbruno/flasgger"
},
"definitions": {
"Palette": {
"type": "object",
"properties": {
"palette_name": {
"type": "array",
"items": {
"$ref": "#/definitions/Color"
}
}
}
},
"Color": {
"type": "string"
}
},
"responses": {
"200": {
"description": "A list of colors (may be filtered by palette)",
"schema": {
"$ref": "#/definitions/Palette"
},
"examples": {
"rgb": [
"red",
"green",
"blue"
]
}
}
}
}
@app.route('/colors/<palette>/')
@swag_from(colors_spec)
def colors(palette):
"""
Example using a dictionary as specification
This is the description
You can also set 'summary' and 'description' in
specs_dict
---
# values here overrides the specs dict
"""
all_colors = {
'cmyk': ['cian', 'magenta', 'yellow', 'black'],
'rgb': ['red', 'green', 'blue']
}
if palette == 'all':
result = all_colors
else:
result = {palette: all_colors.get(palette)}
return jsonify(result)
swag = Swagger(app, config=swagger_config)
def test_swag(client, specs_data):
"""
This test runs automatically in Travis CI
:param client: Flask app test client
:param specs_data: {'url': {swag_specs}} for every spec in app
"""
for spec in specs_data.values():
assert 'securityDefinitions' in spec
assert 'oAuthSample' in spec['securityDefinitions']
if __name__ == '__main__':
app.run(debug=True)
|
latendre/PythonCyc
|
refs/heads/master
|
setup.py
|
1
|
# Copyright (c) 2014, SRI International
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ----------------------------------------------------------------------
from distutils.core import setup
setup(
name='PythonCyc',
version='1.0',
author='Mario Latendresse',
author_email='latendre@ai.sri.com',
packages=['pythoncyc'],
license='LICENSE',
description='A Python interface to Pathway Tools',
long_description=open('README.md').read()
)
|
xindus40223115/w16b_test
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/textwrap.py
|
745
|
"""Text wrapping and filling.
"""
# Copyright (C) 1999-2001 Gregory P. Ward.
# Copyright (C) 2002, 2003 Python Software Foundation.
# Written by Greg Ward <gward@python.net>
import re
__all__ = ['TextWrapper', 'wrap', 'fill', 'dedent', 'indent']
# Hardcode the recognized whitespace characters to the US-ASCII
# whitespace characters. The main reason for doing this is that in
# ISO-8859-1, 0xa0 is non-breaking whitespace, so in certain locales
# that character winds up in string.whitespace. Respecting
# string.whitespace in those cases would 1) make textwrap treat 0xa0 the
# same as any other whitespace char, which is clearly wrong (it's a
# *non-breaking* space), 2) possibly cause problems with Unicode,
# since 0xa0 is not in range(128).
_whitespace = '\t\n\x0b\x0c\r '
class TextWrapper:
"""
Object for wrapping/filling text. The public interface consists of
the wrap() and fill() methods; the other methods are just there for
subclasses to override in order to tweak the default behaviour.
If you want to completely replace the main wrapping algorithm,
you'll probably have to override _wrap_chunks().
Several instance attributes control various aspects of wrapping:
width (default: 70)
the maximum width of wrapped lines (unless break_long_words
is false)
initial_indent (default: "")
string that will be prepended to the first line of wrapped
output. Counts towards the line's width.
subsequent_indent (default: "")
string that will be prepended to all lines save the first
of wrapped output; also counts towards each line's width.
expand_tabs (default: true)
Expand tabs in input text to spaces before further processing.
Each tab will become 0 .. 'tabsize' spaces, depending on its position
in its line. If false, each tab is treated as a single character.
tabsize (default: 8)
Expand tabs in input text to 0 .. 'tabsize' spaces, unless
'expand_tabs' is false.
replace_whitespace (default: true)
Replace all whitespace characters in the input text by spaces
after tab expansion. Note that if expand_tabs is false and
replace_whitespace is true, every tab will be converted to a
single space!
fix_sentence_endings (default: false)
Ensure that sentence-ending punctuation is always followed
by two spaces. Off by default because the algorithm is
(unavoidably) imperfect.
break_long_words (default: true)
Break words longer than 'width'. If false, those words will not
be broken, and some lines might be longer than 'width'.
break_on_hyphens (default: true)
Allow breaking hyphenated words. If true, wrapping will occur
preferably on whitespaces and right after hyphens part of
compound words.
drop_whitespace (default: true)
Drop leading and trailing whitespace from lines.
"""
unicode_whitespace_trans = {}
uspace = ord(' ')
for x in _whitespace:
unicode_whitespace_trans[ord(x)] = uspace
# This funky little regex is just the trick for splitting
# text up into word-wrappable chunks. E.g.
# "Hello there -- you goof-ball, use the -b option!"
# splits into
# Hello/ /there/ /--/ /you/ /goof-/ball,/ /use/ /the/ /-b/ /option!
# (after stripping out empty strings).
wordsep_re = re.compile(
r'(\s+|' # any whitespace
r'[^\s\w]*\w+[^0-9\W]-(?=\w+[^0-9\W])|' # hyphenated words
r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))') # em-dash
    # This less funky little regex just splits on recognized spaces. E.g.
# "Hello there -- you goof-ball, use the -b option!"
# splits into
# Hello/ /there/ /--/ /you/ /goof-ball,/ /use/ /the/ /-b/ /option!/
wordsep_simple_re = re.compile(r'(\s+)')
# XXX this is not locale- or charset-aware -- string.lowercase
# is US-ASCII only (and therefore English-only)
sentence_end_re = re.compile(r'[a-z]' # lowercase letter
r'[\.\!\?]' # sentence-ending punct.
r'[\"\']?' # optional end-of-quote
r'\Z') # end of chunk
def __init__(self,
width=70,
initial_indent="",
subsequent_indent="",
expand_tabs=True,
replace_whitespace=True,
fix_sentence_endings=False,
break_long_words=True,
drop_whitespace=True,
break_on_hyphens=True,
tabsize=8):
self.width = width
self.initial_indent = initial_indent
self.subsequent_indent = subsequent_indent
self.expand_tabs = expand_tabs
self.replace_whitespace = replace_whitespace
self.fix_sentence_endings = fix_sentence_endings
self.break_long_words = break_long_words
self.drop_whitespace = drop_whitespace
self.break_on_hyphens = break_on_hyphens
self.tabsize = tabsize
# -- Private methods -----------------------------------------------
# (possibly useful for subclasses to override)
def _munge_whitespace(self, text):
"""_munge_whitespace(text : string) -> string
Munge whitespace in text: expand tabs and convert all other
whitespace characters to spaces. Eg. " foo\tbar\n\nbaz"
becomes " foo bar baz".
"""
if self.expand_tabs:
text = text.expandtabs(self.tabsize)
if self.replace_whitespace:
text = text.translate(self.unicode_whitespace_trans)
return text
def _split(self, text):
"""_split(text : string) -> [string]
Split the text to wrap into indivisible chunks. Chunks are
not quite the same as words; see _wrap_chunks() for full
details. As an example, the text
Look, goof-ball -- use the -b option!
breaks into the following chunks:
'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ',
'use', ' ', 'the', ' ', '-b', ' ', 'option!'
if break_on_hyphens is True, or in:
'Look,', ' ', 'goof-ball', ' ', '--', ' ',
          'use', ' ', 'the', ' ', '-b', ' ', 'option!'
otherwise.
"""
if self.break_on_hyphens is True:
chunks = self.wordsep_re.split(text)
else:
chunks = self.wordsep_simple_re.split(text)
chunks = [c for c in chunks if c]
return chunks
def _fix_sentence_endings(self, chunks):
"""_fix_sentence_endings(chunks : [string])
Correct for sentence endings buried in 'chunks'. Eg. when the
original text contains "... foo.\nBar ...", munge_whitespace()
and split() will convert that to [..., "foo.", " ", "Bar", ...]
which has one too few spaces; this method simply changes the one
space to two.
"""
i = 0
patsearch = self.sentence_end_re.search
while i < len(chunks)-1:
if chunks[i+1] == " " and patsearch(chunks[i]):
                chunks[i+1] = "  "
i += 2
else:
i += 1
def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
"""_handle_long_word(chunks : [string],
cur_line : [string],
cur_len : int, width : int)
Handle a chunk of text (most likely a word, not whitespace) that
is too long to fit in any line.
"""
# Figure out when indent is larger than the specified width, and make
# sure at least one character is stripped off on every pass
if width < 1:
space_left = 1
else:
space_left = width - cur_len
# If we're allowed to break long words, then do so: put as much
# of the next chunk onto the current line as will fit.
if self.break_long_words:
cur_line.append(reversed_chunks[-1][:space_left])
reversed_chunks[-1] = reversed_chunks[-1][space_left:]
# Otherwise, we have to preserve the long word intact. Only add
# it to the current line if there's nothing already there --
# that minimizes how much we violate the width constraint.
elif not cur_line:
cur_line.append(reversed_chunks.pop())
# If we're not allowed to break long words, and there's already
# text on the current line, do nothing. Next time through the
# main loop of _wrap_chunks(), we'll wind up here again, but
# cur_len will be zero, so the next line will be entirely
# devoted to the long word that we can't handle right now.
def _wrap_chunks(self, chunks):
"""_wrap_chunks(chunks : [string]) -> [string]
Wrap a sequence of text chunks and return a list of lines of
length 'self.width' or less. (If 'break_long_words' is false,
some lines may be longer than this.) Chunks correspond roughly
to words and the whitespace between them: each chunk is
indivisible (modulo 'break_long_words'), but a line break can
come between any two chunks. Chunks should not have internal
whitespace; ie. a chunk is either all whitespace or a "word".
Whitespace chunks will be removed from the beginning and end of
lines, but apart from that whitespace is preserved.
"""
lines = []
if self.width <= 0:
raise ValueError("invalid width %r (must be > 0)" % self.width)
# Arrange in reverse order so items can be efficiently popped
        # from a stack of chunks.
chunks.reverse()
while chunks:
# Start the list of chunks that will make up the current line.
# cur_len is just the length of all the chunks in cur_line.
cur_line = []
cur_len = 0
# Figure out which static string will prefix this line.
if lines:
indent = self.subsequent_indent
else:
indent = self.initial_indent
# Maximum width for this line.
width = self.width - len(indent)
# First chunk on line is whitespace -- drop it, unless this
# is the very beginning of the text (ie. no lines started yet).
if self.drop_whitespace and chunks[-1].strip() == '' and lines:
del chunks[-1]
while chunks:
l = len(chunks[-1])
# Can at least squeeze this chunk onto the current line.
if cur_len + l <= width:
cur_line.append(chunks.pop())
cur_len += l
# Nope, this line is full.
else:
break
# The current line is full, and the next chunk is too big to
# fit on *any* line (not just this one).
if chunks and len(chunks[-1]) > width:
self._handle_long_word(chunks, cur_line, cur_len, width)
# If the last chunk on this line is all whitespace, drop it.
if self.drop_whitespace and cur_line and cur_line[-1].strip() == '':
del cur_line[-1]
# Convert current line back to a string and store it in list
# of all lines (return value).
if cur_line:
lines.append(indent + ''.join(cur_line))
return lines
# -- Public interface ----------------------------------------------
def wrap(self, text):
"""wrap(text : string) -> [string]
Reformat the single paragraph in 'text' so it fits in lines of
no more than 'self.width' columns, and return a list of wrapped
lines. Tabs in 'text' are expanded with string.expandtabs(),
and all other whitespace characters (including newline) are
converted to space.
"""
text = self._munge_whitespace(text)
chunks = self._split(text)
if self.fix_sentence_endings:
self._fix_sentence_endings(chunks)
return self._wrap_chunks(chunks)
def fill(self, text):
"""fill(text : string) -> string
Reformat the single paragraph in 'text' to fit in lines of no
more than 'self.width' columns, and return a new string
containing the entire wrapped paragraph.
"""
return "\n".join(self.wrap(text))
# -- Convenience interface ---------------------------------------------
def wrap(text, width=70, **kwargs):
"""Wrap a single paragraph of text, returning a list of wrapped lines.
Reformat the single paragraph in 'text' so it fits in lines of no
more than 'width' columns, and return a list of wrapped lines. By
default, tabs in 'text' are expanded with string.expandtabs(), and
all other whitespace characters (including newline) are converted to
space. See TextWrapper class for available keyword args to customize
wrapping behaviour.
"""
w = TextWrapper(width=width, **kwargs)
return w.wrap(text)
def fill(text, width=70, **kwargs):
"""Fill a single paragraph of text, returning a new string.
Reformat the single paragraph in 'text' to fit in lines of no more
than 'width' columns, and return a new string containing the entire
wrapped paragraph. As with wrap(), tabs are expanded and other
whitespace characters converted to space. See TextWrapper class for
available keyword args to customize wrapping behaviour.
"""
w = TextWrapper(width=width, **kwargs)
return w.fill(text)
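# Illustrative sketch (not part of the original module): wrap() returns a list
# of lines, fill() returns the same text joined with newlines.
#
#     sample = "The quick brown fox jumps over the lazy dog."
#     wrap(sample, width=20)  # -> ['The quick brown fox', 'jumps over the lazy', 'dog.']
#     fill(sample, width=20)  # -> 'The quick brown fox\njumps over the lazy\ndog.'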
# -- Loosely related functionality -------------------------------------
_whitespace_only_re = re.compile('^[ \t]+$', re.MULTILINE)
_leading_whitespace_re = re.compile('(^[ \t]*)(?:[^ \t\n])', re.MULTILINE)
def dedent(text):
"""Remove any common leading whitespace from every line in `text`.
This can be used to make triple-quoted strings line up with the left
edge of the display, while still presenting them in the source code
in indented form.
Note that tabs and spaces are both treated as whitespace, but they
are not equal: the lines " hello" and "\thello" are
considered to have no common leading whitespace. (This behaviour is
new in Python 2.5; older versions of this module incorrectly
expanded tabs before searching for common leading whitespace.)
"""
# Look for the longest leading string of spaces and tabs common to
# all lines.
margin = None
text = _whitespace_only_re.sub('', text)
indents = _leading_whitespace_re.findall(text)
for indent in indents:
if margin is None:
margin = indent
# Current line more deeply indented than previous winner:
# no change (previous winner is still on top).
elif indent.startswith(margin):
pass
# Current line consistent with and no deeper than previous winner:
# it's the new winner.
elif margin.startswith(indent):
margin = indent
# Current line and previous winner have no common whitespace:
# there is no margin.
else:
margin = ""
break
# sanity check (testing/debugging only)
if 0 and margin:
for line in text.split("\n"):
assert not line or line.startswith(margin), \
"line = %r, margin = %r" % (line, margin)
if margin:
text = re.sub(r'(?m)^' + margin, '', text)
return text
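# Illustrative sketch (not part of the original module): only whitespace common
# to *all* lines is removed, so differently indented lines keep their relative
# indentation.
#
#     dedent("    hello\n      world\n")  # -> "hello\n  world\n"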
def indent(text, prefix, predicate=None):
"""Adds 'prefix' to the beginning of selected lines in 'text'.
If 'predicate' is provided, 'prefix' will only be added to the lines
where 'predicate(line)' is True. If 'predicate' is not provided,
it will default to adding 'prefix' to all non-empty lines that do not
consist solely of whitespace characters.
"""
if predicate is None:
def predicate(line):
return line.strip()
def prefixed_lines():
for line in text.splitlines(True):
yield (prefix + line if predicate(line) else line)
return ''.join(prefixed_lines())
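# Illustrative sketch (not part of the original module): with the default
# predicate only non-blank lines receive the prefix; a custom predicate can
# prefix every line, including blank ones.
#
#     indent("a\n\nb\n", "> ")                     # -> "> a\n\n> b\n"
#     indent("a\n\nb\n", "> ", lambda line: True)  # -> "> a\n> \n> b\n"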
if __name__ == "__main__":
#print dedent("\tfoo\n\tbar")
#print dedent(" \thello there\n \t how are you?")
print(dedent("Hello there.\n This is indented."))
|
JetBrains/intellij-community
|
refs/heads/master
|
python/testData/quickFixes/PyUpdatePropertySignatureQuickFixTest/setter_after.py
|
80
|
class A(Aa):
@property
def x(self, r):
return r
@x.setter
def x(self, r):
self._x = ""
|
wilsonfreitas/pelican-plugins
|
refs/heads/master
|
i18n_subsites/__init__.py
|
75
|
from .i18n_subsites import *
|
meteorcloudy/bazel
|
refs/heads/master
|
src/test/py/bazel/bazel_windows_cpp_test.py
|
3
|
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import unittest
from src.test.py.bazel import test_base
class BazelWindowsCppTest(test_base.TestBase):
def createProjectFiles(self):
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile('BUILD', [
'package(',
' default_visibility = ["//visibility:public"],',
' features=["windows_export_all_symbols"]',
')',
'',
'cc_library(',
' name = "A",',
' srcs = ["a.cc"],',
' hdrs = ["a.h"],',
' copts = ["/DCOMPILING_A_DLL"],',
' features = ["no_windows_export_all_symbols"],',
')',
'',
'cc_library(',
' name = "B",',
' srcs = ["b.cc"],',
' hdrs = ["b.h"],',
' deps = [":A"],',
' copts = ["/DNO_DLLEXPORT"],',
')',
'',
'cc_binary(',
' name = "C",',
' srcs = ["c.cc"],',
' deps = [":A", ":B" ],',
' linkstatic = 0,',
')',
])
self.ScratchFile('a.cc', [
'#include <stdio.h>',
'#include "a.h"',
'int a = 0;',
'void hello_A() {',
' a++;',
' printf("Hello A, %d\\n", a);',
'}',
])
self.ScratchFile('b.cc', [
'#include <stdio.h>',
'#include "a.h"',
'#include "b.h"',
'void hello_B() {',
' hello_A();',
' printf("Hello B\\n");',
'}',
])
header_temp = [
'#ifndef %{name}_H',
'#define %{name}_H',
'',
'#if NO_DLLEXPORT',
' #define DLLEXPORT',
'#elif COMPILING_%{name}_DLL',
' #define DLLEXPORT __declspec(dllexport)',
'#else',
' #define DLLEXPORT __declspec(dllimport)',
'#endif',
'',
'DLLEXPORT void hello_%{name}();',
'',
'#endif',
]
self.ScratchFile('a.h',
[line.replace('%{name}', 'A') for line in header_temp])
self.ScratchFile('b.h',
[line.replace('%{name}', 'B') for line in header_temp])
c_cc_content = [
'#include <stdio.h>',
'#include "a.h"',
'#include "b.h"',
'',
'void hello_C() {',
' hello_A();',
' hello_B();',
' printf("Hello C\\n");',
'}',
'',
'int main() {',
' hello_C();',
' return 0;',
'}',
]
self.ScratchFile('c.cc', c_cc_content)
self.ScratchFile('lib/BUILD', [
'cc_library(',
' name = "A",',
' srcs = ["dummy.cc"],',
' features = ["windows_export_all_symbols"],',
' visibility = ["//visibility:public"],',
')',
])
self.ScratchFile('lib/dummy.cc', ['void dummy() {}'])
self.ScratchFile('main/main.cc', c_cc_content)
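  # Illustrative note (not part of the original test): the test methods below
  # drive this scratch project through the Bazel CLI, roughly equivalent to
  # running e.g.
  #
  #   bazel build //:A --output_groups=dynamic_library
  #
  # and then asserting on the import library (A.if.lib), the DLL and the
  # generated DEF file that appear under bazel-bin.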
def getBazelInfo(self, info_key):
exit_code, stdout, stderr = self.RunBazel(['info', info_key])
self.AssertExitCode(exit_code, 0, stderr)
return stdout[0]
def testBuildDynamicLibraryWithUserExportedSymbol(self):
self.createProjectFiles()
bazel_bin = self.getBazelInfo('bazel-bin')
    # //:A exports symbols by itself using __declspec(dllexport), so it doesn't
# need Bazel to export symbols using DEF file.
exit_code, _, stderr = self.RunBazel(
['build', '//:A', '--output_groups=dynamic_library'])
self.AssertExitCode(exit_code, 0, stderr)
# TODO(pcloudy): change suffixes to .lib and .dll after making DLL
# extensions correct on Windows.
import_library = os.path.join(bazel_bin, 'A.if.lib')
shared_library = os.path.join(bazel_bin, 'A_6f2d5ec56a.dll')
empty_def_file = os.path.join(bazel_bin, 'A.gen.empty.def')
self.assertTrue(os.path.exists(import_library))
self.assertTrue(os.path.exists(shared_library))
# An empty DEF file should be generated for //:A
self.assertTrue(os.path.exists(empty_def_file))
def testBuildDynamicLibraryWithExportSymbolFeature(self):
self.createProjectFiles()
bazel_bin = self.getBazelInfo('bazel-bin')
    # //:B doesn't export symbols by itself, so it needs Bazel to export symbols
# using DEF file.
exit_code, _, stderr = self.RunBazel(
['build', '//:B', '--output_groups=dynamic_library'])
self.AssertExitCode(exit_code, 0, stderr)
# TODO(pcloudy): change suffixes to .lib and .dll after making DLL
# extensions correct on Windows.
import_library = os.path.join(bazel_bin, 'B.if.lib')
shared_library = os.path.join(bazel_bin, 'B_6f2d5ec56a.dll')
def_file = os.path.join(bazel_bin, 'B.gen.def')
self.assertTrue(os.path.exists(import_library))
self.assertTrue(os.path.exists(shared_library))
# DEF file should be generated for //:B
self.assertTrue(os.path.exists(def_file))
# Test build //:B if windows_export_all_symbols feature is disabled by
# no_windows_export_all_symbols.
exit_code, _, stderr = self.RunBazel([
'build', '//:B', '--output_groups=dynamic_library',
'--features=no_windows_export_all_symbols'
])
self.AssertExitCode(exit_code, 0, stderr)
import_library = os.path.join(bazel_bin, 'B.if.lib')
shared_library = os.path.join(bazel_bin, 'B_6f2d5ec56a.dll')
empty_def_file = os.path.join(bazel_bin, 'B.gen.empty.def')
self.assertTrue(os.path.exists(import_library))
self.assertTrue(os.path.exists(shared_library))
# An empty DEF file should be generated for //:B
self.assertTrue(os.path.exists(empty_def_file))
self.AssertFileContentNotContains(empty_def_file, 'hello_B')
def testBuildCcBinaryWithDependenciesDynamicallyLinked(self):
self.createProjectFiles()
bazel_bin = self.getBazelInfo('bazel-bin')
    # Since linkstatic=0 is specified for //:C, its dependencies should be
# dynamically linked.
exit_code, _, stderr = self.RunBazel(['build', '//:C'])
self.AssertExitCode(exit_code, 0, stderr)
# TODO(pcloudy): change suffixes to .lib and .dll after making DLL
# extensions correct on
# Windows.
# a_import_library
self.assertTrue(os.path.exists(os.path.join(bazel_bin, 'A.if.lib')))
# a_shared_library
self.assertTrue(os.path.exists(os.path.join(bazel_bin, 'A_6f2d5ec56a.dll')))
# a_def_file
self.assertTrue(os.path.exists(os.path.join(bazel_bin, 'A.gen.empty.def')))
# b_import_library
self.assertTrue(os.path.exists(os.path.join(bazel_bin, 'B.if.lib')))
# b_shared_library
self.assertTrue(os.path.exists(os.path.join(bazel_bin, 'B_6f2d5ec56a.dll')))
# b_def_file
self.assertTrue(os.path.exists(os.path.join(bazel_bin, 'B.gen.def')))
# c_exe
self.assertTrue(os.path.exists(os.path.join(bazel_bin, 'C.exe')))
def testBuildCcBinaryFromDifferentPackage(self):
self.createProjectFiles()
self.ScratchFile('main/BUILD', [
'cc_binary(',
' name = "main",',
' srcs = ["main.cc"],',
' deps = ["//:B"],',
' linkstatic = 0,'
')',
])
bazel_bin = self.getBazelInfo('bazel-bin')
exit_code, _, stderr = self.RunBazel(['build', '//main:main'])
self.AssertExitCode(exit_code, 0, stderr)
# Test if A.dll and B.dll are copied to the directory of main.exe
main_bin = os.path.join(bazel_bin, 'main/main.exe')
self.assertTrue(os.path.exists(main_bin))
self.assertTrue(
os.path.exists(os.path.join(bazel_bin, 'main/A_6f2d5ec56a.dll')))
self.assertTrue(
os.path.exists(os.path.join(bazel_bin, 'main/B_6f2d5ec56a.dll')))
# Run the binary to see if it runs successfully
exit_code, stdout, stderr = self.RunProgram([main_bin])
self.AssertExitCode(exit_code, 0, stderr)
self.assertEqual(['Hello A, 1', 'Hello A, 2', 'Hello B', 'Hello C'], stdout)
def testBuildCcBinaryDependsOnConflictDLLs(self):
self.createProjectFiles()
self.ScratchFile(
'main/BUILD',
[
'cc_binary(',
' name = "main",',
' srcs = ["main.cc"],',
' deps = ["//:B", "//lib:A"],', # Transitively depends on //:A
' linkstatic = 0,'
')',
])
bazel_bin = self.getBazelInfo('bazel-bin')
# //main:main depends on both //lib:A and //:A
exit_code, _, stderr = self.RunBazel(['build', '//main:main'])
self.AssertExitCode(exit_code, 0, stderr)
# Run the binary to see if it runs successfully
main_bin = os.path.join(bazel_bin, 'main/main.exe')
exit_code, stdout, stderr = self.RunProgram([main_bin])
self.AssertExitCode(exit_code, 0, stderr)
self.assertEqual(['Hello A, 1', 'Hello A, 2', 'Hello B', 'Hello C'], stdout)
# There are 2 A_{hash}.dll since //main:main depends on both //lib:A and
# //:A
self.assertEqual(
len(glob.glob(os.path.join(bazel_bin, 'main', 'A_*.dll'))), 2)
# There is only 1 B_{hash}.dll
self.assertEqual(
len(glob.glob(os.path.join(bazel_bin, 'main', 'B_*.dll'))), 1)
def testBuildDifferentCcBinariesDependOnConflictDLLs(self):
self.createProjectFiles()
self.ScratchFile(
'main/BUILD',
[
'cc_binary(',
' name = "main",',
' srcs = ["main.cc"],',
' deps = ["//:B"],', # Transitively depends on //:A
' linkstatic = 0,'
')',
'',
'cc_binary(',
' name = "other_main",',
' srcs = ["other_main.cc"],',
' deps = ["//lib:A"],',
' linkstatic = 0,'
')',
])
bazel_bin = self.getBazelInfo('bazel-bin')
self.ScratchFile('main/other_main.cc', ['int main() {return 0;}'])
# Building //main:main should succeed
exit_code, _, stderr = self.RunBazel(
['build', '//main:main', '--incompatible_avoid_conflict_dlls'])
self.AssertExitCode(exit_code, 0, stderr)
main_bin = os.path.join(bazel_bin, 'main/main.exe')
# Run the main_bin binary to see if it runs successfully
exit_code, stdout, stderr = self.RunProgram([main_bin])
self.AssertExitCode(exit_code, 0, stderr)
self.assertEqual(['Hello A, 1', 'Hello A, 2', 'Hello B', 'Hello C'], stdout)
# There is only 1 A_{hash}.dll since //main:main depends transitively on
# //:A
self.assertEqual(
len(glob.glob(os.path.join(bazel_bin, 'main', 'A_*.dll'))), 1)
# There is only 1 B_{hash}.dll
self.assertEqual(
len(glob.glob(os.path.join(bazel_bin, 'main', 'B_*.dll'))), 1)
# Building //main:other_main should succeed
exit_code, _, stderr = self.RunBazel([
'build', '//main:main', '//main:other_main',
'--incompatible_avoid_conflict_dlls'
])
self.AssertExitCode(exit_code, 0, stderr)
other_main_bin = os.path.join(bazel_bin, 'main/other_main.exe')
# Run the other_main_bin binary to see if it runs successfully
exit_code, stdout, stderr = self.RunProgram([other_main_bin])
self.AssertExitCode(exit_code, 0, stderr)
# There are 2 A_{hash}.dll since //main:main depends on //:A
# and //main:other_main depends on //lib:A
self.assertEqual(
len(glob.glob(os.path.join(bazel_bin, 'main', 'A_*.dll'))), 2)
def testDLLIsCopiedFromExternalRepo(self):
self.ScratchFile('ext_repo/WORKSPACE')
self.ScratchFile('ext_repo/BUILD', [
'cc_library(',
' name = "A",',
' srcs = ["a.cc"],',
' features = ["windows_export_all_symbols"],',
' visibility = ["//visibility:public"],',
')',
])
self.ScratchFile('ext_repo/a.cc', [
'#include <stdio.h>',
'void hello_A() {',
' printf("Hello A\\n");',
'}',
])
self.ScratchFile('WORKSPACE', [
'local_repository(',
' name = "ext_repo",',
' path = "ext_repo",',
')',
])
self.ScratchFile('BUILD', [
'cc_binary(',
' name = "main",',
' srcs = ["main.cc"],',
' deps = ["@ext_repo//:A"],',
' linkstatic = 0,',
')',
])
self.ScratchFile('main.cc', [
'extern void hello_A();',
'',
'int main() {',
' hello_A();',
' return 0;',
'}',
])
bazel_bin = self.getBazelInfo('bazel-bin')
exit_code, _, stderr = self.RunBazel(['build', '//:main', '-s'])
self.AssertExitCode(exit_code, 0, stderr)
# Test if A.dll is copied to the directory of main.exe
main_bin = os.path.join(bazel_bin, 'main.exe')
self.assertTrue(os.path.exists(main_bin))
self.assertTrue(os.path.exists(os.path.join(bazel_bin, 'A_128f2c79c3.dll')))
# Run the binary to see if it runs successfully
exit_code, stdout, stderr = self.RunProgram([main_bin])
self.AssertExitCode(exit_code, 0, stderr)
self.assertEqual(['Hello A'], stdout)
def testDynamicLinkingMSVCRT(self):
self.createProjectFiles()
bazel_output = self.getBazelInfo('output_path')
# By default, it should link to msvcrt dynamically.
exit_code, _, stderr = self.RunBazel(
['build', '//:A', '--output_groups=dynamic_library', '-s'])
paramfile = os.path.join(
bazel_output, 'x64_windows-fastbuild/bin/A_6f2d5ec56a.dll-2.params')
self.AssertExitCode(exit_code, 0, stderr)
self.assertIn('/MD', ''.join(stderr))
self.AssertFileContentContains(paramfile, '/DEFAULTLIB:msvcrt.lib')
self.assertNotIn('/MT', ''.join(stderr))
self.AssertFileContentNotContains(paramfile, '/DEFAULTLIB:libcmt.lib')
# Test build in debug mode.
exit_code, _, stderr = self.RunBazel(
['build', '-c', 'dbg', '//:A', '--output_groups=dynamic_library', '-s'])
paramfile = os.path.join(bazel_output,
'x64_windows-dbg/bin/A_6f2d5ec56a.dll-2.params')
self.AssertExitCode(exit_code, 0, stderr)
self.assertIn('/MDd', ''.join(stderr))
self.AssertFileContentContains(paramfile, '/DEFAULTLIB:msvcrtd.lib')
self.assertNotIn('/MTd', ''.join(stderr))
self.AssertFileContentNotContains(paramfile, '/DEFAULTLIB:libcmtd.lib')
def testStaticLinkingMSVCRT(self):
self.createProjectFiles()
bazel_output = self.getBazelInfo('output_path')
# With static_link_msvcrt feature, it should link to msvcrt statically.
exit_code, _, stderr = self.RunBazel([
'build', '//:A', '--output_groups=dynamic_library',
'--features=static_link_msvcrt', '-s'
])
paramfile = os.path.join(
bazel_output, 'x64_windows-fastbuild/bin/A_6f2d5ec56a.dll-2.params')
self.AssertExitCode(exit_code, 0, stderr)
self.assertNotIn('/MD', ''.join(stderr))
self.AssertFileContentNotContains(paramfile, '/DEFAULTLIB:msvcrt.lib')
self.assertIn('/MT', ''.join(stderr))
self.AssertFileContentContains(paramfile, '/DEFAULTLIB:libcmt.lib')
# Test build in debug mode.
exit_code, _, stderr = self.RunBazel([
'build', '-c', 'dbg', '//:A', '--output_groups=dynamic_library',
'--features=static_link_msvcrt', '-s'
])
paramfile = os.path.join(bazel_output,
'x64_windows-dbg/bin/A_6f2d5ec56a.dll-2.params')
self.AssertExitCode(exit_code, 0, stderr)
self.assertNotIn('/MDd', ''.join(stderr))
self.AssertFileContentNotContains(paramfile, '/DEFAULTLIB:msvcrtd.lib')
self.assertIn('/MTd', ''.join(stderr))
self.AssertFileContentContains(paramfile, '/DEFAULTLIB:libcmtd.lib')
def testBuildSharedLibraryFromCcBinaryWithStaticLink(self):
self.createProjectFiles()
self.ScratchFile(
'main/BUILD',
[
'cc_binary(',
' name = "main.dll",',
' srcs = ["main.cc"],',
' deps = ["//:B"],', # Transitively depends on //:A
' linkstatic = 1,'
' linkshared = 1,'
' features=["windows_export_all_symbols"]',
')',
])
bazel_bin = self.getBazelInfo('bazel-bin')
exit_code, _, stderr = self.RunBazel([
'build', '//main:main.dll',
'--output_groups=default,runtime_dynamic_libraries,interface_library'
])
self.AssertExitCode(exit_code, 0, stderr)
main_library = os.path.join(bazel_bin, 'main/main.dll')
main_interface = os.path.join(bazel_bin, 'main/main.dll.if.lib')
def_file = os.path.join(bazel_bin, 'main/main.dll.gen.def')
self.assertTrue(os.path.exists(main_library))
self.assertTrue(os.path.exists(main_interface))
self.assertTrue(os.path.exists(def_file))
# A.dll and B.dll should not be copied.
self.assertFalse(os.path.exists(os.path.join(bazel_bin, 'main/A.dll')))
self.assertFalse(os.path.exists(os.path.join(bazel_bin, 'main/B.dll')))
self.AssertFileContentContains(def_file, 'hello_A')
self.AssertFileContentContains(def_file, 'hello_B')
self.AssertFileContentContains(def_file, 'hello_C')
def testBuildSharedLibraryFromCcBinaryWithDynamicLink(self):
self.createProjectFiles()
self.ScratchFile(
'main/BUILD',
[
'cc_binary(',
' name = "main.dll",',
' srcs = ["main.cc"],',
' deps = ["//:B"],', # Transitively depends on //:A
' linkstatic = 0,'
' linkshared = 1,'
' features=["windows_export_all_symbols"]',
')',
'',
'genrule(',
' name = "renamed_main",',
' srcs = [":main.dll"],',
' outs = ["main_renamed.dll"],',
' cmd = "cp $< $@",',
')',
])
bazel_bin = self.getBazelInfo('bazel-bin')
exit_code, _, stderr = self.RunBazel([
'build', '//main:main.dll',
'--output_groups=default,runtime_dynamic_libraries,interface_library'
])
self.AssertExitCode(exit_code, 0, stderr)
main_library = os.path.join(bazel_bin, 'main/main.dll')
main_interface = os.path.join(bazel_bin, 'main/main.dll.if.lib')
def_file = os.path.join(bazel_bin, 'main/main.dll.gen.def')
self.assertTrue(os.path.exists(main_library))
self.assertTrue(os.path.exists(main_interface))
self.assertTrue(os.path.exists(def_file))
# A.dll and B.dll should be built and copied because they belong to
# runtime_dynamic_libraries output group.
self.assertTrue(
os.path.exists(os.path.join(bazel_bin, 'main/A_6f2d5ec56a.dll')))
self.assertTrue(
os.path.exists(os.path.join(bazel_bin, 'main/B_6f2d5ec56a.dll')))
# hello_A and hello_B should not be exported.
self.AssertFileContentNotContains(def_file, 'hello_A')
self.AssertFileContentNotContains(def_file, 'hello_B')
self.AssertFileContentContains(def_file, 'hello_C')
# The copy should succeed since //main:main.dll is only supposed to refer to
    # main.dll; A.dll and B.dll should be in a separate output group.
exit_code, _, stderr = self.RunBazel(['build', '//main:renamed_main'])
self.AssertExitCode(exit_code, 0, stderr)
def testGetDefFileOfSharedLibraryFromCcBinary(self):
self.createProjectFiles()
self.ScratchFile(
'main/BUILD',
[
'cc_binary(',
' name = "main.dll",',
' srcs = ["main.cc"],',
' deps = ["//:B"],', # Transitively depends on //:A
' linkstatic = 1,'
' linkshared = 1,'
')',
])
bazel_bin = self.getBazelInfo('bazel-bin')
exit_code, _, stderr = self.RunBazel(
['build', '//main:main.dll', '--output_groups=def_file'])
self.AssertExitCode(exit_code, 0, stderr)
# Although windows_export_all_symbols is not specified for this target,
# we should still be able to get the DEF file by def_file output group.
def_file = os.path.join(bazel_bin, 'main/main.dll.gen.def')
self.assertTrue(os.path.exists(def_file))
self.AssertFileContentContains(def_file, 'hello_A')
self.AssertFileContentContains(def_file, 'hello_B')
self.AssertFileContentContains(def_file, 'hello_C')
def testBuildSharedLibraryWithoutAnySymbolExported(self):
self.createProjectFiles()
self.ScratchFile('BUILD', [
'cc_binary(',
' name = "A.dll",',
' srcs = ["a.cc", "a.h"],',
' copts = ["/DNO_DLLEXPORT"],',
' linkshared = 1,'
')',
])
bazel_bin = self.getBazelInfo('bazel-bin')
exit_code, _, stderr = self.RunBazel(['build', '//:A.dll'])
self.AssertExitCode(exit_code, 0, stderr)
# Although windows_export_all_symbols is not specified for this target,
# we should still be able to build a DLL without any symbol exported.
empty_def_file = os.path.join(bazel_bin, 'A.dll.gen.empty.def')
self.assertTrue(os.path.exists(empty_def_file))
self.AssertFileContentNotContains(empty_def_file, 'hello_A')
def testUsingDefFileGeneratedFromCcLibrary(self):
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile('lib_A.cc', ['void hello_A() {}'])
self.ScratchFile('lib_B.cc', ['void hello_B() {}'])
self.ScratchFile('BUILD', [
'cc_library(',
' name = "lib_A",',
' srcs = ["lib_A.cc"],',
')',
'',
'cc_library(',
' name = "lib_B",',
' srcs = ["lib_B.cc"],',
' deps = [":lib_A"]',
')',
'',
'filegroup(',
' name = "lib_B_symbols",',
' srcs = [":lib_B"],',
' output_group = "def_file",',
')',
'',
'cc_binary(',
' name = "lib.dll",',
' deps = [":lib_B"],',
' win_def_file = ":lib_B_symbols",',
' linkshared = 1,',
')',
])
# Test specifying DEF file in cc_binary
bazel_bin = self.getBazelInfo('bazel-bin')
exit_code, _, stderr = self.RunBazel(['build', '//:lib.dll', '-s'])
self.AssertExitCode(exit_code, 0, stderr)
def_file = bazel_bin + '/lib_B.gen.def'
self.assertTrue(os.path.exists(def_file))
# hello_A should not be exported
self.AssertFileContentNotContains(def_file, 'hello_A')
# hello_B should be exported
self.AssertFileContentContains(def_file, 'hello_B')
def testWinDefFileAttribute(self):
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile('lib.cc', ['void hello() {}'])
self.ScratchFile('my_lib.def', [
'EXPORTS',
' ?hello@@YAXXZ',
])
self.ScratchFile('BUILD', [
'cc_library(',
' name = "lib",',
' srcs = ["lib.cc"],',
' win_def_file = "my_lib.def",',
')',
'',
'cc_binary(',
' name = "lib_dy.dll",',
' srcs = ["lib.cc"],',
' win_def_file = "my_lib.def",',
' linkshared = 1,',
')',
])
# Test exporting symbols using custom DEF file in cc_library.
    # Auto-generating a DEF file should be disabled when a custom DEF file is specified.
    # Renaming the DLL should be disabled when a custom DEF file is specified.
exit_code, _, stderr = self.RunBazel([
'build', '//:lib', '-s', '--output_groups=dynamic_library',
'--features=windows_export_all_symbols'
])
self.AssertExitCode(exit_code, 0, stderr)
bazel_bin = self.getBazelInfo('bazel-bin')
lib_if = os.path.join(bazel_bin, 'lib.if.lib')
lib_def = os.path.join(bazel_bin, 'lib.gen.def')
lib_dll = os.path.join(bazel_bin, 'lib.dll')
self.assertTrue(os.path.exists(lib_if))
self.assertFalse(os.path.exists(lib_def))
self.assertTrue(os.path.exists(lib_dll))
# Test specifying DEF file in cc_binary
exit_code, _, stderr = self.RunBazel(['build', '//:lib_dy.dll', '-s'])
self.AssertExitCode(exit_code, 0, stderr)
filepath = bazel_bin + '/lib_dy.dll-2.params'
with open(filepath, 'r', encoding='latin-1') as param_file:
self.assertIn('/DEF:my_lib.def', param_file.read())
def testCcImportRule(self):
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile('A.lib', [])
self.ScratchFile('A.dll', [])
self.ScratchFile('A.if.lib', [])
self.ScratchFile('BUILD', [
'cc_import(',
' name = "a_import",',
' static_library = "A.lib",',
' shared_library = "A.dll",',
' interface_library = "A.if.lib",',
' hdrs = ["a.h"],',
' alwayslink = 1,',
')',
])
exit_code, _, stderr = self.RunBazel([
'build', '//:a_import',
])
self.AssertExitCode(exit_code, 0, stderr)
def testCopyDLLAsSource(self):
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile('BUILD', [
'cc_import(',
' name = "a_import",',
' shared_library = "A.dll",',
' visibility = ["//:__subpackages__"],',
')',
''
'filegroup(',
' name = "bin_src",',
' srcs = ["bin.cc"],',
' visibility = ["//:__subpackages__"],',
')',
'',
'cc_binary(',
' name = "bin",',
' srcs = ["//:bin_src"],',
' deps = ["//:a_import"],',
')',
])
self.ScratchFile('package/BUILD', [
'cc_binary(',
' name = "dir1/dir2/bin",',
' srcs = ["//:bin_src"],',
' deps = ["//:a_import"],',
')',
])
self.ScratchFile('A.dll')
self.ScratchFile('bin.cc', [
'int main() {',
' return 0;',
'}',
])
exit_code, _, stderr = self.RunBazel([
'build',
'//:bin',
'//package:dir1/dir2/bin',
])
self.AssertExitCode(exit_code, 0, stderr)
bazel_bin = self.getBazelInfo('bazel-bin')
# Even though A.dll is in the same package as bin.exe, it still should
# be copied to the output directory of bin.exe.
a_dll = os.path.join(bazel_bin, 'A.dll')
self.assertTrue(os.path.exists(a_dll))
nested_a_dll = os.path.join(bazel_bin, 'package/dir1/dir2/A.dll')
self.assertTrue(os.path.exists(nested_a_dll))
def testCppErrorShouldBeVisible(self):
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile('BUILD', [
'cc_binary(',
' name = "bad",',
' srcs = ["bad.cc"],',
')',
])
self.ScratchFile('bad.cc', [
'int main(int argc, char** argv) {',
' this_is_an_error();',
'}',
])
exit_code, stdout, stderr = self.RunBazel(['build', '//:bad'])
self.AssertExitCode(exit_code, 1, stderr)
self.assertIn('this_is_an_error', ''.join(stdout))
def testBuildWithClangClByCompilerFlag(self):
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile('BUILD', [
'cc_binary(',
' name = "main",',
' srcs = ["main.cc"],',
')',
])
self.ScratchFile('main.cc', [
'int main() {',
' return 0;',
'}',
])
exit_code, _, stderr = self.RunBazel([
'build', '-s', '--compiler=clang-cl',
'--incompatible_enable_cc_toolchain_resolution=false', '//:main'
])
self.AssertExitCode(exit_code, 0, stderr)
self.assertIn('clang-cl.exe', ''.join(stderr))
def testBuildWithClangClByToolchainResolution(self):
self.CreateWorkspaceWithDefaultRepos('WORKSPACE', [
'register_execution_platforms(',
' ":windows_clang"',
')',
'',
'register_toolchains(',
' "@local_config_cc//:cc-toolchain-x64_windows-clang-cl",',
')',
])
self.ScratchFile('BUILD', [
'platform(',
' name = "windows_clang",',
' constraint_values = [',
' "@platforms//cpu:x86_64",',
' "@platforms//os:windows",',
' "@bazel_tools//tools/cpp:clang-cl",',
' ]',
')',
'',
'cc_binary(',
' name = "main",',
' srcs = ["main.cc"],',
')',
])
self.ScratchFile('main.cc', [
'int main() {',
' return 0;',
'}',
])
exit_code, _, stderr = self.RunBazel([
'build', '-s', '--incompatible_enable_cc_toolchain_resolution=true',
'//:main'
])
self.AssertExitCode(exit_code, 0, stderr)
self.assertIn('clang-cl.exe', ''.join(stderr))
def createSimpleCppWorkspace(self, name):
work_dir = self.ScratchDir(name)
self.ScratchFile(name + '/WORKSPACE', ['workspace(name = "%s")' % name])
self.ScratchFile(
name + '/BUILD',
['cc_library(name = "lib", srcs = ["lib.cc"], hdrs = ["lib.h"])'])
self.ScratchFile(name + '/lib.h', ['void hello();'])
self.ScratchFile(name + '/lib.cc', ['#include "lib.h"', 'void hello() {}'])
return work_dir
# Regression test for https://github.com/bazelbuild/bazel/issues/9172
def testCacheBetweenWorkspaceWithDifferentNames(self):
cache_dir = self.ScratchDir('cache')
dir_a = self.createSimpleCppWorkspace('A')
dir_b = self.createSimpleCppWorkspace('B')
exit_code, _, stderr = self.RunBazel(
['build', '--disk_cache=' + cache_dir, ':lib'], cwd=dir_a)
self.AssertExitCode(exit_code, 0, stderr)
exit_code, _, stderr = self.RunBazel(
['build', '--disk_cache=' + cache_dir, ':lib'], cwd=dir_b)
self.AssertExitCode(exit_code, 0, stderr)
# Regression test for https://github.com/bazelbuild/bazel/issues/9321
def testCcCompileWithTreeArtifactAsSource(self):
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile('BUILD', [
'load(":genccs.bzl", "genccs")',
'',
'genccs(',
' name = "gen_tree",',
')',
'',
'cc_library(',
' name = "main",',
' srcs = [ "gen_tree" ]',
')',
'',
'cc_binary(',
' name = "genccs",',
' srcs = [ "genccs.cpp" ],',
')',
])
self.ScratchFile('genccs.bzl', [
'def _impl(ctx):',
' tree = ctx.actions.declare_directory(ctx.attr.name + ".cc")',
' ctx.actions.run(',
' inputs = [],',
' outputs = [ tree ],',
' arguments = [ tree.path ],',
' progress_message = "Generating cc files into \'%s\'" % tree.path,',
' executable = ctx.executable._tool,',
' )',
'',
' return [ DefaultInfo(files = depset([ tree ])) ]',
'',
'genccs = rule(',
' implementation = _impl,',
' attrs = {',
' "_tool": attr.label(',
' executable = True,',
' cfg = "host",',
' allow_files = True,',
' default = Label("//:genccs"),',
' )',
' }',
')',
])
self.ScratchFile('genccs.cpp', [
'#include <fstream>',
'#include <Windows.h>',
'using namespace std;',
'',
'int main (int argc, char *argv[]) {',
' CreateDirectory(argv[1], NULL);',
' ofstream myfile;',
' myfile.open(string(argv[1]) + string("/foo.cpp"));',
' myfile << "int main() { return 42; }";',
' return 0;',
'}',
])
exit_code, _, stderr = self.RunBazel(['build', '//:main'])
self.AssertExitCode(exit_code, 0, stderr)
def testBuild32BitCppBinaryWithMsvcCL(self):
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile('BUILD', [
'cc_binary(',
' name = "main",',
' srcs = ["main.cc"],',
')',
])
self.ScratchFile('main.cc', [
'int main() {',
' return 0;',
'}',
])
exit_code, _, stderr = self.RunBazel(
['build', '-s', '--cpu=x64_x86_windows', '//:main'])
self.AssertExitCode(exit_code, 0, stderr)
self.assertIn('x86\\cl.exe', '\n'.join(stderr))
def testBuildArmCppBinaryWithMsvcCL(self):
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile('BUILD', [
'cc_binary(',
' name = "main",',
' srcs = ["main.cc"],',
')',
])
self.ScratchFile('main.cc', [
'int main() {',
' return 0;',
'}',
])
exit_code, _, stderr = self.RunBazel(
['build', '-s', '--cpu=x64_arm_windows', '//:main'])
self.AssertExitCode(exit_code, 0, stderr)
self.assertIn('arm\\cl.exe', '\n'.join(stderr))
def testBuildArm64CppBinaryWithMsvcCL(self):
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile('BUILD', [
'cc_binary(',
' name = "main",',
' srcs = ["main.cc"],',
')',
])
self.ScratchFile('main.cc', [
'int main() {',
' return 0;',
'}',
])
exit_code, _, stderr = self.RunBazel(
['build', '-s', '--cpu=x64_arm64_windows', '//:main'])
self.AssertExitCode(exit_code, 0, stderr)
self.assertIn('arm64\\cl.exe', '\n'.join(stderr))
def testBuildCppBinaryWithMingwGCC(self):
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile('BUILD', [
'cc_binary(',
' name = "main",',
' srcs = ["main.cc"],',
')',
])
self.ScratchFile('main.cc', [
'int main() {',
' return 0;',
'}',
])
# Test build without debug and optimize modes.
exit_code, _, stderr = self.RunBazel(
['build', '-s', '--compiler=mingw-gcc', '//:main'])
self.AssertExitCode(exit_code, 0, stderr)
self.assertIn('mingw64\\bin\\gcc', '\n'.join(stderr))
self.assertNotIn('-g -Og', ''.join(stderr))
self.assertNotIn('-g0 -O3 -DNDEBUG -ffunction-sections -fdata-sections',
''.join(stderr))
self.assertNotIn('-Wl,--gc-sections', ''.join(stderr))
# Test build in debug mode.
exit_code, _, stderr = self.RunBazel(
['build', '-s', '--compiler=mingw-gcc', '-c', 'dbg', '//:main'])
self.AssertExitCode(exit_code, 0, stderr)
self.assertIn('mingw64\\bin\\gcc', '\n'.join(stderr))
self.assertIn('-g -Og', ''.join(stderr))
self.assertNotIn('-g0 -O3 -DNDEBUG -ffunction-sections -fdata-sections',
''.join(stderr))
self.assertNotIn('-Wl,--gc-sections', ''.join(stderr))
# Test build in optimize mode.
exit_code, _, stderr = self.RunBazel(
['build', '-s', '--compiler=mingw-gcc', '-c', 'opt', '//:main'])
self.AssertExitCode(exit_code, 0, stderr)
self.assertIn('mingw64\\bin\\gcc', '\n'.join(stderr))
self.assertNotIn('-g -Og', ''.join(stderr))
self.assertIn('-g0 -O3 -DNDEBUG -ffunction-sections -fdata-sections',
''.join(stderr))
self.assertIn('-Wl,--gc-sections', ''.join(stderr))
def testBuildCppBinaryWithMsysGCC(self):
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile('BUILD', [
'cc_binary(',
' name = "main",',
' srcs = ["main.cc"],',
')',
])
self.ScratchFile('main.cc', [
'int main() {',
' return 0;',
'}',
])
bazel_output = self.getBazelInfo('output_path')
paramfile = 'x64_windows-%s/bin/main.exe-2.params'
# Test build without debug and optimize modes.
exit_code, _, stderr = self.RunBazel(
['build', '-s', '--compiler=msys-gcc', '//:main'])
self.AssertExitCode(exit_code, 0, stderr)
self.assertIn('usr\\bin\\gcc', '\n'.join(stderr))
self.assertNotIn('-g -Og', ''.join(stderr))
self.assertNotIn('-g0 -O3 -DNDEBUG -ffunction-sections -fdata-sections',
''.join(stderr))
self.AssertFileContentNotContains(
os.path.join(bazel_output, paramfile % 'fastbuild'),
'-Wl,--gc-sections')
# Test build in debug mode.
exit_code, _, stderr = self.RunBazel(
['build', '-s', '--compiler=msys-gcc', '-c', 'dbg', '//:main'])
self.AssertExitCode(exit_code, 0, stderr)
self.assertIn('usr\\bin\\gcc', '\n'.join(stderr))
self.assertIn('-g -Og', ''.join(stderr))
self.assertNotIn('-g0 -O3 -DNDEBUG -ffunction-sections -fdata-sections',
''.join(stderr))
self.AssertFileContentNotContains(
os.path.join(bazel_output, paramfile % 'dbg'), '-Wl,--gc-sections')
# Test build in optimize mode.
exit_code, _, stderr = self.RunBazel(
['build', '-s', '--compiler=msys-gcc', '-c', 'opt', '//:main'])
self.AssertExitCode(exit_code, 0, stderr)
self.assertIn('usr\\bin\\gcc', '\n'.join(stderr))
self.assertNotIn('-g -Og', ''.join(stderr))
self.assertIn('-g0 -O3 -DNDEBUG -ffunction-sections -fdata-sections',
''.join(stderr))
self.AssertFileContentContains(
os.path.join(bazel_output, paramfile % 'opt'), '-Wl,--gc-sections')
if __name__ == '__main__':
unittest.main()
|
dexterx17/nodoSocket
|
refs/heads/master
|
clients/Python-2.7.6/Lib/encodings/punycode.py
|
586
|
# -*- coding: iso-8859-1 -*-
""" Codec for the Punicode encoding, as specified in RFC 3492
Written by Martin v. Löwis.
"""
import codecs
##################### Encoding #####################################
def segregate(str):
"""3.1 Basic code point segregation"""
base = []
extended = {}
for c in str:
if ord(c) < 128:
base.append(c)
else:
extended[c] = 1
extended = extended.keys()
extended.sort()
return "".join(base).encode("ascii"),extended
def selective_len(str, max):
"""Return the length of str, considering only characters below max."""
res = 0
for c in str:
if ord(c) < max:
res += 1
return res
def selective_find(str, char, index, pos):
"""Return a pair (index, pos), indicating the next occurrence of
char in str. index is the position of the character considering
only ordinals up to and including char, and pos is the position in
the full string. index/pos is the starting position in the full
string."""
l = len(str)
while 1:
pos += 1
if pos == l:
return (-1, -1)
c = str[pos]
if c == char:
return index+1, pos
elif c < char:
index += 1
def insertion_unsort(str, extended):
"""3.2 Insertion unsort coding"""
oldchar = 0x80
result = []
oldindex = -1
for c in extended:
index = pos = -1
char = ord(c)
curlen = selective_len(str, char)
delta = (curlen+1) * (char - oldchar)
while 1:
index,pos = selective_find(str,c,index,pos)
if index == -1:
break
delta += index - oldindex
result.append(delta-1)
oldindex = index
delta = 0
oldchar = char
return result
def T(j, bias):
# Punycode parameters: tmin = 1, tmax = 26, base = 36
res = 36 * (j + 1) - bias
if res < 1: return 1
if res > 26: return 26
return res
digits = "abcdefghijklmnopqrstuvwxyz0123456789"
def generate_generalized_integer(N, bias):
"""3.3 Generalized variable-length integers"""
result = []
j = 0
while 1:
t = T(j, bias)
if N < t:
result.append(digits[N])
return result
result.append(digits[t + ((N - t) % (36 - t))])
N = (N - t) // (36 - t)
j += 1
def adapt(delta, first, numchars):
if first:
delta //= 700
else:
delta //= 2
delta += delta // numchars
# ((base - tmin) * tmax) // 2 == 455
divisions = 0
while delta > 455:
delta = delta // 35 # base - tmin
divisions += 36
bias = divisions + (36 * delta // (delta + 38))
return bias
def generate_integers(baselen, deltas):
"""3.4 Bias adaptation"""
# Punycode parameters: initial bias = 72, damp = 700, skew = 38
result = []
bias = 72
for points, delta in enumerate(deltas):
s = generate_generalized_integer(delta, bias)
result.extend(s)
bias = adapt(delta, points==0, baselen+points+1)
return "".join(result)
def punycode_encode(text):
base, extended = segregate(text)
base = base.encode("ascii")
deltas = insertion_unsort(text, extended)
extended = generate_integers(len(base), deltas)
if base:
return base + "-" + extended
return extended
##################### Decoding #####################################
def decode_generalized_number(extended, extpos, bias, errors):
"""3.3 Generalized variable-length integers"""
result = 0
w = 1
j = 0
while 1:
try:
char = ord(extended[extpos])
except IndexError:
if errors == "strict":
raise UnicodeError, "incomplete punicode string"
return extpos + 1, None
extpos += 1
if 0x41 <= char <= 0x5A: # A-Z
digit = char - 0x41
elif 0x30 <= char <= 0x39:
digit = char - 22 # 0x30-26
elif errors == "strict":
raise UnicodeError("Invalid extended code point '%s'"
% extended[extpos])
else:
return extpos, None
t = T(j, bias)
result += digit * w
if digit < t:
return extpos, result
w = w * (36 - t)
j += 1
def insertion_sort(base, extended, errors):
"""3.2 Insertion unsort coding"""
char = 0x80
pos = -1
bias = 72
extpos = 0
while extpos < len(extended):
newpos, delta = decode_generalized_number(extended, extpos,
bias, errors)
if delta is None:
# There was an error in decoding. We can't continue because
# synchronization is lost.
return base
pos += delta+1
char += pos // (len(base) + 1)
if char > 0x10FFFF:
if errors == "strict":
raise UnicodeError, ("Invalid character U+%x" % char)
char = ord('?')
pos = pos % (len(base) + 1)
base = base[:pos] + unichr(char) + base[pos:]
bias = adapt(delta, (extpos == 0), len(base))
extpos = newpos
return base
def punycode_decode(text, errors):
pos = text.rfind("-")
if pos == -1:
base = ""
extended = text
else:
base = text[:pos]
extended = text[pos+1:]
base = unicode(base, "ascii", errors)
extended = extended.upper()
return insertion_sort(base, extended, errors)
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
res = punycode_encode(input)
return res, len(input)
def decode(self,input,errors='strict'):
if errors not in ('strict', 'replace', 'ignore'):
raise UnicodeError, "Unsupported error handling "+errors
res = punycode_decode(input, errors)
return res, len(input)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return punycode_encode(input)
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
if self.errors not in ('strict', 'replace', 'ignore'):
raise UnicodeError, "Unsupported error handling "+self.errors
return punycode_decode(input, self.errors)
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='punycode',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
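# Illustrative sketch (not part of the original module): once registered by the
# encodings package, the codec is reached through the normal str/unicode
# machinery (Python 2), e.g.
#
#     u"b\xfccher".encode("punycode")  # -> 'bcher-kva'
#     "bcher-kva".decode("punycode")   # -> u'b\xfccher'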
|
tdaylan/tdgu
|
refs/heads/master
|
sdss_glob.py
|
1
|
# coding: utf-8
# In[ ]:
# numpy
import numpy as np
from numpy import *
from numpy.random import *
from numpy.random import choice
import matplotlib.pyplot as plt
# scipy
import scipy as sp
from scipy import ndimage
from scipy.interpolate import *
from scipy.special import erfinv, erf
from scipy.stats import poisson as pss
from scipy import ndimage
# multiprocessing
import multiprocessing as mp
# healpy
import healpy as hp
from healpy.rotator import angdist
from healpy import ang2pix
# pyfits
import pyfits as pf
# utilities
import os, time, sys, datetime, warnings, getpass, glob, inspect
# tdpy
import tdpy
import sympy
# In[2]:
def writ_sdss():
pixlsize = 0.396 # [arcsec]
npixlside = 100
maxmgang = npixlside * pixlsize / 3600. / 2.
npixlheal = npixlside**2 * 12
apix = deg2rad(pixlsize / 3600.)**2
enerstrg = ['i', 'r', 'g']
nener = len(enerstrg)
iener = arange(nener)
nevtt = 1
from astropy.coordinates import ICRS, Galactic
from astropy import units as u
sdssdataflux = zeros((nener, npixlside, npixlside, nevtt))
for i in iener:
path = os.environ["PCAT_DATA_PATH"] + '/frame-' + enerstrg[i] + '-001458-4-0700.fits'
data, hdr = pf.getdata(path, 0, header=True)
rasccntr = hdr['CRVAL1']
declcntr = hdr['CRVAL2']
rascbndr = zeros(4)
declbndr = zeros(4)
rascbndr[0] = rasccntr - hdr['CRPIX1'] * hdr['CD2_1'] - hdr['CRPIX2'] * hdr['CD2_2']
declbndr[0] = declcntr - hdr['CRPIX1'] * hdr['CD1_1'] - hdr['CRPIX2'] * hdr['CD1_2']
rascbndr[1] = rasccntr - hdr['CRPIX1'] * hdr['CD2_1'] - hdr['CRPIX2'] * hdr['CD2_2']
declbndr[1] = declcntr + hdr['CRPIX1'] * hdr['CD1_1'] - hdr['CRPIX2'] * hdr['CD1_2']
rascbndr[1] = rasccntr - hdr['CRPIX1'] * hdr['CD2_1'] - hdr['CRPIX2'] * hdr['CD2_2']
declbndr[1] = declcntr - hdr['CRPIX1'] * hdr['CD1_1'] - hdr['CRPIX2'] * hdr['CD1_2']
rascbndr[0] = rasccntr - hdr['CRPIX1'] * hdr['CD2_1'] - hdr['CRPIX2'] * hdr['CD2_2']
declbndr[0] = declcntr - hdr['CRPIX1'] * hdr['CD1_1'] - hdr['CRPIX2'] * hdr['CD1_2']
#CRPIX1 = 1.02450000000000E+03 / Column Pixel Coordinate of Reference
#CRPIX2 = 7.44500000000000E+02 / Row Pixel Coordinate of Reference Pix
#CRVAL1 = 6.42142612400000E+01 / DEC at Reference Pixel
#CRVAL2 = 2.51207342810000E+02 / RA at Reference Pixel
#CD1_1 = 4.75626416015645E-05 / DEC degrees per column pixel
#CD1_2 = -9.9116868279565E-05 / DEC degrees per row pixel
#CD2_1 = 9.91802898939385E-05 / RA degrees per column pixel
print 'rascbndr'
print rascbndr
print 'declbndr'
print declbndr
print 'data.shape'
print data.shape
calb, hdr = pf.getdata(path, 1, header=True)
back, hdr = pf.getdata(path, 2, header=True)
xaxi = arange(back['ALLSKY'].shape[1], dtype=float)
yaxi = arange(back['ALLSKY'].shape[2], dtype=float)
back = interp2d(xaxi, yaxi, back['ALLSKY'])(back['XINTERP'].flatten(), back['YINTERP'].flatten())
data /= calb[None, :]
data += back
print 'amin(data)'
print amin(data)
print 'amax(data)'
print amax(data)
print 'amin(back)'
print amin(back)
print 'amax(back)'
print amax(back)
print
data /= apix
print 'Frame RA: ', rasccntr
print 'Frame DEC: ', declcntr
#rasccntr = rasccntr + rascpixlsize * (data.shape[0] - npixlside) / 2. / 3600.
#declcntr = declcntr - declpixlsize * (data.shape[1] - npixlside) / 2. / 3600.
objt = ICRS(ra=rasccntr, dec=declcntr, unit=(u.degree, u.degree))
lgalcntr = objt.galactic.l.degree
bgalcntr = objt.galactic.b.degree
print 'Patch RA: ', rasccntr
print 'Patch DEC: ', declcntr
print 'Patch l: ', lgalcntr
print 'Patch b: ', bgalcntr
sdssdataflux[i, :, :, 0] = data[-npixlside:, -npixlside:]
path = os.environ["PCAT_DATA_PATH"] + '/sdssdataflux.fits'
pf.writeto(path, sdssdataflux, clobber=True)
sdssbackflux = ones((nener, npixlside, npixlside, nevtt)) / apix
path = os.environ["PCAT_DATA_PATH"] + '/sdssbackflux.fits'
pf.writeto(path, sdssbackflux, clobber=True)
sdssexpo = ones((nener, npixlside, npixlside, nevtt))
path = os.environ["PCAT_DATA_PATH"] + '/sdssexpo.fits'
pf.writeto(path, sdssexpo, clobber=True)
def cnfg_sdss_mock():
indxenerincl = arange(3)
indxevttincl = arange(1)
init( \
pathdata=os.environ["PCAT_DATA_PATH"], \
numbswep=100000, \
minmflux=array([1e3]), \
maxmflux=array([1e5]), \
initnumbpnts=array([100]), \
exprtype='sdss', \
pixltype='cart', \
regitype='mes5', \
stdvlbhl=2./3600., \
lgalcntr=202., \
bgalcntr=2., \
radispmrlbhl=5./3600., \
maxmgang=30./3600., \
margsize=2./3600., \
back=['unit'], \
psfntype='doubgaus', \
strgexpo='unit', \
indxevttincl=indxevttincl, \
indxenerincl=indxenerincl, \
datatype='mock', \
numbsidecart=100, \
mockfluxdistslop=array([1.9]), \
)
def cnfg_sdss_expr():
init( \
pathdata=os.environ["PCAT_DATA_PATH"], \
trueinfo=False, \
numbswep=1000000, \
minmflux=ones(3) * 1e3, \
maxmflux=ones(3) * 1e5, \
initnumbpnts=array([10]), \
exprtype='sdss', \
datatype='inpt', \
pixltype='cart', \
regitype='mes5', \
stdvlbhl=2./3600., \
lgalcntr=202., \
bgalcntr=2., \
radispmrlbhl=0.5/3600., \
stdvflux=0.05, \
margsize=2./3600., \
maxmgang=30./3600., \
strgexpr='sdssflux.fits', \
strgexpo='sdssexpo.fits', \
psfntype='doubgaus', \
stdvback=1e-4, \
indxevttincl=arange(1), \
indxenerincl=arange(1), \
)
|
crunchsec/fimap
|
refs/heads/master
|
src/xgoogle/search.py
|
13
|
#!/usr/bin/python
#
# Peteris Krumins (peter@catonmat.net)
# http://www.catonmat.net -- good coders code, great reuse
#
# http://www.catonmat.net/blog/python-library-for-google-search/
#
# Code is licensed under MIT license.
#
import re
import urllib
from htmlentitydefs import name2codepoint
from BeautifulSoup import BeautifulSoup
from browser import Browser, BrowserError
class SearchError(Exception):
"""
Base class for Google Search exceptions.
"""
pass
class ParseError(SearchError):
"""
Parse error in Google results.
    self.msg attribute contains an explanation of why parsing failed
    self.tag attribute contains the BeautifulSoup object with the most relevant tag that failed to parse
    Thrown only in debug mode
"""
def __init__(self, msg, tag):
self.msg = msg
self.tag = tag
def __str__(self):
return self.msg
def html(self):
return self.tag.prettify()
class SearchResult:
def __init__(self, title, url, desc):
self.title = title
self.url = url
self.desc = desc
def __str__(self):
return 'Google Search Result: "%s"' % self.title
class GoogleSearch(object):
SEARCH_URL_0 = "http://www.google.com/search?q=%(query)s&btnG=Google+Search"
NEXT_PAGE_0 = "http://www.google.com/search?q=%(query)s&start=%(start)d"
SEARCH_URL_1 = "http://www.google.com/search?q=%(query)s&num=%(num)d&btnG=Google+Search"
NEXT_PAGE_1 = "http://www.google.com/search?q=%(query)s&num=%(num)d&start=%(start)d"
def __init__(self, query, random_agent=False, debug=False, page=0):
self.query = query
self.debug = debug
self.browser = Browser(debug=debug)
self.results_info = None
self.eor = False # end of results
self._page = page
self._results_per_page = 10
self._last_from = 0
if random_agent:
self.browser.set_random_user_agent()
@property
def num_results(self):
if not self.results_info:
page = self._get_results_page()
self.results_info = self._extract_info(page)
if self.results_info['total'] == 0:
self.eor = True
return self.results_info['total']
def _get_page(self):
return self._page
def _set_page(self, page):
self._page = page
page = property(_get_page, _set_page)
def _get_results_per_page(self):
return self._results_per_page
    def _set_results_per_page(self, rpp):
        self._results_per_page = rpp
    results_per_page = property(_get_results_per_page, _set_results_per_page)
def get_results(self):
""" Gets a page of results """
if self.eor:
return []
MAX_VALUE = 1000000
page = self._get_results_page()
#search_info = self._extract_info(page)
results = self._extract_results(page)
search_info = {'from': self.results_per_page*self._page,
'to': self.results_per_page*self._page + len(results),
'total': MAX_VALUE}
if not self.results_info:
self.results_info = search_info
if self.num_results == 0:
self.eor = True
return []
if not results:
self.eor = True
return []
if self._page > 0 and search_info['from'] == self._last_from:
self.eor = True
return []
if search_info['to'] == search_info['total']:
self.eor = True
self._page += 1
self._last_from = search_info['from']
return results
def _maybe_raise(self, cls, *arg):
if self.debug:
raise cls(*arg)
def _get_results_page(self):
if self._page == 0:
if self._results_per_page == 10:
url = GoogleSearch.SEARCH_URL_0
else:
url = GoogleSearch.SEARCH_URL_1
else:
if self._results_per_page == 10:
url = GoogleSearch.NEXT_PAGE_0
else:
url = GoogleSearch.NEXT_PAGE_1
safe_url = url % { 'query': urllib.quote_plus(self.query),
'start': self._page * self._results_per_page,
'num': self._results_per_page }
try:
page = self.browser.get_page(safe_url)
except BrowserError, e:
raise SearchError, "Failed getting %s: %s" % (e.url, e.error)
return BeautifulSoup(page)
def _extract_info(self, soup):
empty_info = {'from': 0, 'to': 0, 'total': 0}
div_ssb = soup.find('div', id='ssb')
if not div_ssb:
self._maybe_raise(ParseError, "Div with number of results was not found on Google search page", soup)
return empty_info
p = div_ssb.find('p')
if not p:
self._maybe_raise(ParseError, """<p> tag within <div id="ssb"> was not found on Google search page""", soup)
return empty_info
txt = ''.join(p.findAll(text=True))
txt = txt.replace(',', '')
matches = re.search(r'Results (\d+) - (\d+) of (?:about )?(\d+)', txt, re.U)
if not matches:
return empty_info
return {'from': int(matches.group(1)), 'to': int(matches.group(2)), 'total': int(matches.group(3))}
def _extract_results(self, soup):
results = soup.findAll('li', {'class': 'g'})
ret_res = []
for result in results:
eres = self._extract_result(result)
if eres:
ret_res.append(eres)
return ret_res
def _extract_result(self, result):
title, url = self._extract_title_url(result)
desc = self._extract_description(result)
if not title or not url or not desc:
return None
return SearchResult(title, url, desc)
def _extract_title_url(self, result):
#title_a = result.find('a', {'class': re.compile(r'\bl\b')})
title_a = result.find('a')
if not title_a:
self._maybe_raise(ParseError, "Title tag in Google search result was not found", result)
return None, None
title = ''.join(title_a.findAll(text=True))
title = self._html_unescape(title)
url = title_a['href']
match = re.match(r'/url\?q=(http[^&]+)&', url)
if match:
url = urllib.unquote(match.group(1))
return title, url
def _extract_description(self, result):
desc_div = result.find('div', {'class': re.compile(r'\bs\b')})
if not desc_div:
self._maybe_raise(ParseError, "Description tag in Google search result was not found", result)
return None
desc_strs = []
def looper(tag):
if not tag: return
for t in tag:
try:
if t.name == 'br': break
except AttributeError:
pass
try:
desc_strs.append(t.string)
except AttributeError:
desc_strs.append(t)
looper(desc_div)
looper(desc_div.find('wbr')) # BeautifulSoup does not self-close <wbr>
desc = ''.join(s for s in desc_strs if s)
return self._html_unescape(desc)
def _html_unescape(self, str):
def entity_replacer(m):
entity = m.group(1)
if entity in name2codepoint:
return unichr(name2codepoint[entity])
else:
return m.group(0)
def ascii_replacer(m):
cp = int(m.group(1))
if cp <= 255:
return unichr(cp)
else:
return m.group(0)
        # re.U must be passed as a flag; the original code passed it as re.sub's positional count argument
        s = re.sub(r'&#(\d+);', ascii_replacer, str, flags=re.U)
        return re.sub(r'&([^;]+);', entity_replacer, s, flags=re.U)
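# --- Illustrative usage (editor's sketch, not part of the original xgoogle module). ---
# The query below is hypothetical, and Google's result markup has changed since this
# scraper was written, so a live run may return nothing or raise SearchError.
if __name__ == "__main__":
    try:
        gs = GoogleSearch("quick brown fox", random_agent=True)
        gs.results_per_page = 25
        for res in gs.get_results():
            print res.title, res.url
    except SearchError, e:
        print "Search failed: %s" % e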
|
andyneff/voxel-globe
|
refs/heads/master
|
voxel_globe/clif/tools.py
|
1
|
import os
def split_clif(filename):
    ''' Return (dirname, camera_id, image_number, extension) for a CLIF image path '''
    dirname = os.path.dirname(filename)
    filename = os.path.basename(filename)
    (filename, extension) = os.path.splitext(filename)
    (camera_id, image_number) = filename.split('-')
    return (dirname, camera_id, image_number, extension)
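# Editor's sketch (not in the original module): split_clif assumes CLIF frames are
# named "<camera_id>-<image_number><ext>"; the path below is purely hypothetical.
if __name__ == "__main__":
    # Expected: ('/data/clif', 'cam0', '000123', '.png')
    print(split_clif('/data/clif/cam0-000123.png'))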
|
igorkramaric/resigner
|
refs/heads/master
|
test_project/resigner_tests/urls.py
|
2
|
from django.conf.urls import *
from .views import my_test_api_view
urlpatterns = [
url(r'^my_test_api_url/', my_test_api_view, name="my_test_api"),
]
|
ashutrix03/inteygrate_flaskapp-master
|
refs/heads/master
|
build/lib/yowsup/layers/protocol_privacy/protocolentities/privacylist_iq.py
|
68
|
from yowsup.layers.protocol_iq.protocolentities import IqProtocolEntity
from yowsup.structs import ProtocolTreeNode
class PrivacyListIqProtocolEntity(IqProtocolEntity):
def __init__(self, name = "default"):
super(PrivacyListIqProtocolEntity, self).__init__("jabber:iq:privacy", _type="get")
self.setListName(name)
def setListName(self, name):
self.listName = name
def toProtocolTreeNode(self):
node = super(PrivacyListIqProtocolEntity, self).toProtocolTreeNode()
queryNode = ProtocolTreeNode("query")
listNode = ProtocolTreeNode("list", {"name": self.listName})
queryNode.addChild(listNode)
node.addChild(queryNode)
return node
@staticmethod
def fromProtocolTreeNode(node):
entity = IqProtocolEntity.fromProtocolTreeNode(node)
entity.__class__ = PrivacyListIqProtocolEntity
entity.setListName(node.getChild("query").getChild("list")["name"])
return entity
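# Editor's sketch (not in the original yowsup source): following toProtocolTreeNode above,
# PrivacyListIqProtocolEntity("default") serializes to roughly
#   <iq type="get" xmlns="jabber:iq:privacy">
#     <query>
#       <list name="default"/>
#     </query>
#   </iq>
# with the iq-level attributes (id, type, xmlns) supplied by IqProtocolEntity.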
|
List3nt0/CodeLibrary
|
refs/heads/master
|
qrcode-generator-master/src/pyqrgen/test.py
|
4
|
#!/usr/bin/env python
import cairo
import pyqrgen
SIZE = 220
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, SIZE, SIZE)
ctx = cairo.Context(surface)
#ctx.scale(WIDTH, HEIGHT)
ctx.rectangle(0, 0, SIZE, SIZE)
ctx.set_source_rgb(1, 1, 1)
ctx.fill()
# autoversion, mode Byte, ecl h
pyqrgen.generate("green pride", 0, 4, 2, 3, ctx, SIZE)
surface.write_to_png("example.png")
|
aehlke/epywing
|
refs/heads/master
|
src/epywing/tests/epwing.py
|
1
|
# -*- coding: utf-8 -*-
#
import unittest
#from epywing.bookmanager import BookManager
from epywing.bookmanager import BookManager
from epywing.epwing import EpwingBook
import os
#class TestEpwing(unittest.TestCase):
# def setUp(self):
# self.
def test_epwing_integrations():
search_path = os.path.split(os.path.abspath(__file__))[0]
print search_path
manager = BookManager()
book_paths = manager.find_books_in_path(search_path)
print book_paths
manager.add_books(*book_paths)
ej = manager.books.items()[1][1]
tai = manager.books.items()[0][1]
print list(ej.search('cute'))
list(tai.search(u'horse'))
list(tai.search(u'horse', search_method='prefix'))[1]
list(manager.search_all('good'))
print list(manager.search_all_and_combine_results('good'))
if __name__ == "__main__":
test_epwing_integrations()
|
jness/django-rest-framework
|
refs/heads/master
|
tests/browsable_api/auth_urls.py
|
95
|
from __future__ import unicode_literals
from django.conf.urls import include, url
from .views import MockView
urlpatterns = [
url(r'^$', MockView.as_view()),
url(r'^auth/', include('rest_framework.urls', namespace='rest_framework')),
]
|
StevenYCChou/android-kernel
|
refs/heads/master
|
tools/perf/util/setup.py
|
4998
|
#!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
self.build_lib = build_lib
self.build_temp = build_tmp
class install_lib(_install_lib):
def finalize_options(self):
_install_lib.finalize_options(self)
self.build_dir = build_lib
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
ext_sources = [f.strip() for f in file('util/python-ext-sources')
if len(f.strip()) > 0 and f[0] != '#']
perf = Extension('perf',
sources = ext_sources,
include_dirs = ['util/include'],
extra_compile_args = cflags,
)
setup(name='perf',
version='0.1',
description='Interface with the Linux profiling infrastructure',
author='Arnaldo Carvalho de Melo',
author_email='acme@redhat.com',
license='GPLv2',
url='http://perf.wiki.kernel.org',
ext_modules=[perf],
cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
|
ceramos/micropython
|
refs/heads/master
|
tests/basics/list_extend.py
|
101
|
# test list.__iadd__ and list.extend (they are equivalent)
l = [1, 2]
l.extend([])
print(l)
l.extend([3])
print(l)
l.extend([4, 5])
print(l)
l.extend(range(6, 10))
print(l)
l.extend("abc")
print(l)
l = [1, 2]
l += []
print(l)
l += [3]
print(l)
l += [4, 5]
print(l)
l += range(6, 10)
print(l)
l += "abc"
print(l)
|
glaubitz/fs-uae-debian
|
refs/heads/master
|
launcher/OpenGL/raw/GLES1/_glgets.py
|
33
|
"""glGet* auto-generation of output arrays (DO NOT EDIT, AUTOGENERATED)"""
try:
from OpenGL.raw.GL._lookupint import LookupInt as _L
except ImportError:
def _L(*args):
raise RuntimeError( "Need to define a lookupint for this api" )
_glget_size_mapping = _m = {}
# _m[0x8095] = TODO # GL_DETAIL_TEXTURE_2D_SGIS
# _m[0x809C] = TODO # GL_DETAIL_TEXTURE_FUNC_POINTS_SGIS
# _m[0x809A] = TODO # GL_DETAIL_TEXTURE_LEVEL_SGIS
# _m[0x809B] = TODO # GL_DETAIL_TEXTURE_MODE_SGIS
# _m[0x8124] = TODO # GL_DUAL_TEXTURE_SELECT_SGIS
# _m[0x8191] = TODO # GL_GENERATE_MIPMAP_SGIS
# _m[0x8182] = TODO # GL_LIST_PRIORITY_SGIX
# _m[0x85A0] = TODO # GL_PACK_SUBSAMPLE_RATE_SGIX
# _m[0x8063] = TODO # GL_PROXY_TEXTURE_1D
# _m[0x8063] = TODO # GL_PROXY_TEXTURE_1D_EXT
# _m[0x8064] = TODO # GL_PROXY_TEXTURE_2D
# _m[0x8064] = TODO # GL_PROXY_TEXTURE_2D_EXT
# _m[0x8070] = TODO # GL_PROXY_TEXTURE_3D
# _m[0x8070] = TODO # GL_PROXY_TEXTURE_3D_EXT
# _m[0x8135] = TODO # GL_PROXY_TEXTURE_4D_SGIS
# _m[0x8125] = TODO # GL_QUAD_TEXTURE_SELECT_SGIS
# _m[0x80BF] = TODO # GL_SHADOW_AMBIENT_SGIX
# _m[0x81FB] = TODO # GL_SHARED_TEXTURE_PALETTE_EXT
# _m[0x80B0] = TODO # GL_SHARPEN_TEXTURE_FUNC_POINTS_SGIS
# _m[0x8136] = TODO # GL_TEXTURE_4DSIZE_SGIS
# _m[0x8175] = TODO # GL_TEXTURE_CLIPMAP_LOD_OFFSET_SGIX
# _m[0x1003] = TODO # GL_TEXTURE_COMPONENTS
# _m[0x8147] = TODO # GL_TEXTURE_FILTER4_SIZE_SGIS
# _m[0x819D] = TODO # GL_TEXTURE_GEQUAL_R_SGIX
# _m[0x819C] = TODO # GL_TEXTURE_LEQUAL_R_SGIX
# _m[0x85A1] = TODO # GL_UNPACK_SUBSAMPLE_RATE_SGIX
_m[0x0D5B] = (1,) # GL_ACCUM_ALPHA_BITS
_m[0x0D5A] = (1,) # GL_ACCUM_BLUE_BITS
_m[0x0B80] = (4,) # GL_ACCUM_CLEAR_VALUE
_m[0x0D59] = (1,) # GL_ACCUM_GREEN_BITS
_m[0x0D58] = (1,) # GL_ACCUM_RED_BITS
_m[0x8B89] = (1,) # GL_ACTIVE_ATTRIBUTES
_m[0x8B8A] = (1,) # GL_ACTIVE_ATTRIBUTE_MAX_LENGTH
_m[0x8259] = (1,) # GL_ACTIVE_PROGRAM
_m[0x92F5] = (1,) # GL_ACTIVE_RESOURCES
_m[0x8911] = (1,) # GL_ACTIVE_STENCIL_FACE_EXT
_m[0x84E0] = (1,) # GL_ACTIVE_TEXTURE
_m[0x84E0] = (1,) # GL_ACTIVE_TEXTURE_ARB
_m[0x8B86] = (1,) # GL_ACTIVE_UNIFORMS
_m[0x8A36] = (1,) # GL_ACTIVE_UNIFORM_BLOCKS
_m[0x8A35] = (1,) # GL_ACTIVE_UNIFORM_BLOCK_MAX_NAME_LENGTH
_m[0x8B87] = (1,) # GL_ACTIVE_UNIFORM_MAX_LENGTH
_m[0x9305] = (1,) # GL_ACTIVE_VARIABLES
_m[0x86A5] = (1,) # GL_ACTIVE_VERTEX_UNITS_ARB
_m[0x846E] = (2,) # GL_ALIASED_LINE_WIDTH_RANGE
_m[0x846D] = (2,) # GL_ALIASED_POINT_SIZE_RANGE
_m[0x0D1D] = (1,) # GL_ALPHA_BIAS
_m[0x0D55] = (1,) # GL_ALPHA_BITS
_m[0x8567] = (1,) # GL_ALPHA_MAX_CLAMP_INGR
_m[0x8563] = (1,) # GL_ALPHA_MIN_CLAMP_INGR
_m[0x0D1C] = (1,) # GL_ALPHA_SCALE
_m[0x0BC0] = (1,) # GL_ALPHA_TEST
_m[0x0BC1] = (1,) # GL_ALPHA_TEST_FUNC
_m[0x0BC1] = (1,) # GL_ALPHA_TEST_FUNC_QCOM
_m[0x0BC0] = (1,) # GL_ALPHA_TEST_QCOM
_m[0x0BC2] = (1,) # GL_ALPHA_TEST_REF
_m[0x0BC2] = (1,) # GL_ALPHA_TEST_REF_QCOM
_m[0x1200] = (4,) # GL_AMBIENT
_m[0x1602] = (4,) # GL_AMBIENT_AND_DIFFUSE
_m[0x8894] = (1,) # GL_ARRAY_BUFFER_BINDING
_m[0x8894] = (1,) # GL_ARRAY_BUFFER_BINDING_ARB
_m[0x81A9] = (1,) # GL_ARRAY_ELEMENT_LOCK_COUNT_EXT
_m[0x81A8] = (1,) # GL_ARRAY_ELEMENT_LOCK_FIRST_EXT
_m[0x92FB] = (1,) # GL_ARRAY_SIZE
_m[0x92FE] = (1,) # GL_ARRAY_STRIDE
_m[0x835D] = (1,) # GL_ASYNC_DRAW_PIXELS_SGIX
_m[0x832C] = (1,) # GL_ASYNC_HISTOGRAM_SGIX
_m[0x8329] = (1,) # GL_ASYNC_MARKER_SGIX
_m[0x835E] = (1,) # GL_ASYNC_READ_PIXELS_SGIX
_m[0x835C] = (1,) # GL_ASYNC_TEX_IMAGE_SGIX
_m[0x92C1] = (1,) # GL_ATOMIC_COUNTER_BUFFER_BINDING
_m[0x9301] = (1,) # GL_ATOMIC_COUNTER_BUFFER_INDEX
_m[0x8B85] = (1,) # GL_ATTACHED_SHADERS
_m[0x0BB0] = (1,) # GL_ATTRIB_STACK_DEPTH
_m[0x0D80] = (1,) # GL_AUTO_NORMAL
_m[0x0C00] = (1,) # GL_AUX_BUFFERS
_m[0x843A] = (1,) # GL_BINORMAL_ARRAY_EXT
_m[0x8443] = (1,) # GL_BINORMAL_ARRAY_POINTER_EXT
_m[0x8441] = (1,) # GL_BINORMAL_ARRAY_STRIDE_EXT
_m[0x8440] = (1,) # GL_BINORMAL_ARRAY_TYPE_EXT
_m[0x0BE2] = (1,) # GL_BLEND
_m[0x8005] = (4,) # GL_BLEND_COLOR
_m[0x8005] = (4,) # GL_BLEND_COLOR_EXT
_m[0x0BE0] = (1,) # GL_BLEND_DST
_m[0x80CA] = (1,) # GL_BLEND_DST_ALPHA
_m[0x80CA] = (1,) # GL_BLEND_DST_ALPHA_EXT
_m[0x80C8] = (1,) # GL_BLEND_DST_RGB
_m[0x80C8] = (1,) # GL_BLEND_DST_RGB_EXT
_m[0x8009] = (1,) # GL_BLEND_EQUATION
_m[0x883D] = (1,) # GL_BLEND_EQUATION_ALPHA
_m[0x883D] = (1,) # GL_BLEND_EQUATION_ALPHA_EXT
_m[0x8009] = (1,) # GL_BLEND_EQUATION_EXT
_m[0x8009] = (1,) # GL_BLEND_EQUATION_RGB
_m[0x0BE1] = (1,) # GL_BLEND_SRC
_m[0x80CB] = (1,) # GL_BLEND_SRC_ALPHA
_m[0x80CB] = (1,) # GL_BLEND_SRC_ALPHA_EXT
_m[0x80C9] = (1,) # GL_BLEND_SRC_RGB
_m[0x80C9] = (1,) # GL_BLEND_SRC_RGB_EXT
_m[0x92FD] = (1,) # GL_BLOCK_INDEX
_m[0x0D1B] = (1,) # GL_BLUE_BIAS
_m[0x0D54] = (1,) # GL_BLUE_BITS
_m[0x8566] = (1,) # GL_BLUE_MAX_CLAMP_INGR
_m[0x8562] = (1,) # GL_BLUE_MIN_CLAMP_INGR
_m[0x0D1A] = (1,) # GL_BLUE_SCALE
_m[0x88BB] = (1,) # GL_BUFFER_ACCESS
_m[0x911F] = (1,) # GL_BUFFER_ACCESS_FLAGS
_m[0x9302] = (1,) # GL_BUFFER_BINDING
_m[0x9303] = (1,) # GL_BUFFER_DATA_SIZE
_m[0x88BC] = (1,) # GL_BUFFER_MAPPED
_m[0x9120] = (1,) # GL_BUFFER_MAP_LENGTH
_m[0x9121] = (1,) # GL_BUFFER_MAP_OFFSET
_m[0x88BD] = (1,) # GL_BUFFER_MAP_POINTER
_m[0x8764] = (1,) # GL_BUFFER_SIZE
_m[0x8765] = (1,) # GL_BUFFER_USAGE
_m[0x877C] = (1,) # GL_BUMP_TARGET_ATI
_m[0x8183] = (1,) # GL_CALLIGRAPHIC_FRAGMENT_SGIX
_m[0x891B] = (1,) # GL_CLAMP_FRAGMENT_COLOR
_m[0x891B] = (1,) # GL_CLAMP_FRAGMENT_COLOR_ARB
_m[0x891C] = (1,) # GL_CLAMP_READ_COLOR
_m[0x891C] = (1,) # GL_CLAMP_READ_COLOR_ARB
_m[0x891A] = (1,) # GL_CLAMP_VERTEX_COLOR
_m[0x891A] = (1,) # GL_CLAMP_VERTEX_COLOR_ARB
_m[0x84E1] = (1,) # GL_CLIENT_ACTIVE_TEXTURE
_m[0x84E1] = (1,) # GL_CLIENT_ACTIVE_TEXTURE_ARB
_m[0x0BB1] = (1,) # GL_CLIENT_ATTRIB_STACK_DEPTH
_m[0x3000] = (1,) # GL_CLIP_DISTANCE0
_m[0x3001] = (1,) # GL_CLIP_DISTANCE1
_m[0x3002] = (1,) # GL_CLIP_DISTANCE2
_m[0x3003] = (1,) # GL_CLIP_DISTANCE3
_m[0x3004] = (1,) # GL_CLIP_DISTANCE4
_m[0x3005] = (1,) # GL_CLIP_DISTANCE5
_m[0x3006] = (1,) # GL_CLIP_DISTANCE6
_m[0x3007] = (1,) # GL_CLIP_DISTANCE7
_m[0x3000] = (1,) # GL_CLIP_PLANE0
_m[0x3001] = (1,) # GL_CLIP_PLANE1
_m[0x3002] = (1,) # GL_CLIP_PLANE2
_m[0x3003] = (1,) # GL_CLIP_PLANE3
_m[0x3004] = (1,) # GL_CLIP_PLANE4
_m[0x3005] = (1,) # GL_CLIP_PLANE5
_m[0x80F0] = (1,) # GL_CLIP_VOLUME_CLIPPING_HINT_EXT
_m[0x8975] = (1,) # GL_COLOR_ALPHA_PAIRING_ATI
_m[0x8076] = (1,) # GL_COLOR_ARRAY
_m[0x8898] = (1,) # GL_COLOR_ARRAY_BUFFER_BINDING
_m[0x8898] = (1,) # GL_COLOR_ARRAY_BUFFER_BINDING_ARB
_m[0x8084] = (1,) # GL_COLOR_ARRAY_COUNT_EXT
_m[0x8076] = (1,) # GL_COLOR_ARRAY_EXT
_m[0x8F2D] = (1,) # GL_COLOR_ARRAY_LENGTH_NV
_m[0x8090] = (1,) # GL_COLOR_ARRAY_POINTER
_m[0x8081] = (1,) # GL_COLOR_ARRAY_SIZE
_m[0x8081] = (1,) # GL_COLOR_ARRAY_SIZE_EXT
_m[0x8083] = (1,) # GL_COLOR_ARRAY_STRIDE
_m[0x8083] = (1,) # GL_COLOR_ARRAY_STRIDE_EXT
_m[0x8082] = (1,) # GL_COLOR_ARRAY_TYPE
_m[0x8082] = (1,) # GL_COLOR_ARRAY_TYPE_EXT
_m[0x8835] = (4,) # GL_COLOR_CLEAR_UNCLAMPED_VALUE_ATI
_m[0x0C22] = (4,) # GL_COLOR_CLEAR_VALUE
_m[0x8A0F] = (1,) # GL_COLOR_FLOAT_APPLE
_m[0x1603] = (3,) # GL_COLOR_INDEXES
_m[0x0BF2] = (1,) # GL_COLOR_LOGIC_OP
_m[0x0B57] = (1,) # GL_COLOR_MATERIAL
_m[0x0B55] = (1,) # GL_COLOR_MATERIAL_FACE
_m[0x0B56] = (1,) # GL_COLOR_MATERIAL_PARAMETER
_m[0x80B1] = (4,4) # GL_COLOR_MATRIX
_m[0x80B1] = (4,4) # GL_COLOR_MATRIX_SGI
_m[0x80B2] = (1,) # GL_COLOR_MATRIX_STACK_DEPTH
_m[0x80B2] = (1,) # GL_COLOR_MATRIX_STACK_DEPTH_SGI
_m[0x8E20] = (1,) # GL_COLOR_SAMPLES_NV
_m[0x8458] = (1,) # GL_COLOR_SUM
_m[0x8458] = (1,) # GL_COLOR_SUM_ARB
_m[0x854F] = (1,) # GL_COLOR_SUM_CLAMP_NV
_m[0x8458] = (1,) # GL_COLOR_SUM_EXT
_m[0x80D0] = (1,) # GL_COLOR_TABLE
_m[0x80DD] = (1,) # GL_COLOR_TABLE_ALPHA_SIZE
_m[0x80DD] = (1,) # GL_COLOR_TABLE_ALPHA_SIZE_SGI
_m[0x80D7] = (4,) # GL_COLOR_TABLE_BIAS
_m[0x80D7] = (4,) # GL_COLOR_TABLE_BIAS_SGI
_m[0x80DC] = (1,) # GL_COLOR_TABLE_BLUE_SIZE
_m[0x80DC] = (1,) # GL_COLOR_TABLE_BLUE_SIZE_SGI
_m[0x80D8] = (1,) # GL_COLOR_TABLE_FORMAT
_m[0x80D8] = (1,) # GL_COLOR_TABLE_FORMAT_SGI
_m[0x80DB] = (1,) # GL_COLOR_TABLE_GREEN_SIZE
_m[0x80DB] = (1,) # GL_COLOR_TABLE_GREEN_SIZE_SGI
_m[0x80DF] = (1,) # GL_COLOR_TABLE_INTENSITY_SIZE
_m[0x80DF] = (1,) # GL_COLOR_TABLE_INTENSITY_SIZE_SGI
_m[0x80DE] = (1,) # GL_COLOR_TABLE_LUMINANCE_SIZE
_m[0x80DE] = (1,) # GL_COLOR_TABLE_LUMINANCE_SIZE_SGI
_m[0x80DA] = (1,) # GL_COLOR_TABLE_RED_SIZE
_m[0x80DA] = (1,) # GL_COLOR_TABLE_RED_SIZE_SGI
_m[0x80D6] = (4,) # GL_COLOR_TABLE_SCALE
_m[0x80D6] = (4,) # GL_COLOR_TABLE_SCALE_SGI
_m[0x80D0] = (1,) # GL_COLOR_TABLE_SGI
_m[0x80D9] = (1,) # GL_COLOR_TABLE_WIDTH
_m[0x80D9] = (1,) # GL_COLOR_TABLE_WIDTH_SGI
_m[0x0C23] = (4,) # GL_COLOR_WRITEMASK
_m[0x8572] = (1,) # GL_COMBINE_ALPHA
_m[0x8571] = (1,) # GL_COMBINE_RGB
_m[0x8E4B] = (1,) # GL_COMPATIBLE_SUBROUTINES
_m[0x8B81] = (1,) # GL_COMPILE_STATUS
_m[0x86A3] = (_L(0x86A2),) # GL_COMPRESSED_TEXTURE_FORMATS
_m[0x86A3] = (_L(0x86A2),) # GL_COMPRESSED_TEXTURE_FORMATS_ARB
_m[0x91B9] = (1,) # GL_COMPUTE_SHADER
_m[0x1207] = (1,) # GL_CONSTANT_ATTENUATION
_m[0x852A] = (4,) # GL_CONSTANT_COLOR0_NV
_m[0x852B] = (4,) # GL_CONSTANT_COLOR1_NV
_m[0x86E5] = (3,) # GL_CONST_EYE_NV
_m[0x821E] = (1,) # GL_CONTEXT_FLAGS
_m[0x00000002] = (1,) # GL_CONTEXT_FLAG_DEBUG_BIT
_m[0x00000004] = (1,) # GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT_ARB
_m[0x9126] = (1,) # GL_CONTEXT_PROFILE_MASK
_m[0x8010] = (1,) # GL_CONVOLUTION_1D
_m[0x8010] = (1,) # GL_CONVOLUTION_1D_EXT
_m[0x8011] = (1,) # GL_CONVOLUTION_2D
_m[0x8011] = (1,) # GL_CONVOLUTION_2D_EXT
_m[0x8154] = (4,) # GL_CONVOLUTION_BORDER_COLOR
_m[0x8013] = (1,) # GL_CONVOLUTION_BORDER_MODE
_m[0x8013] = (1,) # GL_CONVOLUTION_BORDER_MODE_EXT
_m[0x8015] = (4,) # GL_CONVOLUTION_FILTER_BIAS
_m[0x8015] = (4,) # GL_CONVOLUTION_FILTER_BIAS_EXT
_m[0x8014] = (4,) # GL_CONVOLUTION_FILTER_SCALE
_m[0x8014] = (4,) # GL_CONVOLUTION_FILTER_SCALE_EXT
_m[0x8017] = (1,) # GL_CONVOLUTION_FORMAT
_m[0x8019] = (1,) # GL_CONVOLUTION_HEIGHT
_m[0x8316] = (1,) # GL_CONVOLUTION_HINT_SGIX
_m[0x8018] = (1,) # GL_CONVOLUTION_WIDTH
_m[0x8862] = (1,) # GL_COORD_REPLACE
_m[0x8F36] = (1,) # GL_COPY_READ_BUFFER
_m[0x8F37] = (1,) # GL_COPY_WRITE_BUFFER
_m[0x8ED4] = (1,) # GL_COVERAGE_SAMPLES_NV
_m[0x0B44] = (1,) # GL_CULL_FACE
_m[0x0B45] = (1,) # GL_CULL_FACE_MODE
_m[0x86E0] = (4,) # GL_CULL_MODES_NV
_m[0x81AA] = (1,) # GL_CULL_VERTEX_EXT
_m[0x81AB] = (1,) # GL_CULL_VERTEX_EYE_POSITION_EXT
_m[0x81AC] = (1,) # GL_CULL_VERTEX_OBJECT_POSITION_EXT
_m[0x843C] = (1,) # GL_CURRENT_BINORMAL_EXT
_m[0x0B00] = (4,) # GL_CURRENT_COLOR
_m[0x8453] = (1,) # GL_CURRENT_FOG_COORD
_m[0x8453] = (1,) # GL_CURRENT_FOG_COORDINATE
_m[0x8453] = (1,) # GL_CURRENT_FOG_COORDINATE_EXT
_m[0x0B01] = (1,) # GL_CURRENT_INDEX
_m[0x8641] = (4, 4) # GL_CURRENT_MATRIX_ARB
_m[0x8845] = (1,) # GL_CURRENT_MATRIX_INDEX_ARB
_m[0x8641] = (4, 4) # GL_CURRENT_MATRIX_NV
_m[0x8640] = (1,) # GL_CURRENT_MATRIX_STACK_DEPTH_ARB
_m[0x8640] = (1,) # GL_CURRENT_MATRIX_STACK_DEPTH_NV
_m[0x0B02] = (3,) # GL_CURRENT_NORMAL
_m[0x8865] = (1,) # GL_CURRENT_OCCLUSION_QUERY_ID_NV
_m[0x8843] = (1,) # GL_CURRENT_PALETTE_MATRIX_ARB
_m[0x8B8D] = (1,) # GL_CURRENT_PROGRAM
_m[0x8865] = (1,) # GL_CURRENT_QUERY
_m[0x0B04] = (4,) # GL_CURRENT_RASTER_COLOR
_m[0x0B09] = (1,) # GL_CURRENT_RASTER_DISTANCE
_m[0x0B05] = (1,) # GL_CURRENT_RASTER_INDEX
_m[0x8406] = (1,) # GL_CURRENT_RASTER_NORMAL_SGIX
_m[0x0B07] = (4,) # GL_CURRENT_RASTER_POSITION
_m[0x0B08] = (1,) # GL_CURRENT_RASTER_POSITION_VALID
_m[0x0B06] = (4,) # GL_CURRENT_RASTER_TEXTURE_COORDS
_m[0x8459] = (4,) # GL_CURRENT_SECONDARY_COLOR
_m[0x8459] = (1,) # GL_CURRENT_SECONDARY_COLOR_EXT
_m[0x843B] = (1,) # GL_CURRENT_TANGENT_EXT
_m[0x0B03] = (4,) # GL_CURRENT_TEXTURE_COORDS
_m[0x8626] = (4,) # GL_CURRENT_VERTEX_ATTRIB
_m[0x850B] = (1,) # GL_CURRENT_VERTEX_WEIGHT_EXT
_m[0x86A8] = (1,) # GL_CURRENT_WEIGHT_ARB
_m[0x8244] = (1,) # GL_DEBUG_CALLBACK_FUNCTION
_m[0x8245] = (1,) # GL_DEBUG_CALLBACK_USER_PARAM
_m[0x826D] = (1,) # GL_DEBUG_GROUP_STACK_DEPTH
_m[0x9145] = (1,) # GL_DEBUG_LOGGED_MESSAGES
_m[0x9145] = (1,) # GL_DEBUG_LOGGED_MESSAGES_AMD
_m[0x9145] = (1,) # GL_DEBUG_LOGGED_MESSAGES_ARB
_m[0x8243] = (1,) # GL_DEBUG_NEXT_LOGGED_MESSAGE_LENGTH
_m[0x8243] = (1,) # GL_DEBUG_NEXT_LOGGED_MESSAGE_LENGTH_ARB
_m[0x92E0] = (1,) # GL_DEBUG_OUTPUT
_m[0x8242] = (1,) # GL_DEBUG_OUTPUT_SYNCHRONOUS
_m[0x8196] = (1,) # GL_DEFORMATIONS_MASK_SGIX
_m[0x8B80] = (1,) # GL_DELETE_STATUS
_m[0x0D1F] = (1,) # GL_DEPTH_BIAS
_m[0x0D56] = (1,) # GL_DEPTH_BITS
_m[0x8891] = (1,) # GL_DEPTH_BOUNDS_EXT
_m[0x8890] = (1,) # GL_DEPTH_BOUNDS_TEST_EXT
_m[0x8DAF] = (1,) # GL_DEPTH_BUFFER_FLOAT_MODE_NV
_m[0x864F] = (1,) # GL_DEPTH_CLAMP
_m[0x901F] = (1,) # GL_DEPTH_CLAMP_FAR_AMD
_m[0x901E] = (1,) # GL_DEPTH_CLAMP_NEAR_AMD
_m[0x864F] = (1,) # GL_DEPTH_CLAMP_NV
_m[0x0B73] = (1,) # GL_DEPTH_CLEAR_VALUE
_m[0x0B74] = (1,) # GL_DEPTH_FUNC
_m[0x0B70] = (2,) # GL_DEPTH_RANGE
_m[0x0D1E] = (1,) # GL_DEPTH_SCALE
_m[0x90EA] = (1,) # GL_DEPTH_STENCIL_TEXTURE_MODE
_m[0x0B71] = (1,) # GL_DEPTH_TEST
_m[0x884B] = (1,) # GL_DEPTH_TEXTURE_MODE
_m[0x0B72] = (1,) # GL_DEPTH_WRITEMASK
_m[0x8096] = (1,) # GL_DETAIL_TEXTURE_2D_BINDING_SGIS
_m[0x1201] = (4,) # GL_DIFFUSE
_m[0x90EF] = (1,) # GL_DISPATCH_INDIRECT_BUFFER_BINDING
_m[0x8129] = (3,) # GL_DISTANCE_ATTENUATION_SGIS
_m[0x0BD0] = (1,) # GL_DITHER
_m[0x0C32] = (1,) # GL_DOUBLEBUFFER
_m[0x0C01] = (1,) # GL_DRAW_BUFFER
_m[0x8825] = (1,) # GL_DRAW_BUFFER0
_m[0x8825] = (1,) # GL_DRAW_BUFFER0_ARB
_m[0x8825] = (1,) # GL_DRAW_BUFFER0_ATI
_m[0x8826] = (1,) # GL_DRAW_BUFFER1
_m[0x882F] = (1,) # GL_DRAW_BUFFER10
_m[0x882F] = (1,) # GL_DRAW_BUFFER10_ARB
_m[0x882F] = (1,) # GL_DRAW_BUFFER10_ATI
_m[0x8830] = (1,) # GL_DRAW_BUFFER11
_m[0x8830] = (1,) # GL_DRAW_BUFFER11_ARB
_m[0x8830] = (1,) # GL_DRAW_BUFFER11_ATI
_m[0x8831] = (1,) # GL_DRAW_BUFFER12
_m[0x8831] = (1,) # GL_DRAW_BUFFER12_ARB
_m[0x8831] = (1,) # GL_DRAW_BUFFER12_ATI
_m[0x8832] = (1,) # GL_DRAW_BUFFER13
_m[0x8832] = (1,) # GL_DRAW_BUFFER13_ARB
_m[0x8832] = (1,) # GL_DRAW_BUFFER13_ATI
_m[0x8833] = (1,) # GL_DRAW_BUFFER14
_m[0x8833] = (1,) # GL_DRAW_BUFFER14_ARB
_m[0x8833] = (1,) # GL_DRAW_BUFFER14_ATI
_m[0x8834] = (1,) # GL_DRAW_BUFFER15
_m[0x8834] = (1,) # GL_DRAW_BUFFER15_ARB
_m[0x8834] = (1,) # GL_DRAW_BUFFER15_ATI
_m[0x8826] = (1,) # GL_DRAW_BUFFER1_ARB
_m[0x8826] = (1,) # GL_DRAW_BUFFER1_ATI
_m[0x8827] = (1,) # GL_DRAW_BUFFER2
_m[0x8827] = (1,) # GL_DRAW_BUFFER2_ARB
_m[0x8827] = (1,) # GL_DRAW_BUFFER2_ATI
_m[0x8828] = (1,) # GL_DRAW_BUFFER3
_m[0x8828] = (1,) # GL_DRAW_BUFFER3_ARB
_m[0x8828] = (1,) # GL_DRAW_BUFFER3_ATI
_m[0x8829] = (1,) # GL_DRAW_BUFFER4
_m[0x8829] = (1,) # GL_DRAW_BUFFER4_ARB
_m[0x8829] = (1,) # GL_DRAW_BUFFER4_ATI
_m[0x882A] = (1,) # GL_DRAW_BUFFER5
_m[0x882A] = (1,) # GL_DRAW_BUFFER5_ARB
_m[0x882A] = (1,) # GL_DRAW_BUFFER5_ATI
_m[0x882B] = (1,) # GL_DRAW_BUFFER6
_m[0x882B] = (1,) # GL_DRAW_BUFFER6_ARB
_m[0x882B] = (1,) # GL_DRAW_BUFFER6_ATI
_m[0x882C] = (1,) # GL_DRAW_BUFFER7
_m[0x882C] = (1,) # GL_DRAW_BUFFER7_ARB
_m[0x882C] = (1,) # GL_DRAW_BUFFER7_ATI
_m[0x882D] = (1,) # GL_DRAW_BUFFER8
_m[0x882D] = (1,) # GL_DRAW_BUFFER8_ARB
_m[0x882D] = (1,) # GL_DRAW_BUFFER8_ATI
_m[0x882E] = (1,) # GL_DRAW_BUFFER9
_m[0x882E] = (1,) # GL_DRAW_BUFFER9_ARB
_m[0x882E] = (1,) # GL_DRAW_BUFFER9_ATI
_m[0x0C01] = (1,) # GL_DRAW_BUFFER_EXT
_m[0x8CA9] = (1,) # GL_DRAW_FRAMEBUFFER
_m[0x8CA6] = (1,) # GL_DRAW_FRAMEBUFFER_BINDING
_m[0x8F43] = (1,) # GL_DRAW_INDIRECT_BUFFER_BINDING
_m[0x8716] = (1,) # GL_DS_BIAS_NV
_m[0x8710] = (1,) # GL_DS_SCALE_NV
_m[0x8717] = (1,) # GL_DT_BIAS_NV
_m[0x8711] = (1,) # GL_DT_SCALE_NV
_m[0x0B43] = (1,) # GL_EDGE_FLAG
_m[0x8079] = (1,) # GL_EDGE_FLAG_ARRAY
_m[0x889B] = (1,) # GL_EDGE_FLAG_ARRAY_BUFFER_BINDING
_m[0x889B] = (1,) # GL_EDGE_FLAG_ARRAY_BUFFER_BINDING_ARB
_m[0x808D] = (1,) # GL_EDGE_FLAG_ARRAY_COUNT_EXT
_m[0x8079] = (1,) # GL_EDGE_FLAG_ARRAY_EXT
_m[0x8F30] = (1,) # GL_EDGE_FLAG_ARRAY_LENGTH_NV
_m[0x8093] = (1,) # GL_EDGE_FLAG_ARRAY_POINTER
_m[0x808C] = (1,) # GL_EDGE_FLAG_ARRAY_STRIDE
_m[0x808C] = (1,) # GL_EDGE_FLAG_ARRAY_STRIDE_EXT
_m[0x8895] = (1,) # GL_ELEMENT_ARRAY_BUFFER_BINDING
_m[0x8895] = (1,) # GL_ELEMENT_ARRAY_BUFFER_BINDING_ARB
_m[0x8F33] = (1,) # GL_ELEMENT_ARRAY_LENGTH_NV
_m[0x8A0D] = (1,) # GL_ELEMENT_ARRAY_TYPE_APPLE
_m[0x8769] = (1,) # GL_ELEMENT_ARRAY_TYPE_ATI
_m[0x1600] = (4,) # GL_EMISSION
_m[0x86C5] = (1,) # GL_EVAL_FRACTIONAL_TESSELLATION_NV
_m[0x86C6] = (1,) # GL_EVAL_VERTEX_ATTRIB0_NV
_m[0x86D0] = (1,) # GL_EVAL_VERTEX_ATTRIB10_NV
_m[0x86D1] = (1,) # GL_EVAL_VERTEX_ATTRIB11_NV
_m[0x86D2] = (1,) # GL_EVAL_VERTEX_ATTRIB12_NV
_m[0x86D3] = (1,) # GL_EVAL_VERTEX_ATTRIB13_NV
_m[0x86D4] = (1,) # GL_EVAL_VERTEX_ATTRIB14_NV
_m[0x86D5] = (1,) # GL_EVAL_VERTEX_ATTRIB15_NV
_m[0x86C7] = (1,) # GL_EVAL_VERTEX_ATTRIB1_NV
_m[0x86C8] = (1,) # GL_EVAL_VERTEX_ATTRIB2_NV
_m[0x86C9] = (1,) # GL_EVAL_VERTEX_ATTRIB3_NV
_m[0x86CA] = (1,) # GL_EVAL_VERTEX_ATTRIB4_NV
_m[0x86CB] = (1,) # GL_EVAL_VERTEX_ATTRIB5_NV
_m[0x86CC] = (1,) # GL_EVAL_VERTEX_ATTRIB6_NV
_m[0x86CD] = (1,) # GL_EVAL_VERTEX_ATTRIB7_NV
_m[0x86CE] = (1,) # GL_EVAL_VERTEX_ATTRIB8_NV
_m[0x86CF] = (1,) # GL_EVAL_VERTEX_ATTRIB9_NV
_m[0x1F03] = (1,) # GL_EXTENSIONS
_m[0x81F6] = (7,) # GL_EYE_LINE_SGIS
_m[0x2502] = (4,) # GL_EYE_PLANE
_m[0x81F4] = (4,) # GL_EYE_POINT_SGIS
_m[0x0DF0] = (1,) # GL_FEEDBACK_BUFFER_POINTER
_m[0x0DF1] = (1,) # GL_FEEDBACK_BUFFER_SIZE
_m[0x0DF2] = (1,) # GL_FEEDBACK_BUFFER_TYPE
_m[0x888D] = (4,) # GL_FLOAT_CLEAR_COLOR_VALUE_NV
_m[0x888E] = (1,) # GL_FLOAT_RGBA_MODE_NV
_m[0x0B60] = (1,) # GL_FOG
_m[0x0B66] = (4,) # GL_FOG_COLOR
_m[0x889D] = (1,) # GL_FOG_COORDINATE_ARRAY_BUFFER_BINDING_ARB
_m[0x8455] = (1,) # GL_FOG_COORDINATE_ARRAY_STRIDE_EXT
_m[0x8454] = (1,) # GL_FOG_COORDINATE_ARRAY_TYPE_EXT
_m[0x8457] = (1,) # GL_FOG_COORD_ARRAY
_m[0x889D] = (1,) # GL_FOG_COORD_ARRAY_BUFFER_BINDING
_m[0x8F32] = (1,) # GL_FOG_COORD_ARRAY_LENGTH_NV
_m[0x8455] = (1,) # GL_FOG_COORD_ARRAY_STRIDE
_m[0x8454] = (1,) # GL_FOG_COORD_ARRAY_TYPE
_m[0x8450] = (1,) # GL_FOG_COORD_SRC
_m[0x0B62] = (1,) # GL_FOG_DENSITY
_m[0x855A] = (1,) # GL_FOG_DISTANCE_MODE_NV
_m[0x0B64] = (1,) # GL_FOG_END
_m[0x812B] = (1,) # GL_FOG_FUNC_POINTS_SGIS
_m[0x0C54] = (1,) # GL_FOG_HINT
_m[0x0B61] = (1,) # GL_FOG_INDEX
_m[0x0B65] = (1,) # GL_FOG_MODE
_m[0x8198] = (1,) # GL_FOG_OFFSET_SGIX
_m[0x8199] = (4,) # GL_FOG_OFFSET_VALUE_SGIX
_m[0x0B63] = (1,) # GL_FOG_START
_m[0x8402] = (1,) # GL_FRAGMENT_COLOR_MATERIAL_FACE_SGIX
_m[0x8403] = (1,) # GL_FRAGMENT_COLOR_MATERIAL_PARAMETER_SGIX
_m[0x8401] = (1,) # GL_FRAGMENT_COLOR_MATERIAL_SGIX
_m[0x840C] = (1,) # GL_FRAGMENT_LIGHT0_SGIX
_m[0x8400] = (1,) # GL_FRAGMENT_LIGHTING_SGIX
_m[0x840A] = (4,) # GL_FRAGMENT_LIGHT_MODEL_AMBIENT_SGIX
_m[0x8408] = (1,) # GL_FRAGMENT_LIGHT_MODEL_LOCAL_VIEWER_SGIX
_m[0x840B] = (1,) # GL_FRAGMENT_LIGHT_MODEL_NORMAL_INTERPOLATION_SGIX
_m[0x8409] = (1,) # GL_FRAGMENT_LIGHT_MODEL_TWO_SIDE_SGIX
_m[0x8804] = (1,) # GL_FRAGMENT_PROGRAM_ARB
_m[0x8873] = (1,) # GL_FRAGMENT_PROGRAM_BINDING_NV
_m[0x8870] = (1,) # GL_FRAGMENT_PROGRAM_NV
_m[0x8B30] = (1,) # GL_FRAGMENT_SHADER
_m[0x8920] = (1,) # GL_FRAGMENT_SHADER_ATI
_m[0x8B8B] = (1,) # GL_FRAGMENT_SHADER_DERIVATIVE_HINT
_m[0x8B8B] = (1,) # GL_FRAGMENT_SHADER_DERIVATIVE_HINT_ARB
_m[0x8215] = (1,) # GL_FRAMEBUFFER_ATTACHMENT_ALPHA_SIZE
_m[0x8214] = (1,) # GL_FRAMEBUFFER_ATTACHMENT_BLUE_SIZE
_m[0x8210] = (1,) # GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING
_m[0x8211] = (1,) # GL_FRAMEBUFFER_ATTACHMENT_COMPONENT_TYPE
_m[0x8216] = (1,) # GL_FRAMEBUFFER_ATTACHMENT_DEPTH_SIZE
_m[0x8213] = (1,) # GL_FRAMEBUFFER_ATTACHMENT_GREEN_SIZE
_m[0x8DA7] = (1,) # GL_FRAMEBUFFER_ATTACHMENT_LAYERED
_m[0x8CD1] = (1,) # GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME
_m[0x8CD0] = (1,) # GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE
_m[0x8212] = (1,) # GL_FRAMEBUFFER_ATTACHMENT_RED_SIZE
_m[0x8217] = (1,) # GL_FRAMEBUFFER_ATTACHMENT_STENCIL_SIZE
_m[0x8CD3] = (1,) # GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE
_m[0x8CD4] = (1,) # GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LAYER
_m[0x8CD2] = (1,) # GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL
_m[0x8CA6] = (1,) # GL_FRAMEBUFFER_BINDING_EXT
_m[0x9314] = (1,) # GL_FRAMEBUFFER_DEFAULT_FIXED_SAMPLE_LOCATIONS
_m[0x9311] = (1,) # GL_FRAMEBUFFER_DEFAULT_HEIGHT
_m[0x9312] = (1,) # GL_FRAMEBUFFER_DEFAULT_LAYERS
_m[0x9313] = (1,) # GL_FRAMEBUFFER_DEFAULT_SAMPLES
_m[0x9310] = (1,) # GL_FRAMEBUFFER_DEFAULT_WIDTH
_m[0x8DB9] = (1,) # GL_FRAMEBUFFER_SRGB
_m[0x8DBA] = (1,) # GL_FRAMEBUFFER_SRGB_CAPABLE_EXT
_m[0x8DB9] = (1,) # GL_FRAMEBUFFER_SRGB_EXT
_m[0x818C] = (1,) # GL_FRAMEZOOM_FACTOR_SGIX
_m[0x818B] = (1,) # GL_FRAMEZOOM_SGIX
_m[0x0B46] = (1,) # GL_FRONT_FACE
_m[0x8191] = (1,) # GL_GENERATE_MIPMAP
_m[0x8192] = (1,) # GL_GENERATE_MIPMAP_HINT
_m[0x8192] = (1,) # GL_GENERATE_MIPMAP_HINT_SGIS
_m[0x8DDB] = (1,) # GL_GEOMETRY_INPUT_TYPE_ARB
_m[0x8DDC] = (1,) # GL_GEOMETRY_OUTPUT_TYPE_ARB
_m[0x8DD9] = (1,) # GL_GEOMETRY_SHADER
_m[0x8DDA] = (1,) # GL_GEOMETRY_VERTICES_OUT_ARB
_m[0x81DA] = (1,) # GL_GLOBAL_ALPHA_FACTOR_SUN
_m[0x9049] = (1,) # GL_GPU_MEMORY_INFO_CURRENT_AVAILABLE_VIDMEM_NVX
_m[0x9047] = (1,) # GL_GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX
_m[0x904B] = (1,) # GL_GPU_MEMORY_INFO_EVICTED_MEMORY_NVX
_m[0x904A] = (1,) # GL_GPU_MEMORY_INFO_EVICTION_COUNT_NVX
_m[0x9048] = (1,) # GL_GPU_MEMORY_INFO_TOTAL_AVAILABLE_MEMORY_NVX
_m[0x0D19] = (1,) # GL_GREEN_BIAS
_m[0x0D53] = (1,) # GL_GREEN_BITS
_m[0x8565] = (1,) # GL_GREEN_MAX_CLAMP_INGR
_m[0x8561] = (1,) # GL_GREEN_MIN_CLAMP_INGR
_m[0x0D18] = (1,) # GL_GREEN_SCALE
_m[0x8024] = (1,) # GL_HISTOGRAM
_m[0x802B] = (1,) # GL_HISTOGRAM_ALPHA_SIZE
_m[0x802B] = (1,) # GL_HISTOGRAM_ALPHA_SIZE_EXT
_m[0x802A] = (1,) # GL_HISTOGRAM_BLUE_SIZE
_m[0x802A] = (1,) # GL_HISTOGRAM_BLUE_SIZE_EXT
_m[0x8024] = (1,) # GL_HISTOGRAM_EXT
_m[0x8027] = (1,) # GL_HISTOGRAM_FORMAT
_m[0x8027] = (1,) # GL_HISTOGRAM_FORMAT_EXT
_m[0x8029] = (1,) # GL_HISTOGRAM_GREEN_SIZE
_m[0x8029] = (1,) # GL_HISTOGRAM_GREEN_SIZE_EXT
_m[0x802C] = (1,) # GL_HISTOGRAM_LUMINANCE_SIZE
_m[0x802C] = (1,) # GL_HISTOGRAM_LUMINANCE_SIZE_EXT
_m[0x8028] = (1,) # GL_HISTOGRAM_RED_SIZE
_m[0x8028] = (1,) # GL_HISTOGRAM_RED_SIZE_EXT
_m[0x802D] = (1,) # GL_HISTOGRAM_SINK
_m[0x802D] = (1,) # GL_HISTOGRAM_SINK_EXT
_m[0x8026] = (1,) # GL_HISTOGRAM_WIDTH
_m[0x8026] = (1,) # GL_HISTOGRAM_WIDTH_EXT
_m[0x8714] = (1,) # GL_HI_BIAS_NV
_m[0x870E] = (1,) # GL_HI_SCALE_NV
_m[0x8B9B] = (1,) # GL_IMPLEMENTATION_COLOR_READ_FORMAT
_m[0x8B9A] = (1,) # GL_IMPLEMENTATION_COLOR_READ_TYPE
_m[0x8077] = (1,) # GL_INDEX_ARRAY
_m[0x8899] = (1,) # GL_INDEX_ARRAY_BUFFER_BINDING
_m[0x8899] = (1,) # GL_INDEX_ARRAY_BUFFER_BINDING_ARB
_m[0x8087] = (1,) # GL_INDEX_ARRAY_COUNT_EXT
_m[0x8077] = (1,) # GL_INDEX_ARRAY_EXT
_m[0x8F2E] = (1,) # GL_INDEX_ARRAY_LENGTH_NV
_m[0x8091] = (1,) # GL_INDEX_ARRAY_POINTER
_m[0x8086] = (1,) # GL_INDEX_ARRAY_STRIDE
_m[0x8086] = (1,) # GL_INDEX_ARRAY_STRIDE_EXT
_m[0x8085] = (1,) # GL_INDEX_ARRAY_TYPE
_m[0x8085] = (1,) # GL_INDEX_ARRAY_TYPE_EXT
_m[0x0D51] = (1,) # GL_INDEX_BITS
_m[0x0C20] = (1,) # GL_INDEX_CLEAR_VALUE
_m[0x0BF1] = (1,) # GL_INDEX_LOGIC_OP
_m[0x0C30] = (1,) # GL_INDEX_MODE
_m[0x0D13] = (1,) # GL_INDEX_OFFSET
_m[0x0D12] = (1,) # GL_INDEX_SHIFT
_m[0x0C21] = (1,) # GL_INDEX_WRITEMASK
_m[0x8B84] = (1,) # GL_INFO_LOG_LENGTH
_m[0x8181] = (1,) # GL_INSTRUMENT_MEASUREMENTS_SGIX
_m[0x8980] = (1,) # GL_INTERLACE_OML
_m[0x8568] = (1,) # GL_INTERLACE_READ_INGR
_m[0x8981] = (1,) # GL_INTERLACE_READ_OML
_m[0x8094] = (1,) # GL_INTERLACE_SGIX
_m[0x817F] = (1,) # GL_IR_INSTRUMENT1_SGIX
_m[0x92E7] = (1,) # GL_IS_PER_PATCH
_m[0x9300] = (1,) # GL_IS_ROW_MAJOR
_m[0x825E] = (1,) # GL_LAYER_PROVOKING_VERTEX
_m[0x4000] = (1,) # GL_LIGHT0
_m[0x4001] = (1,) # GL_LIGHT1
_m[0x4002] = (1,) # GL_LIGHT2
_m[0x4003] = (1,) # GL_LIGHT3
_m[0x4004] = (1,) # GL_LIGHT4
_m[0x4005] = (1,) # GL_LIGHT5
_m[0x4006] = (1,) # GL_LIGHT6
_m[0x4007] = (1,) # GL_LIGHT7
_m[0x0B50] = (1,) # GL_LIGHTING
_m[0x8407] = (1,) # GL_LIGHT_ENV_MODE_SGIX
_m[0x0B53] = (4,) # GL_LIGHT_MODEL_AMBIENT
_m[0x81F8] = (1,) # GL_LIGHT_MODEL_COLOR_CONTROL
_m[0x81F8] = (1,) # GL_LIGHT_MODEL_COLOR_CONTROL_EXT
_m[0x0B51] = (1,) # GL_LIGHT_MODEL_LOCAL_VIEWER
_m[0x0B52] = (1,) # GL_LIGHT_MODEL_TWO_SIDE
_m[0x1208] = (1,) # GL_LINEAR_ATTENUATION
_m[0x0B20] = (1,) # GL_LINE_SMOOTH
_m[0x0C52] = (1,) # GL_LINE_SMOOTH_HINT
_m[0x0B24] = (1,) # GL_LINE_STIPPLE
_m[0x0B25] = (1,) # GL_LINE_STIPPLE_PATTERN
_m[0x0B26] = (1,) # GL_LINE_STIPPLE_REPEAT
_m[0x0B21] = (1,) # GL_LINE_WIDTH
_m[0x0B23] = (1,) # GL_LINE_WIDTH_GRANULARITY
_m[0x0B22] = (2,) # GL_LINE_WIDTH_RANGE
_m[0x8B82] = (1,) # GL_LINK_STATUS
_m[0x0B32] = (1,) # GL_LIST_BASE
_m[0x0B33] = (1,) # GL_LIST_INDEX
_m[0x0B30] = (1,) # GL_LIST_MODE
_m[0x930E] = (1,) # GL_LOCATION
_m[0x930F] = (1,) # GL_LOCATION_INDEX
_m[0x0BF1] = (1,) # GL_LOGIC_OP
_m[0x0BF0] = (1,) # GL_LOGIC_OP_MODE
_m[0x8252] = (1,) # GL_LOSE_CONTEXT_ON_RESET_ARB
_m[0x8715] = (1,) # GL_LO_BIAS_NV
_m[0x870F] = (1,) # GL_LO_SCALE_NV
_m[0x8718] = (1,) # GL_MAGNITUDE_BIAS_NV
_m[0x8712] = (1,) # GL_MAGNITUDE_SCALE_NV
_m[0x821B] = (1,) # GL_MAJOR_VERSION
_m[0x0D90] = (1,) # GL_MAP1_COLOR_4
_m[0x0DD0] = (2,) # GL_MAP1_GRID_DOMAIN
_m[0x0DD1] = (1,) # GL_MAP1_GRID_SEGMENTS
_m[0x0D91] = (1,) # GL_MAP1_INDEX
_m[0x0D92] = (1,) # GL_MAP1_NORMAL
_m[0x0D93] = (1,) # GL_MAP1_TEXTURE_COORD_1
_m[0x0D94] = (1,) # GL_MAP1_TEXTURE_COORD_2
_m[0x0D95] = (1,) # GL_MAP1_TEXTURE_COORD_3
_m[0x0D96] = (1,) # GL_MAP1_TEXTURE_COORD_4
_m[0x0D97] = (1,) # GL_MAP1_VERTEX_3
_m[0x0D98] = (1,) # GL_MAP1_VERTEX_4
_m[0x8660] = (4,) # GL_MAP1_VERTEX_ATTRIB0_4_NV
_m[0x866A] = (4,) # GL_MAP1_VERTEX_ATTRIB10_4_NV
_m[0x866B] = (4,) # GL_MAP1_VERTEX_ATTRIB11_4_NV
_m[0x866C] = (4,) # GL_MAP1_VERTEX_ATTRIB12_4_NV
_m[0x866D] = (4,) # GL_MAP1_VERTEX_ATTRIB13_4_NV
_m[0x866E] = (4,) # GL_MAP1_VERTEX_ATTRIB14_4_NV
_m[0x866F] = (4,) # GL_MAP1_VERTEX_ATTRIB15_4_NV
_m[0x8661] = (4,) # GL_MAP1_VERTEX_ATTRIB1_4_NV
_m[0x8662] = (4,) # GL_MAP1_VERTEX_ATTRIB2_4_NV
_m[0x8663] = (4,) # GL_MAP1_VERTEX_ATTRIB3_4_NV
_m[0x8664] = (4,) # GL_MAP1_VERTEX_ATTRIB4_4_NV
_m[0x8665] = (4,) # GL_MAP1_VERTEX_ATTRIB5_4_NV
_m[0x8666] = (4,) # GL_MAP1_VERTEX_ATTRIB6_4_NV
_m[0x8667] = (4,) # GL_MAP1_VERTEX_ATTRIB7_4_NV
_m[0x8668] = (4,) # GL_MAP1_VERTEX_ATTRIB8_4_NV
_m[0x8669] = (4,) # GL_MAP1_VERTEX_ATTRIB9_4_NV
_m[0x0DB0] = (1,) # GL_MAP2_COLOR_4
_m[0x0DD2] = (4,) # GL_MAP2_GRID_DOMAIN
_m[0x0DD3] = (2,) # GL_MAP2_GRID_SEGMENTS
_m[0x0DB1] = (1,) # GL_MAP2_INDEX
_m[0x0DB2] = (1,) # GL_MAP2_NORMAL
_m[0x0DB3] = (1,) # GL_MAP2_TEXTURE_COORD_1
_m[0x0DB4] = (1,) # GL_MAP2_TEXTURE_COORD_2
_m[0x0DB5] = (1,) # GL_MAP2_TEXTURE_COORD_3
_m[0x0DB6] = (1,) # GL_MAP2_TEXTURE_COORD_4
_m[0x0DB7] = (1,) # GL_MAP2_VERTEX_3
_m[0x0DB8] = (1,) # GL_MAP2_VERTEX_4
_m[0x8670] = (4,) # GL_MAP2_VERTEX_ATTRIB0_4_NV
_m[0x867A] = (4,) # GL_MAP2_VERTEX_ATTRIB10_4_NV
_m[0x867B] = (4,) # GL_MAP2_VERTEX_ATTRIB11_4_NV
_m[0x867C] = (4,) # GL_MAP2_VERTEX_ATTRIB12_4_NV
_m[0x867D] = (4,) # GL_MAP2_VERTEX_ATTRIB13_4_NV
_m[0x867E] = (4,) # GL_MAP2_VERTEX_ATTRIB14_4_NV
_m[0x867F] = (4,) # GL_MAP2_VERTEX_ATTRIB15_4_NV
_m[0x8671] = (4,) # GL_MAP2_VERTEX_ATTRIB1_4_NV
_m[0x8672] = (4,) # GL_MAP2_VERTEX_ATTRIB2_4_NV
_m[0x8673] = (4,) # GL_MAP2_VERTEX_ATTRIB3_4_NV
_m[0x8674] = (4,) # GL_MAP2_VERTEX_ATTRIB4_4_NV
_m[0x8675] = (4,) # GL_MAP2_VERTEX_ATTRIB5_4_NV
_m[0x8676] = (4,) # GL_MAP2_VERTEX_ATTRIB6_4_NV
_m[0x8677] = (4,) # GL_MAP2_VERTEX_ATTRIB7_4_NV
_m[0x8678] = (4,) # GL_MAP2_VERTEX_ATTRIB8_4_NV
_m[0x8679] = (4,) # GL_MAP2_VERTEX_ATTRIB9_4_NV
_m[0x0D10] = (1,) # GL_MAP_COLOR
_m[0x0D11] = (1,) # GL_MAP_STENCIL
_m[0x8844] = (1,) # GL_MATRIX_INDEX_ARRAY_ARB
_m[0x8849] = (1,) # GL_MATRIX_INDEX_ARRAY_POINTER_ARB
_m[0x8846] = (1,) # GL_MATRIX_INDEX_ARRAY_SIZE_ARB
_m[0x8848] = (1,) # GL_MATRIX_INDEX_ARRAY_STRIDE_ARB
_m[0x8847] = (1,) # GL_MATRIX_INDEX_ARRAY_TYPE_ARB
_m[0x0BA0] = (1,) # GL_MATRIX_MODE
_m[0x8840] = (1,) # GL_MATRIX_PALETTE_ARB
_m[0x92FF] = (1,) # GL_MATRIX_STRIDE
_m[0x8073] = (1,) # GL_MAX_3D_TEXTURE_SIZE
_m[0x8073] = (1,) # GL_MAX_3D_TEXTURE_SIZE_EXT
_m[0x8138] = (1,) # GL_MAX_4D_TEXTURE_SIZE_SGIS
_m[0x8405] = (1,) # GL_MAX_ACTIVE_LIGHTS_SGIX
_m[0x88FF] = (1,) # GL_MAX_ARRAY_TEXTURE_LAYERS
_m[0x88FF] = (1,) # GL_MAX_ARRAY_TEXTURE_LAYERS_EXT
_m[0x8360] = (1,) # GL_MAX_ASYNC_DRAW_PIXELS_SGIX
_m[0x832D] = (1,) # GL_MAX_ASYNC_HISTOGRAM_SGIX
_m[0x8361] = (1,) # GL_MAX_ASYNC_READ_PIXELS_SGIX
_m[0x835F] = (1,) # GL_MAX_ASYNC_TEX_IMAGE_SGIX
_m[0x92DC] = (1,) # GL_MAX_ATOMIC_COUNTER_BUFFER_BINDINGS
_m[0x92D8] = (1,) # GL_MAX_ATOMIC_COUNTER_BUFFER_SIZE
_m[0x0D35] = (1,) # GL_MAX_ATTRIB_STACK_DEPTH
_m[0x8DED] = (1,) # GL_MAX_BINDABLE_UNIFORM_SIZE_EXT
_m[0x0D3B] = (1,) # GL_MAX_CLIENT_ATTRIB_STACK_DEPTH
_m[0x8177] = (1,) # GL_MAX_CLIPMAP_DEPTH_SGIX
_m[0x8178] = (1,) # GL_MAX_CLIPMAP_VIRTUAL_DEPTH_SGIX
_m[0x0D32] = (1,) # GL_MAX_CLIP_DISTANCES
_m[0x0D32] = (1,) # GL_MAX_CLIP_PLANES
_m[0x8CDF] = (1,) # GL_MAX_COLOR_ATTACHMENTS
_m[0x8CDF] = (1,) # GL_MAX_COLOR_ATTACHMENTS_EXT
_m[0x80B3] = (1,) # GL_MAX_COLOR_MATRIX_STACK_DEPTH
_m[0x80B3] = (1,) # GL_MAX_COLOR_MATRIX_STACK_DEPTH_SGI
_m[0x910E] = (1,) # GL_MAX_COLOR_TEXTURE_SAMPLES
_m[0x92D7] = (1,) # GL_MAX_COMBINED_ATOMIC_COUNTERS
_m[0x92D1] = (1,) # GL_MAX_COMBINED_ATOMIC_COUNTER_BUFFERS
_m[0x8266] = (1,) # GL_MAX_COMBINED_COMPUTE_UNIFORM_COMPONENTS
_m[0x8A33] = (1,) # GL_MAX_COMBINED_FRAGMENT_UNIFORM_COMPONENTS
_m[0x8A32] = (1,) # GL_MAX_COMBINED_GEOMETRY_UNIFORM_COMPONENTS
_m[0x90CF] = (1,) # GL_MAX_COMBINED_IMAGE_UNIFORMS
_m[0x8F39] = (1,) # GL_MAX_COMBINED_IMAGE_UNITS_AND_FRAGMENT_OUTPUTS
_m[0x8F39] = (1,) # GL_MAX_COMBINED_IMAGE_UNITS_AND_FRAGMENT_OUTPUTS_EXT
_m[0x90DC] = (1,) # GL_MAX_COMBINED_SHADER_STORAGE_BLOCKS
_m[0x8E1E] = (1,) # GL_MAX_COMBINED_TESS_CONTROL_UNIFORM_COMPONENTS
_m[0x8E1F] = (1,) # GL_MAX_COMBINED_TESS_EVALUATION_UNIFORM_COMPONENTS
_m[0x8B4D] = (1,) # GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS
_m[0x8B4D] = (1,) # GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS_ARB
_m[0x8A2E] = (1,) # GL_MAX_COMBINED_UNIFORM_BLOCKS
_m[0x8A31] = (1,) # GL_MAX_COMBINED_VERTEX_UNIFORM_COMPONENTS
_m[0x8265] = (1,) # GL_MAX_COMPUTE_ATOMIC_COUNTERS
_m[0x8264] = (1,) # GL_MAX_COMPUTE_ATOMIC_COUNTER_BUFFERS
_m[0x91BD] = (1,) # GL_MAX_COMPUTE_IMAGE_UNIFORMS
_m[0x90DB] = (1,) # GL_MAX_COMPUTE_SHADER_STORAGE_BLOCKS
_m[0x8262] = (1,) # GL_MAX_COMPUTE_SHARED_MEMORY_SIZE
_m[0x91BC] = (1,) # GL_MAX_COMPUTE_TEXTURE_IMAGE_UNITS
_m[0x91BB] = (1,) # GL_MAX_COMPUTE_UNIFORM_BLOCKS
_m[0x8263] = (1,) # GL_MAX_COMPUTE_UNIFORM_COMPONENTS
_m[0x91BE] = (3,) # GL_MAX_COMPUTE_WORK_GROUP_COUNT
_m[0x91BF] = (3,) # GL_MAX_COMPUTE_WORK_GROUP_SIZE
_m[0x801B] = (1,) # GL_MAX_CONVOLUTION_HEIGHT
_m[0x801A] = (1,) # GL_MAX_CONVOLUTION_WIDTH
_m[0x851C] = (1,) # GL_MAX_CUBE_MAP_TEXTURE_SIZE
_m[0x851C] = (1,) # GL_MAX_CUBE_MAP_TEXTURE_SIZE_ARB
_m[0x826C] = (1,) # GL_MAX_DEBUG_GROUP_STACK_DEPTH
_m[0x9144] = (1,) # GL_MAX_DEBUG_LOGGED_MESSAGES
_m[0x9144] = (1,) # GL_MAX_DEBUG_LOGGED_MESSAGES_AMD
_m[0x9144] = (1,) # GL_MAX_DEBUG_LOGGED_MESSAGES_ARB
_m[0x9143] = (1,) # GL_MAX_DEBUG_MESSAGE_LENGTH
_m[0x9143] = (1,) # GL_MAX_DEBUG_MESSAGE_LENGTH_AMD
_m[0x9143] = (1,) # GL_MAX_DEBUG_MESSAGE_LENGTH_ARB
_m[0x90D1] = (1,) # GL_MAX_DEEP_3D_TEXTURE_DEPTH_NV
_m[0x90D0] = (2,) # GL_MAX_DEEP_3D_TEXTURE_WIDTH_HEIGHT_NV
_m[0x910F] = (1,) # GL_MAX_DEPTH_TEXTURE_SAMPLES
_m[0x8824] = (1,) # GL_MAX_DRAW_BUFFERS
_m[0x8824] = (1,) # GL_MAX_DRAW_BUFFERS_ARB
_m[0x8824] = (1,) # GL_MAX_DRAW_BUFFERS_ATI
_m[0x88FC] = (1,) # GL_MAX_DUAL_SOURCE_DRAW_BUFFERS
_m[0x80E9] = (1,) # GL_MAX_ELEMENTS_INDICES
_m[0x80E8] = (1,) # GL_MAX_ELEMENTS_VERTICES
_m[0x8D6B] = (1,) # GL_MAX_ELEMENT_INDEX
_m[0x0D30] = (1,) # GL_MAX_EVAL_ORDER
_m[0x812C] = (1,) # GL_MAX_FOG_FUNC_POINTS_SGIS
_m[0x92D6] = (1,) # GL_MAX_FRAGMENT_ATOMIC_COUNTERS
_m[0x92D0] = (1,) # GL_MAX_FRAGMENT_ATOMIC_COUNTER_BUFFERS
_m[0x8DE3] = (1,) # GL_MAX_FRAGMENT_BINDABLE_UNIFORMS_EXT
_m[0x90CE] = (1,) # GL_MAX_FRAGMENT_IMAGE_UNIFORMS
_m[0x8E5C] = (1,) # GL_MAX_FRAGMENT_INTERPOLATION_OFFSET
_m[0x8E5C] = (1,) # GL_MAX_FRAGMENT_INTERPOLATION_OFFSET_NV
_m[0x8404] = (1,) # GL_MAX_FRAGMENT_LIGHTS_SGIX
_m[0x8868] = (1,) # GL_MAX_FRAGMENT_PROGRAM_LOCAL_PARAMETERS_NV
_m[0x90DA] = (1,) # GL_MAX_FRAGMENT_SHADER_STORAGE_BLOCKS
_m[0x8A2D] = (1,) # GL_MAX_FRAGMENT_UNIFORM_BLOCKS
_m[0x8B49] = (1,) # GL_MAX_FRAGMENT_UNIFORM_COMPONENTS
_m[0x8B49] = (1,) # GL_MAX_FRAGMENT_UNIFORM_COMPONENTS_ARB
_m[0x8DFD] = (1,) # GL_MAX_FRAGMENT_UNIFORM_VECTORS
_m[0x9316] = (1,) # GL_MAX_FRAMEBUFFER_HEIGHT
_m[0x9317] = (1,) # GL_MAX_FRAMEBUFFER_LAYERS
_m[0x9318] = (1,) # GL_MAX_FRAMEBUFFER_SAMPLES
_m[0x9315] = (1,) # GL_MAX_FRAMEBUFFER_WIDTH
_m[0x818D] = (1,) # GL_MAX_FRAMEZOOM_FACTOR_SGIX
_m[0x854D] = (1,) # GL_MAX_GENERAL_COMBINERS_NV
_m[0x92D5] = (1,) # GL_MAX_GEOMETRY_ATOMIC_COUNTERS
_m[0x92CF] = (1,) # GL_MAX_GEOMETRY_ATOMIC_COUNTER_BUFFERS
_m[0x8DE4] = (1,) # GL_MAX_GEOMETRY_BINDABLE_UNIFORMS_EXT
_m[0x90CD] = (1,) # GL_MAX_GEOMETRY_IMAGE_UNIFORMS
_m[0x8DE0] = (1,) # GL_MAX_GEOMETRY_OUTPUT_VERTICES
_m[0x8DE0] = (1,) # GL_MAX_GEOMETRY_OUTPUT_VERTICES_ARB
_m[0x8DE0] = (1,) # GL_MAX_GEOMETRY_OUTPUT_VERTICES_EXT
_m[0x8E5A] = (1,) # GL_MAX_GEOMETRY_PROGRAM_INVOCATIONS_NV
_m[0x8E5A] = (1,) # GL_MAX_GEOMETRY_SHADER_INVOCATIONS
_m[0x90D7] = (1,) # GL_MAX_GEOMETRY_SHADER_STORAGE_BLOCKS
_m[0x8C29] = (1,) # GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS
_m[0x8C29] = (1,) # GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS_ARB
_m[0x8C29] = (1,) # GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS_EXT
_m[0x8DE1] = (1,) # GL_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS
_m[0x8DE1] = (1,) # GL_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS_ARB
_m[0x8DE1] = (1,) # GL_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS_EXT
_m[0x8A2C] = (1,) # GL_MAX_GEOMETRY_UNIFORM_BLOCKS
_m[0x8DDF] = (1,) # GL_MAX_GEOMETRY_UNIFORM_COMPONENTS
_m[0x8DDF] = (1,) # GL_MAX_GEOMETRY_UNIFORM_COMPONENTS_ARB
_m[0x8DDF] = (1,) # GL_MAX_GEOMETRY_UNIFORM_COMPONENTS_EXT
_m[0x8DDD] = (1,) # GL_MAX_GEOMETRY_VARYING_COMPONENTS_ARB
_m[0x8DDD] = (1,) # GL_MAX_GEOMETRY_VARYING_COMPONENTS_EXT
_m[0x906D] = (1,) # GL_MAX_IMAGE_SAMPLES
_m[0x906D] = (1,) # GL_MAX_IMAGE_SAMPLES_EXT
_m[0x8F38] = (1,) # GL_MAX_IMAGE_UNITS
_m[0x8F38] = (1,) # GL_MAX_IMAGE_UNITS_EXT
_m[0x9110] = (1,) # GL_MAX_INTEGER_SAMPLES
_m[0x82E8] = (1,) # GL_MAX_LABEL_LENGTH
_m[0x0D31] = (1,) # GL_MAX_LIGHTS
_m[0x0B31] = (1,) # GL_MAX_LIST_NESTING
_m[0x86D6] = (1,) # GL_MAX_MAP_TESSELLATION_NV
_m[0x8841] = (1,) # GL_MAX_MATRIX_PALETTE_STACK_DEPTH_ARB
_m[0x0D36] = (1,) # GL_MAX_MODELVIEW_STACK_DEPTH
_m[0x8E11] = (1,) # GL_MAX_MULTISAMPLE_COVERAGE_MODES_NV
_m[0x92F6] = (1,) # GL_MAX_NAME_LENGTH
_m[0x0D37] = (1,) # GL_MAX_NAME_STACK_DEPTH
_m[0x92F7] = (1,) # GL_MAX_NUM_ACTIVE_VARIABLES
_m[0x92F8] = (1,) # GL_MAX_NUM_COMPATIBLE_SUBROUTINES
_m[0x87CA] = (1,) # GL_MAX_OPTIMIZED_VERTEX_SHADER_INSTRUCTIONS_EXT
_m[0x87CE] = (1,) # GL_MAX_OPTIMIZED_VERTEX_SHADER_LOCALS_EXT
_m[0x87CC] = (1,) # GL_MAX_OPTIMIZED_VERTEX_SHADER_LOCAL_CONSTANTS_EXT
_m[0x87CB] = (1,) # GL_MAX_OPTIMIZED_VERTEX_SHADER_VARIANTS_EXT
_m[0x8842] = (1,) # GL_MAX_PALETTE_MATRICES_ARB
_m[0x8E7D] = (1,) # GL_MAX_PATCH_VERTICES
_m[0x0D34] = (1,) # GL_MAX_PIXEL_MAP_TABLE
_m[0x8337] = (1,) # GL_MAX_PIXEL_TRANSFORM_2D_STACK_DEPTH_EXT
_m[0x87F1] = (1,) # GL_MAX_PN_TRIANGLES_TESSELATION_LEVEL_ATI
_m[0x88B1] = (1,) # GL_MAX_PROGRAM_ADDRESS_REGISTERS_ARB
_m[0x880B] = (1,) # GL_MAX_PROGRAM_ALU_INSTRUCTIONS_ARB
_m[0x88AD] = (1,) # GL_MAX_PROGRAM_ATTRIBS_ARB
_m[0x88F5] = (1,) # GL_MAX_PROGRAM_CALL_DEPTH_NV
_m[0x88B5] = (1,) # GL_MAX_PROGRAM_ENV_PARAMETERS_ARB
_m[0x88F4] = (1,) # GL_MAX_PROGRAM_EXEC_INSTRUCTIONS_NV
_m[0x88F6] = (1,) # GL_MAX_PROGRAM_IF_DEPTH_NV
_m[0x88A1] = (1,) # GL_MAX_PROGRAM_INSTRUCTIONS_ARB
_m[0x88B4] = (1,) # GL_MAX_PROGRAM_LOCAL_PARAMETERS_ARB
_m[0x88F8] = (1,) # GL_MAX_PROGRAM_LOOP_COUNT_NV
_m[0x88F7] = (1,) # GL_MAX_PROGRAM_LOOP_DEPTH_NV
_m[0x862F] = (1,) # GL_MAX_PROGRAM_MATRICES_ARB
_m[0x862E] = (1,) # GL_MAX_PROGRAM_MATRIX_STACK_DEPTH_ARB
_m[0x88B3] = (1,) # GL_MAX_PROGRAM_NATIVE_ADDRESS_REGISTERS_ARB
_m[0x880E] = (1,) # GL_MAX_PROGRAM_NATIVE_ALU_INSTRUCTIONS_ARB
_m[0x88AF] = (1,) # GL_MAX_PROGRAM_NATIVE_ATTRIBS_ARB
_m[0x88A3] = (1,) # GL_MAX_PROGRAM_NATIVE_INSTRUCTIONS_ARB
_m[0x88AB] = (1,) # GL_MAX_PROGRAM_NATIVE_PARAMETERS_ARB
_m[0x88A7] = (1,) # GL_MAX_PROGRAM_NATIVE_TEMPORARIES_ARB
_m[0x8810] = (1,) # GL_MAX_PROGRAM_NATIVE_TEX_INDIRECTIONS_ARB
_m[0x880F] = (1,) # GL_MAX_PROGRAM_NATIVE_TEX_INSTRUCTIONS_ARB
_m[0x8C27] = (1,) # GL_MAX_PROGRAM_OUTPUT_VERTICES_NV
_m[0x88A9] = (1,) # GL_MAX_PROGRAM_PARAMETERS_ARB
_m[0x88A5] = (1,) # GL_MAX_PROGRAM_TEMPORARIES_ARB
_m[0x8905] = (1,) # GL_MAX_PROGRAM_TEXEL_OFFSET
_m[0x8F9F] = (1,) # GL_MAX_PROGRAM_TEXTURE_GATHER_COMPONENTS_ARB
_m[0x8E5F] = (1,) # GL_MAX_PROGRAM_TEXTURE_GATHER_OFFSET_ARB
_m[0x8E5F] = (1,) # GL_MAX_PROGRAM_TEXTURE_GATHER_OFFSET_NV
_m[0x880D] = (1,) # GL_MAX_PROGRAM_TEX_INDIRECTIONS_ARB
_m[0x880C] = (1,) # GL_MAX_PROGRAM_TEX_INSTRUCTIONS_ARB
_m[0x8C28] = (1,) # GL_MAX_PROGRAM_TOTAL_OUTPUT_COMPONENTS_NV
_m[0x0D38] = (1,) # GL_MAX_PROJECTION_STACK_DEPTH
_m[0x86D7] = (1,) # GL_MAX_RATIONAL_EVAL_ORDER_NV
_m[0x84F8] = (1,) # GL_MAX_RECTANGLE_TEXTURE_SIZE
_m[0x84F8] = (1,) # GL_MAX_RECTANGLE_TEXTURE_SIZE_ARB
_m[0x84F8] = (1,) # GL_MAX_RECTANGLE_TEXTURE_SIZE_NV
_m[0x84E8] = (1,) # GL_MAX_RENDERBUFFER_SIZE
_m[0x84E8] = (1,) # GL_MAX_RENDERBUFFER_SIZE_EXT
_m[0x8D57] = (1,) # GL_MAX_SAMPLES
_m[0x8D57] = (1,) # GL_MAX_SAMPLES_EXT
_m[0x8E59] = (1,) # GL_MAX_SAMPLE_MASK_WORDS
_m[0x8E59] = (1,) # GL_MAX_SAMPLE_MASK_WORDS_NV
_m[0x9111] = (1,) # GL_MAX_SERVER_WAIT_TIMEOUT
_m[0x90DE] = (1,) # GL_MAX_SHADER_STORAGE_BLOCK_SIZE
_m[0x90DD] = (1,) # GL_MAX_SHADER_STORAGE_BUFFER_BINDINGS
_m[0x8504] = (1,) # GL_MAX_SHININESS_NV
_m[0x919A] = (1,) # GL_MAX_SPARSE_ARRAY_TEXTURE_LAYERS
_m[0x8505] = (1,) # GL_MAX_SPOT_EXPONENT_NV
_m[0x8DE7] = (1,) # GL_MAX_SUBROUTINES
_m[0x8DE8] = (1,) # GL_MAX_SUBROUTINE_UNIFORM_LOCATIONS
_m[0x92D3] = (1,) # GL_MAX_TESS_CONTROL_ATOMIC_COUNTERS
_m[0x92CD] = (1,) # GL_MAX_TESS_CONTROL_ATOMIC_COUNTER_BUFFERS
_m[0x90CB] = (1,) # GL_MAX_TESS_CONTROL_IMAGE_UNIFORMS
_m[0x886C] = (1,) # GL_MAX_TESS_CONTROL_INPUT_COMPONENTS
_m[0x8E83] = (1,) # GL_MAX_TESS_CONTROL_OUTPUT_COMPONENTS
_m[0x90D8] = (1,) # GL_MAX_TESS_CONTROL_SHADER_STORAGE_BLOCKS
_m[0x8E81] = (1,) # GL_MAX_TESS_CONTROL_TEXTURE_IMAGE_UNITS
_m[0x8E85] = (1,) # GL_MAX_TESS_CONTROL_TOTAL_OUTPUT_COMPONENTS
_m[0x8E89] = (1,) # GL_MAX_TESS_CONTROL_UNIFORM_BLOCKS
_m[0x8E7F] = (1,) # GL_MAX_TESS_CONTROL_UNIFORM_COMPONENTS
_m[0x92D4] = (1,) # GL_MAX_TESS_EVALUATION_ATOMIC_COUNTERS
_m[0x92CE] = (1,) # GL_MAX_TESS_EVALUATION_ATOMIC_COUNTER_BUFFERS
_m[0x90CC] = (1,) # GL_MAX_TESS_EVALUATION_IMAGE_UNIFORMS
_m[0x886D] = (1,) # GL_MAX_TESS_EVALUATION_INPUT_COMPONENTS
_m[0x8E86] = (1,) # GL_MAX_TESS_EVALUATION_OUTPUT_COMPONENTS
_m[0x90D9] = (1,) # GL_MAX_TESS_EVALUATION_SHADER_STORAGE_BLOCKS
_m[0x8E82] = (1,) # GL_MAX_TESS_EVALUATION_TEXTURE_IMAGE_UNITS
_m[0x8E8A] = (1,) # GL_MAX_TESS_EVALUATION_UNIFORM_BLOCKS
_m[0x8E80] = (1,) # GL_MAX_TESS_EVALUATION_UNIFORM_COMPONENTS
_m[0x8E7E] = (1,) # GL_MAX_TESS_GEN_LEVEL
_m[0x8E84] = (1,) # GL_MAX_TESS_PATCH_COMPONENTS
_m[0x8C2B] = (1,) # GL_MAX_TEXTURE_BUFFER_SIZE
_m[0x8C2B] = (1,) # GL_MAX_TEXTURE_BUFFER_SIZE_ARB
_m[0x8C2B] = (1,) # GL_MAX_TEXTURE_BUFFER_SIZE_EXT
_m[0x8871] = (1,) # GL_MAX_TEXTURE_COORDS
_m[0x8871] = (1,) # GL_MAX_TEXTURE_COORDS_ARB
_m[0x8871] = (1,) # GL_MAX_TEXTURE_COORDS_NV
_m[0x8872] = (1,) # GL_MAX_TEXTURE_IMAGE_UNITS
_m[0x8872] = (1,) # GL_MAX_TEXTURE_IMAGE_UNITS_ARB
_m[0x8872] = (1,) # GL_MAX_TEXTURE_IMAGE_UNITS_NV
_m[0x84FD] = (1,) # GL_MAX_TEXTURE_LOD_BIAS
_m[0x84FD] = (1,) # GL_MAX_TEXTURE_LOD_BIAS_EXT
_m[0x84FF] = (1,) # GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT
_m[0x0D33] = (1,) # GL_MAX_TEXTURE_SIZE
_m[0x0D39] = (1,) # GL_MAX_TEXTURE_STACK_DEPTH
_m[0x84E2] = (1,) # GL_MAX_TEXTURE_UNITS
_m[0x84E2] = (1,) # GL_MAX_TEXTURE_UNITS_ARB
_m[0x862F] = (1,) # GL_MAX_TRACK_MATRICES_NV
_m[0x862E] = (1,) # GL_MAX_TRACK_MATRIX_STACK_DEPTH_NV
_m[0x8E70] = (1,) # GL_MAX_TRANSFORM_FEEDBACK_BUFFERS
_m[0x8A30] = (1,) # GL_MAX_UNIFORM_BLOCK_SIZE
_m[0x8A2F] = (1,) # GL_MAX_UNIFORM_BUFFER_BINDINGS
_m[0x826E] = (1,) # GL_MAX_UNIFORM_LOCATIONS
_m[0x8B4B] = (1,) # GL_MAX_VARYING_COMPONENTS
_m[0x8B4B] = (1,) # GL_MAX_VARYING_COMPONENTS_EXT
_m[0x8B4B] = (1,) # GL_MAX_VARYING_FLOATS
_m[0x8B4B] = (1,) # GL_MAX_VARYING_FLOATS_ARB
_m[0x8DFC] = (1,) # GL_MAX_VARYING_VECTORS
_m[0x8520] = (1,) # GL_MAX_VERTEX_ARRAY_RANGE_ELEMENT_NV
_m[0x92D2] = (1,) # GL_MAX_VERTEX_ATOMIC_COUNTERS
_m[0x92CC] = (1,) # GL_MAX_VERTEX_ATOMIC_COUNTER_BUFFERS
_m[0x8869] = (1,) # GL_MAX_VERTEX_ATTRIBS
_m[0x8869] = (1,) # GL_MAX_VERTEX_ATTRIBS_ARB
_m[0x82DA] = (1,) # GL_MAX_VERTEX_ATTRIB_BINDINGS
_m[0x82D9] = (1,) # GL_MAX_VERTEX_ATTRIB_RELATIVE_OFFSET
_m[0x8DE2] = (1,) # GL_MAX_VERTEX_BINDABLE_UNIFORMS_EXT
_m[0x90CA] = (1,) # GL_MAX_VERTEX_IMAGE_UNIFORMS
_m[0x87C5] = (1,) # GL_MAX_VERTEX_SHADER_INSTRUCTIONS_EXT
_m[0x87C7] = (1,) # GL_MAX_VERTEX_SHADER_INVARIANTS_EXT
_m[0x87C9] = (1,) # GL_MAX_VERTEX_SHADER_LOCALS_EXT
_m[0x87C8] = (1,) # GL_MAX_VERTEX_SHADER_LOCAL_CONSTANTS_EXT
_m[0x90D6] = (1,) # GL_MAX_VERTEX_SHADER_STORAGE_BLOCKS
_m[0x87C6] = (1,) # GL_MAX_VERTEX_SHADER_VARIANTS_EXT
_m[0x8E71] = (1,) # GL_MAX_VERTEX_STREAMS
_m[0x876B] = (1,) # GL_MAX_VERTEX_STREAMS_ATI
_m[0x8B4C] = (1,) # GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS
_m[0x8B4C] = (1,) # GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS_ARB
_m[0x8A2B] = (1,) # GL_MAX_VERTEX_UNIFORM_BLOCKS
_m[0x8B4A] = (1,) # GL_MAX_VERTEX_UNIFORM_COMPONENTS
_m[0x8B4A] = (1,) # GL_MAX_VERTEX_UNIFORM_COMPONENTS_ARB
_m[0x8DFB] = (1,) # GL_MAX_VERTEX_UNIFORM_VECTORS
_m[0x86A4] = (1,) # GL_MAX_VERTEX_UNITS_ARB
_m[0x8DDE] = (1,) # GL_MAX_VERTEX_VARYING_COMPONENTS_ARB
_m[0x8DDE] = (1,) # GL_MAX_VERTEX_VARYING_COMPONENTS_EXT
_m[0x825B] = (1,) # GL_MAX_VIEWPORTS
_m[0x0D3A] = (2,) # GL_MAX_VIEWPORT_DIMS
_m[0x802E] = (1,) # GL_MINMAX
_m[0x802E] = (1,) # GL_MINMAX_EXT
_m[0x802F] = (1,) # GL_MINMAX_FORMAT
_m[0x802F] = (1,) # GL_MINMAX_FORMAT_EXT
_m[0x8030] = (1,) # GL_MINMAX_SINK
_m[0x8030] = (1,) # GL_MINMAX_SINK_EXT
_m[0x821C] = (1,) # GL_MINOR_VERSION
_m[0x8904] = (1,) # GL_MIN_PROGRAM_TEXEL_OFFSET
_m[0x0BA6] = (4,4) # GL_MODELVIEW0_MATRIX_EXT
_m[0x0BA3] = (1,) # GL_MODELVIEW0_STACK_DEPTH_EXT
_m[0x872A] = (4,4) # GL_MODELVIEW10_ARB
_m[0x872B] = (4,4) # GL_MODELVIEW11_ARB
_m[0x872C] = (4,4) # GL_MODELVIEW12_ARB
_m[0x872D] = (4,4) # GL_MODELVIEW13_ARB
_m[0x872E] = (4,4) # GL_MODELVIEW14_ARB
_m[0x872F] = (4,4) # GL_MODELVIEW15_ARB
_m[0x8730] = (4,4) # GL_MODELVIEW16_ARB
_m[0x8731] = (4,4) # GL_MODELVIEW17_ARB
_m[0x8732] = (4,4) # GL_MODELVIEW18_ARB
_m[0x8733] = (4,4) # GL_MODELVIEW19_ARB
_m[0x8506] = (4,4) # GL_MODELVIEW1_MATRIX_EXT
_m[0x8502] = (1,) # GL_MODELVIEW1_STACK_DEPTH_EXT
_m[0x8734] = (4,4) # GL_MODELVIEW20_ARB
_m[0x8735] = (4,4) # GL_MODELVIEW21_ARB
_m[0x8736] = (4,4) # GL_MODELVIEW22_ARB
_m[0x8737] = (4,4) # GL_MODELVIEW23_ARB
_m[0x8738] = (4,4) # GL_MODELVIEW24_ARB
_m[0x8739] = (4,4) # GL_MODELVIEW25_ARB
_m[0x873A] = (4,4) # GL_MODELVIEW26_ARB
_m[0x873B] = (4,4) # GL_MODELVIEW27_ARB
_m[0x873C] = (4,4) # GL_MODELVIEW28_ARB
_m[0x873D] = (4,4) # GL_MODELVIEW29_ARB
_m[0x8722] = (4,4) # GL_MODELVIEW2_ARB
_m[0x873E] = (4,4) # GL_MODELVIEW30_ARB
_m[0x873F] = (4,4) # GL_MODELVIEW31_ARB
_m[0x8723] = (4,4) # GL_MODELVIEW3_ARB
_m[0x8724] = (4,4) # GL_MODELVIEW4_ARB
_m[0x8725] = (4,4) # GL_MODELVIEW5_ARB
_m[0x8726] = (4,4) # GL_MODELVIEW6_ARB
_m[0x8727] = (4,4) # GL_MODELVIEW7_ARB
_m[0x8728] = (4,4) # GL_MODELVIEW8_ARB
_m[0x8729] = (4,4) # GL_MODELVIEW9_ARB
_m[0x0BA6] = (4, 4) # GL_MODELVIEW_MATRIX
_m[0x0BA3] = (1,) # GL_MODELVIEW_STACK_DEPTH
_m[0x809D] = (1,) # GL_MULTISAMPLE
_m[0x86B2] = (1,) # GL_MULTISAMPLE_3DFX
_m[0x809D] = (1,) # GL_MULTISAMPLE_ARB
_m[0x809D] = (1,) # GL_MULTISAMPLE_EXT
_m[0x8534] = (1,) # GL_MULTISAMPLE_FILTER_HINT_NV
_m[0x809D] = (1,) # GL_MULTISAMPLE_SGIS
_m[0x8DEA] = (1,) # GL_NAMED_STRING_TYPE_ARB
_m[0x92F9] = (1,) # GL_NAME_LENGTH
_m[0x0D70] = (1,) # GL_NAME_STACK_DEPTH
_m[0x0BA1] = (1,) # GL_NORMALIZE
_m[0x8075] = (1,) # GL_NORMAL_ARRAY
_m[0x8897] = (1,) # GL_NORMAL_ARRAY_BUFFER_BINDING
_m[0x8897] = (1,) # GL_NORMAL_ARRAY_BUFFER_BINDING_ARB
_m[0x8080] = (1,) # GL_NORMAL_ARRAY_COUNT_EXT
_m[0x8075] = (1,) # GL_NORMAL_ARRAY_EXT
_m[0x8F2C] = (1,) # GL_NORMAL_ARRAY_LENGTH_NV
_m[0x808F] = (1,) # GL_NORMAL_ARRAY_POINTER
_m[0x807F] = (1,) # GL_NORMAL_ARRAY_STRIDE
_m[0x807F] = (1,) # GL_NORMAL_ARRAY_STRIDE_EXT
_m[0x807E] = (1,) # GL_NORMAL_ARRAY_TYPE
_m[0x807E] = (1,) # GL_NORMAL_ARRAY_TYPE_EXT
_m[0x8261] = (1,) # GL_NO_RESET_NOTIFICATION_ARB
_m[0x9304] = (1,) # GL_NUM_ACTIVE_VARIABLES
_m[0x8E4A] = (1,) # GL_NUM_COMPATIBLE_SUBROUTINES
_m[0x86A2] = (1,) # GL_NUM_COMPRESSED_TEXTURE_FORMATS
_m[0x86A2] = (1,) # GL_NUM_COMPRESSED_TEXTURE_FORMATS_ARB
_m[0x821D] = (1,) # GL_NUM_EXTENSIONS
_m[0x896F] = (1,) # GL_NUM_FRAGMENT_CONSTANTS_ATI
_m[0x896E] = (1,) # GL_NUM_FRAGMENT_REGISTERS_ATI
_m[0x854E] = (1,) # GL_NUM_GENERAL_COMBINERS_NV
_m[0x8973] = (1,) # GL_NUM_INPUT_INTERPOLATOR_COMPONENTS_ATI
_m[0x8971] = (1,) # GL_NUM_INSTRUCTIONS_PER_PASS_ATI
_m[0x8972] = (1,) # GL_NUM_INSTRUCTIONS_TOTAL_ATI
_m[0x8974] = (1,) # GL_NUM_LOOPBACK_COMPONENTS_ATI
_m[0x8970] = (1,) # GL_NUM_PASSES_ATI
_m[0x87FE] = (1,) # GL_NUM_PROGRAM_BINARY_FORMATS
_m[0x8DF9] = (1,) # GL_NUM_SHADER_BINARY_FORMATS
_m[0x81F7] = (7,) # GL_OBJECT_LINE_SGIS
_m[0x2501] = (4,) # GL_OBJECT_PLANE
_m[0x81F5] = (4,) # GL_OBJECT_POINT_SGIS
_m[0x8B4E] = (1,) # GL_OBJECT_TYPE_ARB
_m[0x8165] = (1,) # GL_OCCLUSION_TEST_HP
_m[0x8166] = (1,) # GL_OCCLUSION_TEST_RESULT_HP
_m[0x92FC] = (1,) # GL_OFFSET
_m[0x86E3] = (1,) # GL_OFFSET_TEXTURE_BIAS_NV
_m[0x86E1] = (4,) # GL_OFFSET_TEXTURE_MATRIX_NV
_m[0x86E2] = (1,) # GL_OFFSET_TEXTURE_SCALE_NV
_m[0x8598] = (1,) # GL_OPERAND0_ALPHA
_m[0x8590] = (1,) # GL_OPERAND0_RGB
_m[0x8599] = (1,) # GL_OPERAND1_ALPHA
_m[0x8591] = (1,) # GL_OPERAND1_RGB
_m[0x859A] = (1,) # GL_OPERAND2_ALPHA
_m[0x8592] = (1,) # GL_OPERAND2_RGB
_m[0x859B] = (1,) # GL_OPERAND3_ALPHA_NV
_m[0x8593] = (1,) # GL_OPERAND3_RGB_NV
_m[0x0D05] = (1,) # GL_PACK_ALIGNMENT
_m[0x800E] = (1,) # GL_PACK_CMYK_HINT_EXT
_m[0x912D] = (1,) # GL_PACK_COMPRESSED_BLOCK_DEPTH
_m[0x912C] = (1,) # GL_PACK_COMPRESSED_BLOCK_HEIGHT
_m[0x912E] = (1,) # GL_PACK_COMPRESSED_BLOCK_SIZE
_m[0x912B] = (1,) # GL_PACK_COMPRESSED_BLOCK_WIDTH
_m[0x8131] = (1,) # GL_PACK_IMAGE_DEPTH_SGIS
_m[0x806C] = (1,) # GL_PACK_IMAGE_HEIGHT
_m[0x806C] = (1,) # GL_PACK_IMAGE_HEIGHT_EXT
_m[0x8758] = (1,) # GL_PACK_INVERT_MESA
_m[0x0D01] = (1,) # GL_PACK_LSB_FIRST
_m[0x8984] = (1,) # GL_PACK_RESAMPLE_OML
_m[0x842C] = (1,) # GL_PACK_RESAMPLE_SGIX
_m[0x8A15] = (1,) # GL_PACK_ROW_BYTES_APPLE
_m[0x0D02] = (1,) # GL_PACK_ROW_LENGTH
_m[0x806B] = (1,) # GL_PACK_SKIP_IMAGES
_m[0x806B] = (1,) # GL_PACK_SKIP_IMAGES_EXT
_m[0x0D04] = (1,) # GL_PACK_SKIP_PIXELS
_m[0x0D03] = (1,) # GL_PACK_SKIP_ROWS
_m[0x8130] = (1,) # GL_PACK_SKIP_VOLUMES_SGIS
_m[0x0D00] = (1,) # GL_PACK_SWAP_BYTES
_m[0x83F4] = (1,) # GL_PARALLEL_ARRAYS_INTEL
_m[0x8E73] = (1,) # GL_PATCH_DEFAULT_INNER_LEVEL
_m[0x8E74] = (1,) # GL_PATCH_DEFAULT_OUTER_LEVEL
_m[0x8E72] = (1,) # GL_PATCH_VERTICES
_m[0x90AC] = (1,) # GL_PATH_FOG_GEN_MODE_NV
_m[0x90BD] = (1,) # GL_PATH_STENCIL_DEPTH_OFFSET_FACTOR_NV
_m[0x90BE] = (1,) # GL_PATH_STENCIL_DEPTH_OFFSET_UNITS_NV
_m[0x90B7] = (1,) # GL_PATH_STENCIL_FUNC_NV
_m[0x90B8] = (1,) # GL_PATH_STENCIL_REF_NV
_m[0x90B9] = (1,) # GL_PATH_STENCIL_VALUE_MASK_NV
_m[0x0C50] = (1,) # GL_PERSPECTIVE_CORRECTION_HINT
_m[0x8535] = (1,) # GL_PER_STAGE_CONSTANTS_NV
_m[0x8864] = (1,) # GL_PIXEL_COUNTER_BITS_NV
_m[0x8355] = (1,) # GL_PIXEL_FRAGMENT_ALPHA_SOURCE_SGIS
_m[0x8354] = (1,) # GL_PIXEL_FRAGMENT_RGB_SOURCE_SGIS
_m[0x8356] = (1,) # GL_PIXEL_GROUP_COLOR_SGIS
_m[0x0C79] = (_L(0xCB9),) # GL_PIXEL_MAP_A_TO_A
_m[0x0CB9] = (1,) # GL_PIXEL_MAP_A_TO_A_SIZE
_m[0x0C78] = (_L(0xCB8),) # GL_PIXEL_MAP_B_TO_B
_m[0x0CB8] = (1,) # GL_PIXEL_MAP_B_TO_B_SIZE
_m[0x0C77] = (_L(0xCB7),) # GL_PIXEL_MAP_G_TO_G
_m[0x0CB7] = (1,) # GL_PIXEL_MAP_G_TO_G_SIZE
_m[0x0C75] = (_L(0xCB5),) # GL_PIXEL_MAP_I_TO_A
_m[0x0CB5] = (1,) # GL_PIXEL_MAP_I_TO_A_SIZE
_m[0x0C74] = (_L(0xCB4),) # GL_PIXEL_MAP_I_TO_B
_m[0x0CB4] = (1,) # GL_PIXEL_MAP_I_TO_B_SIZE
_m[0x0C73] = (_L(0xCB3),) # GL_PIXEL_MAP_I_TO_G
_m[0x0CB3] = (1,) # GL_PIXEL_MAP_I_TO_G_SIZE
_m[0x0C70] = (_L(0xCB0),) # GL_PIXEL_MAP_I_TO_I
_m[0x0CB0] = (1,) # GL_PIXEL_MAP_I_TO_I_SIZE
_m[0x0C72] = (_L(0xCB2),) # GL_PIXEL_MAP_I_TO_R
_m[0x0CB2] = (1,) # GL_PIXEL_MAP_I_TO_R_SIZE
_m[0x0C76] = (_L(0xCB6),) # GL_PIXEL_MAP_R_TO_R
_m[0x0CB6] = (1,) # GL_PIXEL_MAP_R_TO_R_SIZE
_m[0x0C71] = (_L(0xCB1),) # GL_PIXEL_MAP_S_TO_S
_m[0x0CB1] = (1,) # GL_PIXEL_MAP_S_TO_S_SIZE
_m[0x88ED] = (1,) # GL_PIXEL_PACK_BUFFER_BINDING
_m[0x88ED] = (1,) # GL_PIXEL_PACK_BUFFER_BINDING_ARB
_m[0x88ED] = (1,) # GL_PIXEL_PACK_BUFFER_BINDING_EXT
_m[0x8353] = (1,) # GL_PIXEL_TEXTURE_SGIS
_m[0x832B] = (1,) # GL_PIXEL_TEX_GEN_MODE_SGIX
_m[0x8139] = (1,) # GL_PIXEL_TEX_GEN_SGIX
_m[0x813E] = (1,) # GL_PIXEL_TILE_BEST_ALIGNMENT_SGIX
_m[0x813F] = (1,) # GL_PIXEL_TILE_CACHE_INCREMENT_SGIX
_m[0x8145] = (1,) # GL_PIXEL_TILE_CACHE_SIZE_SGIX
_m[0x8144] = (1,) # GL_PIXEL_TILE_GRID_DEPTH_SGIX
_m[0x8143] = (1,) # GL_PIXEL_TILE_GRID_HEIGHT_SGIX
_m[0x8142] = (1,) # GL_PIXEL_TILE_GRID_WIDTH_SGIX
_m[0x8141] = (1,) # GL_PIXEL_TILE_HEIGHT_SGIX
_m[0x8140] = (1,) # GL_PIXEL_TILE_WIDTH_SGIX
_m[0x8336] = (1,) # GL_PIXEL_TRANSFORM_2D_STACK_DEPTH_EXT
_m[0x88EF] = (1,) # GL_PIXEL_UNPACK_BUFFER_BINDING
_m[0x88EF] = (1,) # GL_PIXEL_UNPACK_BUFFER_BINDING_ARB
_m[0x88EF] = (1,) # GL_PIXEL_UNPACK_BUFFER_BINDING_EXT
_m[0x87F3] = (1,) # GL_PN_TRIANGLES_NORMAL_MODE_ATI
_m[0x87F2] = (1,) # GL_PN_TRIANGLES_POINT_MODE_ATI
_m[0x87F4] = (1,) # GL_PN_TRIANGLES_TESSELATION_LEVEL_ATI
_m[0x8129] = (3,) # GL_POINT_DISTANCE_ATTENUATION
_m[0x8129] = (3,) # GL_POINT_DISTANCE_ATTENUATION_ARB
_m[0x8128] = (1,) # GL_POINT_FADE_THRESHOLD_SIZE
_m[0x8128] = (1,) # GL_POINT_FADE_THRESHOLD_SIZE_ARB
_m[0x8128] = (1,) # GL_POINT_FADE_THRESHOLD_SIZE_SGIS
_m[0x0B11] = (1,) # GL_POINT_SIZE
_m[0x0B13] = (1,) # GL_POINT_SIZE_GRANULARITY
_m[0x8127] = (1,) # GL_POINT_SIZE_MAX
_m[0x8127] = (1,) # GL_POINT_SIZE_MAX_ARB
_m[0x8127] = (1,) # GL_POINT_SIZE_MAX_SGIS
_m[0x8126] = (1,) # GL_POINT_SIZE_MIN
_m[0x8126] = (1,) # GL_POINT_SIZE_MIN_ARB
_m[0x8126] = (1,) # GL_POINT_SIZE_MIN_SGIS
_m[0x0B12] = (2,) # GL_POINT_SIZE_RANGE
_m[0x0B10] = (1,) # GL_POINT_SMOOTH
_m[0x0C51] = (1,) # GL_POINT_SMOOTH_HINT
_m[0x8861] = (1,) # GL_POINT_SPRITE
_m[0x8861] = (1,) # GL_POINT_SPRITE_ARB
_m[0x8CA0] = (1,) # GL_POINT_SPRITE_COORD_ORIGIN
_m[0x8861] = (1,) # GL_POINT_SPRITE_NV
_m[0x8863] = (1,) # GL_POINT_SPRITE_R_MODE_NV
_m[0x0B40] = (2,) # GL_POLYGON_MODE
_m[0x8039] = (1,) # GL_POLYGON_OFFSET_BIAS_EXT
_m[0x8037] = (1,) # GL_POLYGON_OFFSET_EXT
_m[0x8038] = (1,) # GL_POLYGON_OFFSET_FACTOR
_m[0x8038] = (1,) # GL_POLYGON_OFFSET_FACTOR_EXT
_m[0x8037] = (1,) # GL_POLYGON_OFFSET_FILL
_m[0x2A02] = (1,) # GL_POLYGON_OFFSET_LINE
_m[0x2A01] = (1,) # GL_POLYGON_OFFSET_POINT
_m[0x2A00] = (1,) # GL_POLYGON_OFFSET_UNITS
_m[0x0B41] = (1,) # GL_POLYGON_SMOOTH
_m[0x0C53] = (1,) # GL_POLYGON_SMOOTH_HINT
_m[0x0B42] = (1,) # GL_POLYGON_STIPPLE
_m[0x1203] = (4,) # GL_POSITION
_m[0x80BB] = (1,) # GL_POST_COLOR_MATRIX_ALPHA_BIAS
_m[0x80BB] = (1,) # GL_POST_COLOR_MATRIX_ALPHA_BIAS_SGI
_m[0x80B7] = (1,) # GL_POST_COLOR_MATRIX_ALPHA_SCALE
_m[0x80B7] = (1,) # GL_POST_COLOR_MATRIX_ALPHA_SCALE_SGI
_m[0x80BA] = (1,) # GL_POST_COLOR_MATRIX_BLUE_BIAS
_m[0x80BA] = (1,) # GL_POST_COLOR_MATRIX_BLUE_BIAS_SGI
_m[0x80B6] = (1,) # GL_POST_COLOR_MATRIX_BLUE_SCALE
_m[0x80B6] = (1,) # GL_POST_COLOR_MATRIX_BLUE_SCALE_SGI
_m[0x80D2] = (1,) # GL_POST_COLOR_MATRIX_COLOR_TABLE
_m[0x80D2] = (1,) # GL_POST_COLOR_MATRIX_COLOR_TABLE_SGI
_m[0x80B9] = (1,) # GL_POST_COLOR_MATRIX_GREEN_BIAS
_m[0x80B9] = (1,) # GL_POST_COLOR_MATRIX_GREEN_BIAS_SGI
_m[0x80B5] = (1,) # GL_POST_COLOR_MATRIX_GREEN_SCALE
_m[0x80B5] = (1,) # GL_POST_COLOR_MATRIX_GREEN_SCALE_SGI
_m[0x80B8] = (1,) # GL_POST_COLOR_MATRIX_RED_BIAS
_m[0x80B8] = (1,) # GL_POST_COLOR_MATRIX_RED_BIAS_SGI
_m[0x80B4] = (1,) # GL_POST_COLOR_MATRIX_RED_SCALE
_m[0x80B4] = (1,) # GL_POST_COLOR_MATRIX_RED_SCALE_SGI
_m[0x8023] = (1,) # GL_POST_CONVOLUTION_ALPHA_BIAS
_m[0x8023] = (1,) # GL_POST_CONVOLUTION_ALPHA_BIAS_EXT
_m[0x801F] = (1,) # GL_POST_CONVOLUTION_ALPHA_SCALE
_m[0x801F] = (1,) # GL_POST_CONVOLUTION_ALPHA_SCALE_EXT
_m[0x8022] = (1,) # GL_POST_CONVOLUTION_BLUE_BIAS
_m[0x8022] = (1,) # GL_POST_CONVOLUTION_BLUE_BIAS_EXT
_m[0x801E] = (1,) # GL_POST_CONVOLUTION_BLUE_SCALE
_m[0x801E] = (1,) # GL_POST_CONVOLUTION_BLUE_SCALE_EXT
_m[0x80D1] = (1,) # GL_POST_CONVOLUTION_COLOR_TABLE
_m[0x80D1] = (1,) # GL_POST_CONVOLUTION_COLOR_TABLE_SGI
_m[0x8021] = (1,) # GL_POST_CONVOLUTION_GREEN_BIAS
_m[0x8021] = (1,) # GL_POST_CONVOLUTION_GREEN_BIAS_EXT
_m[0x801D] = (1,) # GL_POST_CONVOLUTION_GREEN_SCALE
_m[0x801D] = (1,) # GL_POST_CONVOLUTION_GREEN_SCALE_EXT
_m[0x8020] = (1,) # GL_POST_CONVOLUTION_RED_BIAS
_m[0x8020] = (1,) # GL_POST_CONVOLUTION_RED_BIAS_EXT
_m[0x801C] = (1,) # GL_POST_CONVOLUTION_RED_SCALE
_m[0x801C] = (1,) # GL_POST_CONVOLUTION_RED_SCALE_EXT
_m[0x817B] = (1,) # GL_POST_TEXTURE_FILTER_BIAS_RANGE_SGIX
_m[0x8179] = (1,) # GL_POST_TEXTURE_FILTER_BIAS_SGIX
_m[0x817C] = (1,) # GL_POST_TEXTURE_FILTER_SCALE_RANGE_SGIX
_m[0x817A] = (1,) # GL_POST_TEXTURE_FILTER_SCALE_SGIX
_m[0x86E4] = (1,) # GL_PREVIOUS_TEXTURE_INPUT_NV
_m[0x8F9D] = (1,) # GL_PRIMITIVE_RESTART
_m[0x8D69] = (1,) # GL_PRIMITIVE_RESTART_FIXED_INDEX
_m[0x8F9E] = (1,) # GL_PRIMITIVE_RESTART_INDEX
_m[0x8559] = (1,) # GL_PRIMITIVE_RESTART_INDEX_NV
_m[0x8558] = (1,) # GL_PRIMITIVE_RESTART_NV
_m[0x88B0] = (1,) # GL_PROGRAM_ADDRESS_REGISTERS_ARB
_m[0x8805] = (1,) # GL_PROGRAM_ALU_INSTRUCTIONS_ARB
_m[0x88AC] = (1,) # GL_PROGRAM_ATTRIBS_ARB
_m[0x8741] = (1,) # GL_PROGRAM_BINARY_LENGTH
_m[0x8677] = (1,) # GL_PROGRAM_BINDING_ARB
_m[0x864B] = (1,) # GL_PROGRAM_ERROR_POSITION_ARB
_m[0x864B] = (1,) # GL_PROGRAM_ERROR_POSITION_NV
_m[0x8874] = (1,) # GL_PROGRAM_ERROR_STRING_ARB
_m[0x8876] = (1,) # GL_PROGRAM_FORMAT_ARB
_m[0x88A0] = (1,) # GL_PROGRAM_INSTRUCTIONS_ARB
_m[0x8627] = (1,) # GL_PROGRAM_LENGTH_ARB
_m[0x88B2] = (1,) # GL_PROGRAM_NATIVE_ADDRESS_REGISTERS_ARB
_m[0x8808] = (1,) # GL_PROGRAM_NATIVE_ALU_INSTRUCTIONS_ARB
_m[0x88AE] = (1,) # GL_PROGRAM_NATIVE_ATTRIBS_ARB
_m[0x88A2] = (1,) # GL_PROGRAM_NATIVE_INSTRUCTIONS_ARB
_m[0x88AA] = (1,) # GL_PROGRAM_NATIVE_PARAMETERS_ARB
_m[0x88A6] = (1,) # GL_PROGRAM_NATIVE_TEMPORARIES_ARB
_m[0x880A] = (1,) # GL_PROGRAM_NATIVE_TEX_INDIRECTIONS_ARB
_m[0x8809] = (1,) # GL_PROGRAM_NATIVE_TEX_INSTRUCTIONS_ARB
_m[0x8B40] = (1,) # GL_PROGRAM_OBJECT_ARB
_m[0x88A8] = (1,) # GL_PROGRAM_PARAMETERS_ARB
_m[0x825A] = (1,) # GL_PROGRAM_PIPELINE_BINDING
_m[0x8642] = (1,) # GL_PROGRAM_POINT_SIZE
_m[0x8642] = (1,) # GL_PROGRAM_POINT_SIZE_ARB
_m[0x8642] = (1,) # GL_PROGRAM_POINT_SIZE_EXT
_m[0x8647] = (1,) # GL_PROGRAM_RESIDENT_NV
_m[0x8628] = (1,) # GL_PROGRAM_STRING_ARB
_m[0x8646] = (1,) # GL_PROGRAM_TARGET_NV
_m[0x88A4] = (1,) # GL_PROGRAM_TEMPORARIES_ARB
_m[0x8807] = (1,) # GL_PROGRAM_TEX_INDIRECTIONS_ARB
_m[0x8806] = (1,) # GL_PROGRAM_TEX_INSTRUCTIONS_ARB
_m[0x88B6] = (1,) # GL_PROGRAM_UNDER_NATIVE_LIMITS_ARB
_m[0x0BA7] = (4, 4) # GL_PROJECTION_MATRIX
_m[0x0BA4] = (1,) # GL_PROJECTION_STACK_DEPTH
_m[0x8E4F] = (1,) # GL_PROVOKING_VERTEX
_m[0x8E4F] = (1,) # GL_PROVOKING_VERTEX_EXT
_m[0x1209] = (1,) # GL_QUADRATIC_ATTENUATION
_m[0x8E4C] = (1,) # GL_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION
_m[0x8E4C] = (1,) # GL_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION_EXT
_m[0x9193] = (1,) # GL_QUERY_BUFFER_BINDING_AMD
_m[0x8864] = (1,) # GL_QUERY_COUNTER_BITS
_m[0x8864] = (1,) # GL_QUERY_COUNTER_BITS_ARB
_m[0x8866] = (1,) # GL_QUERY_RESULT
_m[0x8867] = (1,) # GL_QUERY_RESULT_AVAILABLE
_m[0x8C89] = (1,) # GL_RASTERIZER_DISCARD
_m[0x19262] = (1,) # GL_RASTER_POSITION_UNCLIPPED_IBM
_m[0x0C02] = (1,) # GL_READ_BUFFER
_m[0x0C02] = (1,) # GL_READ_BUFFER_EXT
_m[0x0C02] = (1,) # GL_READ_BUFFER_NV
_m[0x8CA8] = (1,) # GL_READ_FRAMEBUFFER
_m[0x8CAA] = (1,) # GL_READ_FRAMEBUFFER_BINDING
_m[0x887B] = (1,) # GL_READ_PIXEL_DATA_RANGE_LENGTH_NV
_m[0x0D15] = (1,) # GL_RED_BIAS
_m[0x0D52] = (1,) # GL_RED_BITS
_m[0x8564] = (1,) # GL_RED_MAX_CLAMP_INGR
_m[0x8560] = (1,) # GL_RED_MIN_CLAMP_INGR
_m[0x0D14] = (1,) # GL_RED_SCALE
_m[0x930B] = (1,) # GL_REFERENCED_BY_COMPUTE_SHADER
_m[0x930A] = (1,) # GL_REFERENCED_BY_FRAGMENT_SHADER
_m[0x9309] = (1,) # GL_REFERENCED_BY_GEOMETRY_SHADER
_m[0x9307] = (1,) # GL_REFERENCED_BY_TESS_CONTROL_SHADER
_m[0x9308] = (1,) # GL_REFERENCED_BY_TESS_EVALUATION_SHADER
_m[0x9306] = (1,) # GL_REFERENCED_BY_VERTEX_SHADER
_m[0x817E] = (4,) # GL_REFERENCE_PLANE_EQUATION_SGIX
_m[0x817D] = (1,) # GL_REFERENCE_PLANE_SGIX
_m[0x8522] = (1,) # GL_REGISTER_COMBINERS_NV
_m[0x8D53] = (1,) # GL_RENDERBUFFER_ALPHA_SIZE
_m[0x8CA7] = (1,) # GL_RENDERBUFFER_BINDING
_m[0x8CA7] = (1,) # GL_RENDERBUFFER_BINDING_EXT
_m[0x8D52] = (1,) # GL_RENDERBUFFER_BLUE_SIZE
_m[0x8D54] = (1,) # GL_RENDERBUFFER_DEPTH_SIZE
_m[0x87FD] = (1,) # GL_RENDERBUFFER_FREE_MEMORY_ATI
_m[0x8D51] = (1,) # GL_RENDERBUFFER_GREEN_SIZE
_m[0x8D43] = (1,) # GL_RENDERBUFFER_HEIGHT
_m[0x8D44] = (1,) # GL_RENDERBUFFER_INTERNAL_FORMAT
_m[0x8D50] = (1,) # GL_RENDERBUFFER_RED_SIZE
_m[0x8CAB] = (1,) # GL_RENDERBUFFER_SAMPLES
_m[0x8D55] = (1,) # GL_RENDERBUFFER_STENCIL_SIZE
_m[0x8D42] = (1,) # GL_RENDERBUFFER_WIDTH
_m[0x1F01] = (1,) # GL_RENDERER
_m[0x0C40] = (1,) # GL_RENDER_MODE
_m[0x85C2] = (1,) # GL_REPLACEMENT_CODE_ARRAY_STRIDE_SUN
_m[0x85C1] = (1,) # GL_REPLACEMENT_CODE_ARRAY_TYPE_SUN
_m[0x81D8] = (1,) # GL_REPLACEMENT_CODE_SUN
_m[0x803A] = (1,) # GL_RESCALE_NORMAL
_m[0x803A] = (1,) # GL_RESCALE_NORMAL_EXT
_m[0x8256] = (1,) # GL_RESET_NOTIFICATION_STRATEGY_ARB
_m[0x8820] = (1,) # GL_RGBA_FLOAT_MODE_ARB
_m[0x8820] = (1,) # GL_RGBA_FLOAT_MODE_ATI
_m[0x8D9E] = (1,) # GL_RGBA_INTEGER_MODE_EXT
_m[0x0C31] = (1,) # GL_RGBA_MODE
_m[0x8C3C] = (1,) # GL_RGBA_SIGNED_COMPONENTS_EXT
_m[0x86D9] = (1,) # GL_RGBA_UNSIGNED_DOT_PRODUCT_MAPPING_NV
_m[0x8573] = (1,) # GL_RGB_SCALE
_m[0x8919] = (1,) # GL_SAMPLER_BINDING
_m[0x80A9] = (1,) # GL_SAMPLES
_m[0x86B4] = (1,) # GL_SAMPLES_3DFX
_m[0x80A9] = (1,) # GL_SAMPLES_ARB
_m[0x80A9] = (1,) # GL_SAMPLES_SGIS
_m[0x809E] = (1,) # GL_SAMPLE_ALPHA_TO_COVERAGE
_m[0x809E] = (1,) # GL_SAMPLE_ALPHA_TO_COVERAGE_ARB
_m[0x809E] = (1,) # GL_SAMPLE_ALPHA_TO_MASK_EXT
_m[0x809E] = (1,) # GL_SAMPLE_ALPHA_TO_MASK_SGIS
_m[0x809F] = (1,) # GL_SAMPLE_ALPHA_TO_ONE
_m[0x809F] = (1,) # GL_SAMPLE_ALPHA_TO_ONE_ARB
_m[0x809F] = (1,) # GL_SAMPLE_ALPHA_TO_ONE_EXT
_m[0x809F] = (1,) # GL_SAMPLE_ALPHA_TO_ONE_SGIS
_m[0x80A8] = (1,) # GL_SAMPLE_BUFFERS
_m[0x86B3] = (1,) # GL_SAMPLE_BUFFERS_3DFX
_m[0x80A8] = (1,) # GL_SAMPLE_BUFFERS_ARB
_m[0x80A8] = (1,) # GL_SAMPLE_BUFFERS_SGIS
_m[0x80A0] = (1,) # GL_SAMPLE_COVERAGE
_m[0x80A0] = (1,) # GL_SAMPLE_COVERAGE_ARB
_m[0x80AB] = (1,) # GL_SAMPLE_COVERAGE_INVERT
_m[0x80AB] = (1,) # GL_SAMPLE_COVERAGE_INVERT_ARB
_m[0x80AA] = (1,) # GL_SAMPLE_COVERAGE_VALUE
_m[0x80AA] = (1,) # GL_SAMPLE_COVERAGE_VALUE_ARB
_m[0x8E51] = (1,) # GL_SAMPLE_MASK
_m[0x80A0] = (1,) # GL_SAMPLE_MASK_EXT
_m[0x80AB] = (1,) # GL_SAMPLE_MASK_INVERT_SGIS
_m[0x8E51] = (1,) # GL_SAMPLE_MASK_NV
_m[0x80A0] = (1,) # GL_SAMPLE_MASK_SGIS
_m[0x8E52] = (1,) # GL_SAMPLE_MASK_VALUE
_m[0x80AA] = (1,) # GL_SAMPLE_MASK_VALUE_SGIS
_m[0x80AC] = (1,) # GL_SAMPLE_PATTERN_EXT
_m[0x80AC] = (1,) # GL_SAMPLE_PATTERN_SGIS
_m[0x8E50] = (1,) # GL_SAMPLE_POSITION
_m[0x8C36] = (1,) # GL_SAMPLE_SHADING_ARB
_m[0x0C10] = (4,) # GL_SCISSOR_BOX
_m[0x0C11] = (1,) # GL_SCISSOR_TEST
_m[0x845E] = (1,) # GL_SECONDARY_COLOR_ARRAY
_m[0x889C] = (1,) # GL_SECONDARY_COLOR_ARRAY_BUFFER_BINDING
_m[0x889C] = (1,) # GL_SECONDARY_COLOR_ARRAY_BUFFER_BINDING_ARB
_m[0x8F31] = (1,) # GL_SECONDARY_COLOR_ARRAY_LENGTH_NV
_m[0x845A] = (1,) # GL_SECONDARY_COLOR_ARRAY_SIZE
_m[0x845A] = (1,) # GL_SECONDARY_COLOR_ARRAY_SIZE_EXT
_m[0x845C] = (1,) # GL_SECONDARY_COLOR_ARRAY_STRIDE
_m[0x845C] = (1,) # GL_SECONDARY_COLOR_ARRAY_STRIDE_EXT
_m[0x845B] = (1,) # GL_SECONDARY_COLOR_ARRAY_TYPE
_m[0x845B] = (1,) # GL_SECONDARY_COLOR_ARRAY_TYPE_EXT
_m[0x0DF3] = (1,) # GL_SELECTION_BUFFER_POINTER
_m[0x0DF4] = (1,) # GL_SELECTION_BUFFER_SIZE
_m[0x8012] = (1,) # GL_SEPARABLE_2D
_m[0x8012] = (1,) # GL_SEPARABLE_2D_EXT
_m[0x8DFA] = (1,) # GL_SHADER_COMPILER
_m[0x86DF] = (1,) # GL_SHADER_OPERATION_NV
_m[0x8B88] = (1,) # GL_SHADER_SOURCE_LENGTH
_m[0x90D2] = (1,) # GL_SHADER_STORAGE_BUFFER
_m[0x90D3] = (1,) # GL_SHADER_STORAGE_BUFFER_BINDING
_m[0x90DF] = (1,) # GL_SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT
_m[0x90D5] = (1,) # GL_SHADER_STORAGE_BUFFER_SIZE
_m[0x90D4] = (1,) # GL_SHADER_STORAGE_BUFFER_START
_m[0x8B4F] = (1,) # GL_SHADER_TYPE
_m[0x0B54] = (1,) # GL_SHADE_MODEL
_m[0x8B8C] = (1,) # GL_SHADING_LANGUAGE_VERSION
_m[0x1601] = (1,) # GL_SHININESS
_m[0x0B23] = (1,) # GL_SMOOTH_LINE_WIDTH_GRANULARITY
_m[0x0B22] = (2,) # GL_SMOOTH_LINE_WIDTH_RANGE
_m[0x0B13] = (1,) # GL_SMOOTH_POINT_SIZE_GRANULARITY
_m[0x0B12] = (2,) # GL_SMOOTH_POINT_SIZE_RANGE
_m[0x858B] = (1,) # GL_SOURCE3_ALPHA_NV
_m[0x8583] = (1,) # GL_SOURCE3_RGB_NV
_m[0x1202] = (4,) # GL_SPECULAR
_m[0x1206] = (1,) # GL_SPOT_CUTOFF
_m[0x1204] = (3,) # GL_SPOT_DIRECTION
_m[0x1205] = (1,) # GL_SPOT_EXPONENT
_m[0x814A] = (3,) # GL_SPRITE_AXIS_SGIX
_m[0x8149] = (1,) # GL_SPRITE_MODE_SGIX
_m[0x8148] = (1,) # GL_SPRITE_SGIX
_m[0x814B] = (3,) # GL_SPRITE_TRANSLATION_SGIX
_m[0x8588] = (1,) # GL_SRC0_ALPHA
_m[0x8580] = (1,) # GL_SRC0_RGB
_m[0x8589] = (1,) # GL_SRC1_ALPHA
_m[0x8581] = (1,) # GL_SRC1_RGB
_m[0x858A] = (1,) # GL_SRC2_ALPHA
_m[0x8582] = (1,) # GL_SRC2_RGB
_m[0x8801] = (1,) # GL_STENCIL_BACK_FAIL
_m[0x8801] = (1,) # GL_STENCIL_BACK_FAIL_ATI
_m[0x8800] = (1,) # GL_STENCIL_BACK_FUNC
_m[0x8800] = (1,) # GL_STENCIL_BACK_FUNC_ATI
_m[0x874D] = (1,) # GL_STENCIL_BACK_OP_VALUE_AMD
_m[0x8802] = (1,) # GL_STENCIL_BACK_PASS_DEPTH_FAIL
_m[0x8802] = (1,) # GL_STENCIL_BACK_PASS_DEPTH_FAIL_ATI
_m[0x8803] = (1,) # GL_STENCIL_BACK_PASS_DEPTH_PASS
_m[0x8803] = (1,) # GL_STENCIL_BACK_PASS_DEPTH_PASS_ATI
_m[0x8CA3] = (1,) # GL_STENCIL_BACK_REF
_m[0x8CA4] = (1,) # GL_STENCIL_BACK_VALUE_MASK
_m[0x8CA5] = (1,) # GL_STENCIL_BACK_WRITEMASK
_m[0x0D57] = (1,) # GL_STENCIL_BITS
_m[0x88F3] = (1,) # GL_STENCIL_CLEAR_TAG_VALUE_EXT
_m[0x0B91] = (1,) # GL_STENCIL_CLEAR_VALUE
_m[0x0B94] = (1,) # GL_STENCIL_FAIL
_m[0x0B92] = (1,) # GL_STENCIL_FUNC
_m[0x874C] = (1,) # GL_STENCIL_OP_VALUE_AMD
_m[0x0B95] = (1,) # GL_STENCIL_PASS_DEPTH_FAIL
_m[0x0B96] = (1,) # GL_STENCIL_PASS_DEPTH_PASS
_m[0x0B97] = (1,) # GL_STENCIL_REF
_m[0x88F2] = (1,) # GL_STENCIL_TAG_BITS_EXT
_m[0x0B90] = (1,) # GL_STENCIL_TEST
_m[0x8910] = (1,) # GL_STENCIL_TEST_TWO_SIDE_EXT
_m[0x0B93] = (1,) # GL_STENCIL_VALUE_MASK
_m[0x0B98] = (1,) # GL_STENCIL_WRITEMASK
_m[0x0C33] = (1,) # GL_STEREO
_m[0x0D50] = (1,) # GL_SUBPIXEL_BITS
_m[0x8439] = (1,) # GL_TANGENT_ARRAY_EXT
_m[0x8442] = (1,) # GL_TANGENT_ARRAY_POINTER_EXT
_m[0x843F] = (1,) # GL_TANGENT_ARRAY_STRIDE_EXT
_m[0x843E] = (1,) # GL_TANGENT_ARRAY_TYPE_EXT
_m[0x9004] = (1,) # GL_TESSELLATION_MODE_AMD
_m[0x8E88] = (1,) # GL_TESS_CONTROL_SHADER
_m[0x8E87] = (1,) # GL_TESS_EVALUATION_SHADER
_m[0x8E76] = (1,) # GL_TESS_GEN_MODE
_m[0x8E79] = (1,) # GL_TESS_GEN_POINT_MODE
_m[0x8E77] = (1,) # GL_TESS_GEN_SPACING
_m[0x8E78] = (1,) # GL_TESS_GEN_VERTEX_ORDER
_m[0x0DE0] = (1,) # GL_TEXTURE_1D
_m[0x8068] = (1,) # GL_TEXTURE_1D_BINDING_EXT
_m[0x875D] = (1,) # GL_TEXTURE_1D_STACK_BINDING_MESAX
_m[0x8759] = (1,) # GL_TEXTURE_1D_STACK_MESAX
_m[0x0DE1] = (1,) # GL_TEXTURE_2D
_m[0x8069] = (1,) # GL_TEXTURE_2D_BINDING_EXT
_m[0x875E] = (1,) # GL_TEXTURE_2D_STACK_BINDING_MESAX
_m[0x875A] = (1,) # GL_TEXTURE_2D_STACK_MESAX
_m[0x806F] = (1,) # GL_TEXTURE_3D
_m[0x806A] = (1,) # GL_TEXTURE_3D_BINDING_EXT
_m[0x806F] = (1,) # GL_TEXTURE_3D_EXT
_m[0x806F] = (1,) # GL_TEXTURE_3D_OES
_m[0x814F] = (1,) # GL_TEXTURE_4D_BINDING_SGIS
_m[0x8134] = (1,) # GL_TEXTURE_4D_SGIS
_m[0x805F] = (1,) # GL_TEXTURE_ALPHA_SIZE
_m[0x8C13] = (1,) # GL_TEXTURE_ALPHA_TYPE
_m[0x834F] = (1,) # GL_TEXTURE_APPLICATION_MODE_EXT
_m[0x813C] = (1,) # GL_TEXTURE_BASE_LEVEL
_m[0x813C] = (1,) # GL_TEXTURE_BASE_LEVEL_SGIS
_m[0x8068] = (1,) # GL_TEXTURE_BINDING_1D
_m[0x8C1C] = (1,) # GL_TEXTURE_BINDING_1D_ARRAY
_m[0x8C1C] = (1,) # GL_TEXTURE_BINDING_1D_ARRAY_EXT
_m[0x8069] = (1,) # GL_TEXTURE_BINDING_2D
_m[0x8C1D] = (1,) # GL_TEXTURE_BINDING_2D_ARRAY
_m[0x8C1D] = (1,) # GL_TEXTURE_BINDING_2D_ARRAY_EXT
_m[0x9104] = (1,) # GL_TEXTURE_BINDING_2D_MULTISAMPLE
_m[0x9105] = (1,) # GL_TEXTURE_BINDING_2D_MULTISAMPLE_ARRAY
_m[0x806A] = (1,) # GL_TEXTURE_BINDING_3D
_m[0x8C2C] = (1,) # GL_TEXTURE_BINDING_BUFFER
_m[0x8C2C] = (1,) # GL_TEXTURE_BINDING_BUFFER_ARB
_m[0x8C2C] = (1,) # GL_TEXTURE_BINDING_BUFFER_EXT
_m[0x8514] = (1,) # GL_TEXTURE_BINDING_CUBE_MAP
_m[0x8514] = (1,) # GL_TEXTURE_BINDING_CUBE_MAP_ARB
_m[0x900A] = (1,) # GL_TEXTURE_BINDING_CUBE_MAP_ARRAY
_m[0x900A] = (1,) # GL_TEXTURE_BINDING_CUBE_MAP_ARRAY_ARB
_m[0x84F6] = (1,) # GL_TEXTURE_BINDING_RECTANGLE
_m[0x84F6] = (1,) # GL_TEXTURE_BINDING_RECTANGLE_ARB
_m[0x84F6] = (1,) # GL_TEXTURE_BINDING_RECTANGLE_NV
_m[0x8E53] = (1,) # GL_TEXTURE_BINDING_RENDERBUFFER_NV
_m[0x805E] = (1,) # GL_TEXTURE_BLUE_SIZE
_m[0x8C12] = (1,) # GL_TEXTURE_BLUE_TYPE
_m[0x1005] = (1,) # GL_TEXTURE_BORDER
_m[0x1004] = (4,) # GL_TEXTURE_BORDER_COLOR
_m[0x1004] = (4,) # GL_TEXTURE_BORDER_COLOR_NV
_m[0x8C2A] = (1,) # GL_TEXTURE_BUFFER
_m[0x8C2A] = (1,) # GL_TEXTURE_BUFFER_ARB
_m[0x8C2D] = (1,) # GL_TEXTURE_BUFFER_DATA_STORE_BINDING
_m[0x8C2D] = (1,) # GL_TEXTURE_BUFFER_DATA_STORE_BINDING_ARB
_m[0x8C2D] = (1,) # GL_TEXTURE_BUFFER_DATA_STORE_BINDING_EXT
_m[0x8C2A] = (1,) # GL_TEXTURE_BUFFER_EXT
_m[0x8C2E] = (1,) # GL_TEXTURE_BUFFER_FORMAT_ARB
_m[0x8C2E] = (1,) # GL_TEXTURE_BUFFER_FORMAT_EXT
_m[0x919D] = (1,) # GL_TEXTURE_BUFFER_OFFSET
_m[0x919F] = (1,) # GL_TEXTURE_BUFFER_OFFSET_ALIGNMENT
_m[0x919E] = (1,) # GL_TEXTURE_BUFFER_SIZE
_m[0x8171] = (2,) # GL_TEXTURE_CLIPMAP_CENTER_SGIX
_m[0x8176] = (1,) # GL_TEXTURE_CLIPMAP_DEPTH_SGIX
_m[0x8172] = (1,) # GL_TEXTURE_CLIPMAP_FRAME_SGIX
_m[0x8173] = (2,) # GL_TEXTURE_CLIPMAP_OFFSET_SGIX
_m[0x8174] = (3,) # GL_TEXTURE_CLIPMAP_VIRTUAL_DEPTH_SGIX
_m[0x80BC] = (1,) # GL_TEXTURE_COLOR_TABLE_SGI
_m[0x81EF] = (4,) # GL_TEXTURE_COLOR_WRITEMASK_SGIS
_m[0x80BF] = (1,) # GL_TEXTURE_COMPARE_FAIL_VALUE_ARB
_m[0x884D] = (1,) # GL_TEXTURE_COMPARE_FUNC
_m[0x884C] = (1,) # GL_TEXTURE_COMPARE_MODE
_m[0x819B] = (1,) # GL_TEXTURE_COMPARE_OPERATOR_SGIX
_m[0x819A] = (1,) # GL_TEXTURE_COMPARE_SGIX
_m[0x86A1] = (1,) # GL_TEXTURE_COMPRESSED
_m[0x86A0] = (1,) # GL_TEXTURE_COMPRESSED_IMAGE_SIZE
_m[0x84EF] = (1,) # GL_TEXTURE_COMPRESSION_HINT
_m[0x84EF] = (1,) # GL_TEXTURE_COMPRESSION_HINT_ARB
_m[0x8078] = (1,) # GL_TEXTURE_COORD_ARRAY
_m[0x889A] = (1,) # GL_TEXTURE_COORD_ARRAY_BUFFER_BINDING
_m[0x889A] = (1,) # GL_TEXTURE_COORD_ARRAY_BUFFER_BINDING_ARB
_m[0x808B] = (1,) # GL_TEXTURE_COORD_ARRAY_COUNT_EXT
_m[0x8078] = (1,) # GL_TEXTURE_COORD_ARRAY_EXT
_m[0x8092] = (1,) # GL_TEXTURE_COORD_ARRAY_POINTER
_m[0x8088] = (1,) # GL_TEXTURE_COORD_ARRAY_SIZE
_m[0x8088] = (1,) # GL_TEXTURE_COORD_ARRAY_SIZE_EXT
_m[0x808A] = (1,) # GL_TEXTURE_COORD_ARRAY_STRIDE
_m[0x808A] = (1,) # GL_TEXTURE_COORD_ARRAY_STRIDE_EXT
_m[0x8089] = (1,) # GL_TEXTURE_COORD_ARRAY_TYPE
_m[0x8089] = (1,) # GL_TEXTURE_COORD_ARRAY_TYPE_EXT
_m[0x8B9D] = (4,) # GL_TEXTURE_CROP_RECT_OES
_m[0x8513] = (1,) # GL_TEXTURE_CUBE_MAP
_m[0x8513] = (1,) # GL_TEXTURE_CUBE_MAP_ARB
_m[0x9009] = (1,) # GL_TEXTURE_CUBE_MAP_ARRAY
_m[0x884F] = (1,) # GL_TEXTURE_CUBE_MAP_SEAMLESS
_m[0x8071] = (1,) # GL_TEXTURE_DEPTH
_m[0x8071] = (1,) # GL_TEXTURE_DEPTH_EXT
_m[0x884A] = (1,) # GL_TEXTURE_DEPTH_SIZE
_m[0x8C16] = (1,) # GL_TEXTURE_DEPTH_TYPE
_m[0x2201] = (4,) # GL_TEXTURE_ENV_COLOR
_m[0x2200] = (1,) # GL_TEXTURE_ENV_MODE
_m[0x9107] = (1,) # GL_TEXTURE_FIXED_SAMPLE_LOCATIONS
_m[0x87FC] = (1,) # GL_TEXTURE_FREE_MEMORY_ATI
_m[0x2500] = (1,) # GL_TEXTURE_GEN_MODE
_m[0x0C63] = (1,) # GL_TEXTURE_GEN_Q
_m[0x0C62] = (1,) # GL_TEXTURE_GEN_R
_m[0x0C60] = (1,) # GL_TEXTURE_GEN_S
_m[0x0C61] = (1,) # GL_TEXTURE_GEN_T
_m[0x805D] = (1,) # GL_TEXTURE_GREEN_SIZE
_m[0x8C11] = (1,) # GL_TEXTURE_GREEN_TYPE
_m[0x1001] = (1,) # GL_TEXTURE_HEIGHT
_m[0x912F] = (1,) # GL_TEXTURE_IMMUTABLE_FORMAT
_m[0x82DF] = (1,) # GL_TEXTURE_IMMUTABLE_LEVELS
_m[0x80ED] = (1,) # GL_TEXTURE_INDEX_SIZE_EXT
_m[0x8061] = (1,) # GL_TEXTURE_INTENSITY_SIZE
_m[0x8C15] = (1,) # GL_TEXTURE_INTENSITY_TYPE
_m[0x1003] = (1,) # GL_TEXTURE_INTERNAL_FORMAT
_m[0x8350] = (1,) # GL_TEXTURE_LIGHT_EXT
_m[0x8501] = (1,) # GL_TEXTURE_LOD_BIAS
_m[0x8190] = (1,) # GL_TEXTURE_LOD_BIAS_R_SGIX
_m[0x818E] = (1,) # GL_TEXTURE_LOD_BIAS_S_SGIX
_m[0x818F] = (1,) # GL_TEXTURE_LOD_BIAS_T_SGIX
_m[0x8060] = (1,) # GL_TEXTURE_LUMINANCE_SIZE
_m[0x8C14] = (1,) # GL_TEXTURE_LUMINANCE_TYPE
_m[0x2800] = (1,) # GL_TEXTURE_MAG_FILTER
_m[0x8351] = (1,) # GL_TEXTURE_MATERIAL_FACE_EXT
_m[0x8352] = (1,) # GL_TEXTURE_MATERIAL_PARAMETER_EXT
_m[0x0BA8] = (4, 4) # GL_TEXTURE_MATRIX
_m[0x84FE] = (1,) # GL_TEXTURE_MAX_ANISOTROPY_EXT
_m[0x836B] = (1,) # GL_TEXTURE_MAX_CLAMP_R_SGIX
_m[0x8369] = (1,) # GL_TEXTURE_MAX_CLAMP_S_SGIX
_m[0x836A] = (1,) # GL_TEXTURE_MAX_CLAMP_T_SGIX
_m[0x813D] = (1,) # GL_TEXTURE_MAX_LEVEL
_m[0x813D] = (1,) # GL_TEXTURE_MAX_LEVEL_SGIS
_m[0x813B] = (1,) # GL_TEXTURE_MAX_LOD
_m[0x813B] = (1,) # GL_TEXTURE_MAX_LOD_SGIS
_m[0x2801] = (1,) # GL_TEXTURE_MIN_FILTER
_m[0x813A] = (1,) # GL_TEXTURE_MIN_LOD
_m[0x813A] = (1,) # GL_TEXTURE_MIN_LOD_SGIS
_m[0x8066] = (1,) # GL_TEXTURE_PRIORITY
_m[0x85B8] = (1,) # GL_TEXTURE_RANGE_POINTER_APPLE
_m[0x84F5] = (1,) # GL_TEXTURE_RECTANGLE
_m[0x84F5] = (1,) # GL_TEXTURE_RECTANGLE_ARB
_m[0x84F5] = (1,) # GL_TEXTURE_RECTANGLE_NV
_m[0x805C] = (1,) # GL_TEXTURE_RED_SIZE
_m[0x8C10] = (1,) # GL_TEXTURE_RED_TYPE
_m[0x8E54] = (1,) # GL_TEXTURE_RENDERBUFFER_DATA_STORE_BINDING_NV
_m[0x8067] = (1,) # GL_TEXTURE_RESIDENT
_m[0x9106] = (1,) # GL_TEXTURE_SAMPLES
_m[0x86DE] = (1,) # GL_TEXTURE_SHADER_NV
_m[0x8A48] = (1,) # GL_TEXTURE_SRGB_DECODE_EXT
_m[0x0BA5] = (1,) # GL_TEXTURE_STACK_DEPTH
_m[0x88F1] = (1,) # GL_TEXTURE_STENCIL_SIZE
_m[0x85BC] = (1,) # GL_TEXTURE_STORAGE_HINT_APPLE
_m[0x8E45] = (1,) # GL_TEXTURE_SWIZZLE_A
_m[0x8E44] = (1,) # GL_TEXTURE_SWIZZLE_B
_m[0x8E43] = (1,) # GL_TEXTURE_SWIZZLE_G
_m[0x8E42] = (1,) # GL_TEXTURE_SWIZZLE_R
_m[0x8E46] = (4,) # GL_TEXTURE_SWIZZLE_RGBA
_m[0x888F] = (1,) # GL_TEXTURE_UNSIGNED_REMAP_MODE_NV
_m[0x82DD] = (1,) # GL_TEXTURE_VIEW_MIN_LAYER
_m[0x82DB] = (1,) # GL_TEXTURE_VIEW_MIN_LEVEL
_m[0x82DE] = (1,) # GL_TEXTURE_VIEW_NUM_LAYERS
_m[0x82DC] = (1,) # GL_TEXTURE_VIEW_NUM_LEVELS
_m[0x1000] = (1,) # GL_TEXTURE_WIDTH
_m[0x8137] = (1,) # GL_TEXTURE_WRAP_Q_SGIS
_m[0x8072] = (1,) # GL_TEXTURE_WRAP_R
_m[0x8072] = (1,) # GL_TEXTURE_WRAP_R_EXT
_m[0x2802] = (1,) # GL_TEXTURE_WRAP_S
_m[0x2803] = (1,) # GL_TEXTURE_WRAP_T
_m[0x8200] = (1,) # GL_TEXT_FRAGMENT_SHADER_ATI
_m[0x8E28] = (1,) # GL_TIMESTAMP
_m[0x930C] = (1,) # GL_TOP_LEVEL_ARRAY_SIZE
_m[0x930D] = (1,) # GL_TOP_LEVEL_ARRAY_STRIDE
_m[0x8E25] = (1,) # GL_TRANSFORM_FEEDBACK_BINDING
_m[0x8E25] = (1,) # GL_TRANSFORM_FEEDBACK_BINDING_NV
_m[0x8E24] = (1,) # GL_TRANSFORM_FEEDBACK_BUFFER_ACTIVE
_m[0x8E24] = (1,) # GL_TRANSFORM_FEEDBACK_BUFFER_ACTIVE_NV
_m[0x8C8F] = (1,) # GL_TRANSFORM_FEEDBACK_BUFFER_BINDING
_m[0x8C7F] = (1,) # GL_TRANSFORM_FEEDBACK_BUFFER_MODE_NV
_m[0x8E23] = (1,) # GL_TRANSFORM_FEEDBACK_BUFFER_PAUSED
_m[0x8E23] = (1,) # GL_TRANSFORM_FEEDBACK_BUFFER_PAUSED_NV
_m[0x8C85] = (1,) # GL_TRANSFORM_FEEDBACK_BUFFER_SIZE
_m[0x8C84] = (1,) # GL_TRANSFORM_FEEDBACK_BUFFER_START
_m[0x84E6] = (4, 4) # GL_TRANSPOSE_COLOR_MATRIX
_m[0x84E6] = (4, 4) # GL_TRANSPOSE_COLOR_MATRIX_ARB
_m[0x88B7] = (4, 4) # GL_TRANSPOSE_CURRENT_MATRIX_ARB
_m[0x84E3] = (4, 4) # GL_TRANSPOSE_MODELVIEW_MATRIX
_m[0x84E3] = (4, 4) # GL_TRANSPOSE_MODELVIEW_MATRIX_ARB
_m[0x84E4] = (4, 4) # GL_TRANSPOSE_PROJECTION_MATRIX
_m[0x84E4] = (4, 4) # GL_TRANSPOSE_PROJECTION_MATRIX_ARB
_m[0x84E5] = (4, 4) # GL_TRANSPOSE_TEXTURE_MATRIX
_m[0x84E5] = (4, 4) # GL_TRANSPOSE_TEXTURE_MATRIX_ARB
_m[0x92FA] = (1,) # GL_TYPE
_m[0x8A3C] = (1,) # GL_UNIFORM_ARRAY_STRIDE
_m[0x8A42] = (1,) # GL_UNIFORM_BLOCK_ACTIVE_UNIFORMS
_m[0x8A43] = (1,) # GL_UNIFORM_BLOCK_ACTIVE_UNIFORM_INDICES
_m[0x8A3F] = (1,) # GL_UNIFORM_BLOCK_BINDING
_m[0x8A40] = (1,) # GL_UNIFORM_BLOCK_DATA_SIZE
_m[0x8A3A] = (1,) # GL_UNIFORM_BLOCK_INDEX
_m[0x8A41] = (1,) # GL_UNIFORM_BLOCK_NAME_LENGTH
_m[0x8A46] = (1,) # GL_UNIFORM_BLOCK_REFERENCED_BY_FRAGMENT_SHADER
_m[0x8A45] = (1,) # GL_UNIFORM_BLOCK_REFERENCED_BY_GEOMETRY_SHADER
_m[0x84F0] = (1,) # GL_UNIFORM_BLOCK_REFERENCED_BY_TESS_CONTROL_SHADER
_m[0x84F1] = (1,) # GL_UNIFORM_BLOCK_REFERENCED_BY_TESS_EVALUATION_SHADER
_m[0x8A44] = (1,) # GL_UNIFORM_BLOCK_REFERENCED_BY_VERTEX_SHADER
_m[0x8A28] = (1,) # GL_UNIFORM_BUFFER_BINDING
_m[0x8DEF] = (1,) # GL_UNIFORM_BUFFER_BINDING_EXT
_m[0x8A34] = (1,) # GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT
_m[0x8A2A] = (1,) # GL_UNIFORM_BUFFER_SIZE
_m[0x8A29] = (1,) # GL_UNIFORM_BUFFER_START
_m[0x8A3E] = (1,) # GL_UNIFORM_IS_ROW_MAJOR
_m[0x8A3D] = (1,) # GL_UNIFORM_MATRIX_STRIDE
_m[0x8A39] = (1,) # GL_UNIFORM_NAME_LENGTH
_m[0x8A3B] = (1,) # GL_UNIFORM_OFFSET
_m[0x8A38] = (1,) # GL_UNIFORM_SIZE
_m[0x8A37] = (1,) # GL_UNIFORM_TYPE
_m[0x0CF5] = (1,) # GL_UNPACK_ALIGNMENT
_m[0x85B2] = (1,) # GL_UNPACK_CLIENT_STORAGE_APPLE
_m[0x800F] = (1,) # GL_UNPACK_CMYK_HINT_EXT
_m[0x9129] = (1,) # GL_UNPACK_COMPRESSED_BLOCK_DEPTH
_m[0x9128] = (1,) # GL_UNPACK_COMPRESSED_BLOCK_HEIGHT
_m[0x912A] = (1,) # GL_UNPACK_COMPRESSED_BLOCK_SIZE
_m[0x9127] = (1,) # GL_UNPACK_COMPRESSED_BLOCK_WIDTH
_m[0x8133] = (1,) # GL_UNPACK_IMAGE_DEPTH_SGIS
_m[0x806E] = (1,) # GL_UNPACK_IMAGE_HEIGHT
_m[0x806E] = (1,) # GL_UNPACK_IMAGE_HEIGHT_EXT
_m[0x0CF1] = (1,) # GL_UNPACK_LSB_FIRST
_m[0x8985] = (1,) # GL_UNPACK_RESAMPLE_OML
_m[0x842D] = (1,) # GL_UNPACK_RESAMPLE_SGIX
_m[0x8A16] = (1,) # GL_UNPACK_ROW_BYTES_APPLE
_m[0x0CF2] = (1,) # GL_UNPACK_ROW_LENGTH
_m[0x806D] = (1,) # GL_UNPACK_SKIP_IMAGES
_m[0x806D] = (1,) # GL_UNPACK_SKIP_IMAGES_EXT
_m[0x0CF4] = (1,) # GL_UNPACK_SKIP_PIXELS
_m[0x0CF3] = (1,) # GL_UNPACK_SKIP_ROWS
_m[0x8132] = (1,) # GL_UNPACK_SKIP_VOLUMES_SGIS
_m[0x0CF0] = (1,) # GL_UNPACK_SWAP_BYTES
_m[0x8B83] = (1,) # GL_VALIDATE_STATUS
_m[0x87E7] = (1,) # GL_VARIANT_ARRAY_TYPE_EXT
_m[0x87FB] = (1,) # GL_VBO_FREE_MEMORY_ATI
_m[0x1F00] = (1,) # GL_VENDOR
_m[0x1F02] = (1,) # GL_VERSION
_m[0x8074] = (1,) # GL_VERTEX_ARRAY
_m[0x85B5] = (1,) # GL_VERTEX_ARRAY_BINDING
_m[0x85B5] = (1,) # GL_VERTEX_ARRAY_BINDING_APPLE
_m[0x8896] = (1,) # GL_VERTEX_ARRAY_BUFFER_BINDING
_m[0x8896] = (1,) # GL_VERTEX_ARRAY_BUFFER_BINDING_ARB
_m[0x807D] = (1,) # GL_VERTEX_ARRAY_COUNT_EXT
_m[0x8074] = (1,) # GL_VERTEX_ARRAY_EXT
_m[0x8F2B] = (1,) # GL_VERTEX_ARRAY_LENGTH_NV
_m[0x808E] = (1,) # GL_VERTEX_ARRAY_POINTER
_m[0x851E] = (1,) # GL_VERTEX_ARRAY_RANGE_LENGTH_APPLE
_m[0x851E] = (1,) # GL_VERTEX_ARRAY_RANGE_LENGTH_NV
_m[0x851D] = (1,) # GL_VERTEX_ARRAY_RANGE_NV
_m[0x8521] = (1,) # GL_VERTEX_ARRAY_RANGE_POINTER_NV
_m[0x851F] = (1,) # GL_VERTEX_ARRAY_RANGE_VALID_NV
_m[0x807A] = (1,) # GL_VERTEX_ARRAY_SIZE
_m[0x807A] = (1,) # GL_VERTEX_ARRAY_SIZE_EXT
_m[0x807C] = (1,) # GL_VERTEX_ARRAY_STRIDE
_m[0x807C] = (1,) # GL_VERTEX_ARRAY_STRIDE_EXT
_m[0x807B] = (1,) # GL_VERTEX_ARRAY_TYPE
_m[0x807B] = (1,) # GL_VERTEX_ARRAY_TYPE_EXT
_m[0x8650] = (1,) # GL_VERTEX_ATTRIB_ARRAY0_NV
_m[0x865A] = (1,) # GL_VERTEX_ATTRIB_ARRAY10_NV
_m[0x865B] = (1,) # GL_VERTEX_ATTRIB_ARRAY11_NV
_m[0x865C] = (1,) # GL_VERTEX_ATTRIB_ARRAY12_NV
_m[0x865D] = (1,) # GL_VERTEX_ATTRIB_ARRAY13_NV
_m[0x865E] = (1,) # GL_VERTEX_ATTRIB_ARRAY14_NV
_m[0x865F] = (1,) # GL_VERTEX_ATTRIB_ARRAY15_NV
_m[0x8651] = (1,) # GL_VERTEX_ATTRIB_ARRAY1_NV
_m[0x8652] = (1,) # GL_VERTEX_ATTRIB_ARRAY2_NV
_m[0x8653] = (1,) # GL_VERTEX_ATTRIB_ARRAY3_NV
_m[0x8654] = (1,) # GL_VERTEX_ATTRIB_ARRAY4_NV
_m[0x8655] = (1,) # GL_VERTEX_ATTRIB_ARRAY5_NV
_m[0x8656] = (1,) # GL_VERTEX_ATTRIB_ARRAY6_NV
_m[0x8657] = (1,) # GL_VERTEX_ATTRIB_ARRAY7_NV
_m[0x8658] = (1,) # GL_VERTEX_ATTRIB_ARRAY8_NV
_m[0x8659] = (1,) # GL_VERTEX_ATTRIB_ARRAY9_NV
_m[0x889F] = (1,) # GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING
_m[0x88FE] = (1,) # GL_VERTEX_ATTRIB_ARRAY_DIVISOR
_m[0x8622] = (1,) # GL_VERTEX_ATTRIB_ARRAY_ENABLED
_m[0x88FD] = (1,) # GL_VERTEX_ATTRIB_ARRAY_INTEGER
_m[0x886A] = (1,) # GL_VERTEX_ATTRIB_ARRAY_NORMALIZED
_m[0x8645] = (1,) # GL_VERTEX_ATTRIB_ARRAY_POINTER
_m[0x8623] = (1,) # GL_VERTEX_ATTRIB_ARRAY_SIZE
_m[0x8624] = (1,) # GL_VERTEX_ATTRIB_ARRAY_STRIDE
_m[0x8625] = (1,) # GL_VERTEX_ATTRIB_ARRAY_TYPE
_m[0x82D4] = (1,) # GL_VERTEX_ATTRIB_BINDING
_m[0x82D5] = (1,) # GL_VERTEX_ATTRIB_RELATIVE_OFFSET
_m[0x82D6] = (1,) # GL_VERTEX_BINDING_DIVISOR
_m[0x82D7] = (1,) # GL_VERTEX_BINDING_OFFSET
_m[0x82D8] = (1,) # GL_VERTEX_BINDING_STRIDE
_m[0x86A7] = (1,) # GL_VERTEX_BLEND_ARB
_m[0x83EF] = (1,) # GL_VERTEX_PRECLIP_HINT_SGIX
_m[0x83EE] = (1,) # GL_VERTEX_PRECLIP_SGIX
_m[0x8620] = (1,) # GL_VERTEX_PROGRAM_ARB
_m[0x864A] = (1,) # GL_VERTEX_PROGRAM_BINDING_NV
_m[0x8620] = (1,) # GL_VERTEX_PROGRAM_NV
_m[0x8DA2] = (1,) # GL_VERTEX_PROGRAM_PARAMETER_BUFFER_NV
_m[0x8642] = (1,) # GL_VERTEX_PROGRAM_POINT_SIZE
_m[0x8642] = (1,) # GL_VERTEX_PROGRAM_POINT_SIZE_ARB
_m[0x8642] = (1,) # GL_VERTEX_PROGRAM_POINT_SIZE_NV
_m[0x8643] = (1,) # GL_VERTEX_PROGRAM_TWO_SIDE
_m[0x8643] = (1,) # GL_VERTEX_PROGRAM_TWO_SIDE_ARB
_m[0x8643] = (1,) # GL_VERTEX_PROGRAM_TWO_SIDE_NV
_m[0x8B31] = (1,) # GL_VERTEX_SHADER
_m[0x8781] = (1,) # GL_VERTEX_SHADER_BINDING_EXT
_m[0x8780] = (1,) # GL_VERTEX_SHADER_EXT
_m[0x87CF] = (1,) # GL_VERTEX_SHADER_INSTRUCTIONS_EXT
_m[0x87D1] = (1,) # GL_VERTEX_SHADER_INVARIANTS_EXT
_m[0x87D3] = (1,) # GL_VERTEX_SHADER_LOCALS_EXT
_m[0x87D2] = (1,) # GL_VERTEX_SHADER_LOCAL_CONSTANTS_EXT
_m[0x87D4] = (1,) # GL_VERTEX_SHADER_OPTIMIZED_EXT
_m[0x87D0] = (1,) # GL_VERTEX_SHADER_VARIANTS_EXT
_m[0x850C] = (1,) # GL_VERTEX_WEIGHT_ARRAY_EXT
_m[0x8510] = (1,) # GL_VERTEX_WEIGHT_ARRAY_POINTER_EXT
_m[0x850D] = (1,) # GL_VERTEX_WEIGHT_ARRAY_SIZE_EXT
_m[0x850F] = (1,) # GL_VERTEX_WEIGHT_ARRAY_STRIDE_EXT
_m[0x850E] = (1,) # GL_VERTEX_WEIGHT_ARRAY_TYPE_EXT
_m[0x8719] = (1,) # GL_VIBRANCE_BIAS_NV
_m[0x8713] = (1,) # GL_VIBRANCE_SCALE_NV
_m[0x9021] = (1,) # GL_VIDEO_BUFFER_BINDING_NV
_m[0x0BA2] = (4,) # GL_VIEWPORT
_m[0x825D] = (2,) # GL_VIEWPORT_BOUNDS_RANGE
_m[0x825F] = (1,) # GL_VIEWPORT_INDEX_PROVOKING_VERTEX
_m[0x825C] = (1,) # GL_VIEWPORT_SUBPIXEL_BITS
_m[0x86AD] = (1,) # GL_WEIGHT_ARRAY_ARB
_m[0x889E] = (1,) # GL_WEIGHT_ARRAY_BUFFER_BINDING
_m[0x889E] = (1,) # GL_WEIGHT_ARRAY_BUFFER_BINDING_ARB
_m[0x86AC] = (1,) # GL_WEIGHT_ARRAY_POINTER_ARB
_m[0x86AB] = (1,) # GL_WEIGHT_ARRAY_SIZE_ARB
_m[0x86AA] = (1,) # GL_WEIGHT_ARRAY_STRIDE_ARB
_m[0x86A9] = (1,) # GL_WEIGHT_ARRAY_TYPE_ARB
_m[0x86A6] = (1,) # GL_WEIGHT_SUM_UNITY_ARB
_m[0x887A] = (1,) # GL_WRITE_PIXEL_DATA_RANGE_LENGTH_NV
_m[0x0D16] = (1,) # GL_ZOOM_X
_m[0x0D17] = (1,) # GL_ZOOM_Y
|
bobobox/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/docker/docker_network.py
|
22
|
#!/usr/bin/python
#
# Copyright 2016 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'committer',
'version': '1.0'}
DOCUMENTATION = '''
module: docker_network
version_added: "2.2"
short_description: Manage Docker networks
description:
- Create/remove Docker networks and connect containers to them.
- Performs largely the same function as the "docker network" CLI subcommand.
options:
name:
description:
- Name of the network to operate on.
required: true
aliases:
- network_name
connected:
description:
- List of container names or container IDs to connect to a network.
default: null
aliases:
- containers
driver:
description:
- Specify the type of network. Docker provides bridge and overlay drivers, but 3rd party drivers can also be used.
default: bridge
driver_options:
description:
- Dictionary of network settings. Consult docker docs for valid options and values.
default: null
force:
description:
      - With state I(absent), forces disconnecting all containers from the
        network prior to deleting the network. With state I(present), disconnects
        all containers, deletes the network and re-creates the network. This
        option is required if you have changed the IPAM or driver options and
        want an existing network to be updated to use the new options.
default: false
appends:
description:
- By default the connected list is canonical, meaning containers not on the list are removed from the network.
Use C(appends) to leave existing containers connected.
default: false
aliases:
- incremental
ipam_driver:
description:
- Specify an IPAM driver.
default: null
ipam_options:
description:
- Dictionary of IPAM options.
default: null
state:
description:
- I(absent) deletes the network. If a network has connected containers, it
cannot be deleted. Use the C(force) option to disconnect all containers
and delete the network.
- I(present) creates the network, if it does not already exist with the
specified parameters, and connects the list of containers provided via
the connected parameter. Containers not on the list will be disconnected.
An empty list will leave no containers connected to the network. Use the
C(appends) option to leave existing containers connected. Use the C(force)
        option to force re-creation of the network.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- docker
author:
- "Ben Keith (@keitwb)"
- "Chris Houseknecht (@chouseknecht)"
requirements:
- "python >= 2.6"
- "docker-py >= 1.7.0"
- "The docker server >= 1.9.0"
'''
EXAMPLES = '''
- name: Create a network
docker_network:
name: network_one
- name: Remove all but selected list of containers
docker_network:
name: network_one
connected:
- container_a
- container_b
- container_c
- name: Remove a single container
docker_network:
name: network_one
connected: "{{ fulllist|difference(['container_a']) }}"
- name: Add a container to a network, leaving existing containers connected
docker_network:
name: network_one
connected:
- container_a
appends: yes
- name: Create a network with options
docker_network:
name: network_two
driver_options:
com.docker.network.bridge.name: net2
ipam_options:
subnet: '172.3.26.0/16'
gateway: 172.3.26.1
      iprange: '172.3.26.0/24'
- name: Delete a network, disconnecting all containers
docker_network:
name: network_one
state: absent
force: yes
'''
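# A hedged, illustrative addition (not part of the module's published EXAMPLES above):
# the C(force) documentation says an existing network has to be re-created when its IPAM
# or driver options change. A task doing that might look like the following sketch; the
# network name and option value are assumptions.
#
# - name: Re-create network_two after changing its driver options
#   docker_network:
#     name: network_two
#     driver_options:
#       com.docker.network.bridge.name: net2-new
#     force: yes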
RETURN = '''
facts:
description: Network inspection results for the affected network.
returned: success
type: complex
sample: {}
'''
from ansible.module_utils.docker_common import *
try:
from docker import utils
if HAS_DOCKER_PY_2:
from docker.types import Ulimit, IPAMPool, IPAMConfig
else:
from docker.utils.types import Ulimit
except:
# missing docker-py handled in ansible.module_utils.docker
pass
class TaskParameters(DockerBaseClass):
def __init__(self, client):
super(TaskParameters, self).__init__()
self.client = client
self.network_name = None
self.connected = None
self.driver = None
self.driver_options = None
self.ipam_driver = None
self.ipam_options = None
self.appends = None
self.force = None
self.debug = None
for key, value in client.module.params.items():
setattr(self, key, value)
def container_names_in_network(network):
return [c['Name'] for c in network['Containers'].values()]
class DockerNetworkManager(object):
def __init__(self, client):
self.client = client
self.parameters = TaskParameters(client)
self.check_mode = self.client.check_mode
self.results = {
u'changed': False,
u'actions': []
}
self.diff = self.client.module._diff
self.existing_network = self.get_existing_network()
if not self.parameters.connected and self.existing_network:
self.parameters.connected = container_names_in_network(self.existing_network)
state = self.parameters.state
if state == 'present':
self.present()
elif state == 'absent':
self.absent()
def get_existing_network(self):
networks = self.client.networks()
network = None
for n in networks:
if n['Name'] == self.parameters.network_name:
network = n
return network
def has_different_config(self, net):
'''
Evaluates an existing network and returns a tuple containing a boolean
indicating if the configuration is different and a list of differences.
:param net: the inspection output for an existing network
:return: (bool, list)
'''
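        # Illustrative outcome (hypothetical values): if the inspected network reports
        # {'Driver': 'bridge', ...} while the task requests driver 'overlay', this method
        # returns (True, ['driver']); a network matching all requested options yields
        # (False, []).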
different = False
differences = []
if self.parameters.driver and self.parameters.driver != net['Driver']:
different = True
differences.append('driver')
if self.parameters.driver_options:
if not net.get('Options'):
different = True
differences.append('driver_options')
else:
for key, value in self.parameters.driver_options.items():
if not net['Options'].get(key) or value != net['Options'][key]:
different = True
differences.append('driver_options.%s' % key)
if self.parameters.ipam_driver:
if not net.get('IPAM') or net['IPAM']['Driver'] != self.parameters.ipam_driver:
different = True
differences.append('ipam_driver')
if self.parameters.ipam_options:
if not net.get('IPAM') or not net['IPAM'].get('Config'):
different = True
differences.append('ipam_options')
else:
for key, value in self.parameters.ipam_options.items():
camelkey = None
for net_key in net['IPAM']['Config'][0]:
if key == net_key.lower():
camelkey = net_key
break
if not camelkey:
# key not found
different = True
differences.append('ipam_options.%s' % key)
elif net['IPAM']['Config'][0].get(camelkey) != value:
# key has different value
different = True
differences.append('ipam_options.%s' % key)
return different, differences
def create_network(self):
if not self.existing_network:
ipam_pools = []
if self.parameters.ipam_options:
if HAS_DOCKER_PY_2:
ipam_pools.append(IPAMPool(**self.parameters.ipam_options))
else:
ipam_pools.append(utils.create_ipam_pool(**self.parameters.ipam_options))
if HAS_DOCKER_PY_2:
ipam_config = IPAMConfig(driver=self.parameters.ipam_driver,
pool_configs=ipam_pools)
else:
ipam_config = utils.create_ipam_config(driver=self.parameters.ipam_driver,
pool_configs=ipam_pools)
if not self.check_mode:
resp = self.client.create_network(self.parameters.network_name,
driver=self.parameters.driver,
options=self.parameters.driver_options,
ipam=ipam_config)
self.existing_network = self.client.inspect_network(resp['Id'])
self.results['actions'].append("Created network %s with driver %s" % (self.parameters.network_name, self.parameters.driver))
self.results['changed'] = True
def remove_network(self):
if self.existing_network:
self.disconnect_all_containers()
if not self.check_mode:
self.client.remove_network(self.parameters.network_name)
self.results['actions'].append("Removed network %s" % (self.parameters.network_name,))
self.results['changed'] = True
def is_container_connected(self, container_name):
return container_name in container_names_in_network(self.existing_network)
def connect_containers(self):
for name in self.parameters.connected:
if not self.is_container_connected(name):
if not self.check_mode:
self.client.connect_container_to_network(name, self.parameters.network_name)
self.results['actions'].append("Connected container %s" % (name,))
self.results['changed'] = True
def disconnect_missing(self):
for c in self.existing_network['Containers'].values():
name = c['Name']
if name not in self.parameters.connected:
self.disconnect_container(name)
def disconnect_all_containers(self):
containers = self.client.inspect_network(self.parameters.network_name)['Containers']
for cont in containers.values():
self.disconnect_container(cont['Name'])
def disconnect_container(self, container_name):
if not self.check_mode:
self.client.disconnect_container_from_network(container_name, self.parameters.network_name)
self.results['actions'].append("Disconnected container %s" % (container_name,))
self.results['changed'] = True
def present(self):
different = False
differences = []
if self.existing_network:
different, differences = self.has_different_config(self.existing_network)
if self.parameters.force or different:
self.remove_network()
self.existing_network = None
self.create_network()
self.connect_containers()
if not self.parameters.appends:
self.disconnect_missing()
if self.diff or self.check_mode or self.parameters.debug:
self.results['diff'] = differences
if not self.check_mode and not self.parameters.debug:
self.results.pop('actions')
self.results['ansible_facts'] = {u'ansible_docker_network': self.get_existing_network()}
def absent(self):
self.remove_network()
def main():
argument_spec = dict(
network_name = dict(type='str', required=True, aliases=['name']),
connected = dict(type='list', default=[], aliases=['containers']),
state = dict(type='str', default='present', choices=['present', 'absent']),
driver = dict(type='str', default='bridge'),
driver_options = dict(type='dict', default={}),
force = dict(type='bool', default=False),
appends = dict(type='bool', default=False, aliases=['incremental']),
ipam_driver = dict(type='str', default=None),
ipam_options = dict(type='dict', default={}),
debug = dict(type='bool', default=False)
)
client = AnsibleDockerClient(
argument_spec=argument_spec,
supports_check_mode=True
)
cm = DockerNetworkManager(client)
client.module.exit_json(**cm.results)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
olea/PyConES-2016
|
refs/heads/develop
|
pycones/sponsorship/admin.py
|
2
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from sponsorship.models import BenefitLevel, SponsorLevel, Sponsor, Benefit
from sponsorship.models import SponsorBenefit
class BenefitLevelInline(admin.TabularInline):
model = BenefitLevel
extra = 0
class SponsorBenefitInline(admin.StackedInline):
model = SponsorBenefit
extra = 0
fieldsets = [
(None, {
"fields": [
("benefit", "active"),
("max_words", "other_limits"),
"text",
"upload",
]
})
]
class SponsorAdmin(admin.ModelAdmin):
save_on_top = True
fieldsets = [
(None, {
"fields": [
("name", "applicant"),
("level", "active"),
"external_url",
"annotation",
("contact_name", "contact_email")
]
}),
("Metadata", {
"fields": ["added"],
"classes": ["collapse"]
})
]
inlines = [SponsorBenefitInline]
list_display = ["name", "external_url", "level", "active"]
def get_form(self, *args, **kwargs):
# @@@ kinda ugly but using choices= on NullBooleanField is broken
form = super(SponsorAdmin, self).get_form(*args, **kwargs)
form.base_fields["active"].widget.choices = [
(u"1", "unreviewed"),
(u"2", "approved"),
(u"3", "rejected")
]
return form
class BenefitAdmin(admin.ModelAdmin):
list_display = ["name", "type", "description"]
inlines = [BenefitLevelInline]
class SponsorLevelAdmin(admin.ModelAdmin):
inlines = [BenefitLevelInline]
admin.site.register(SponsorLevel, SponsorLevelAdmin)
admin.site.register(Sponsor, SponsorAdmin)
admin.site.register(Benefit, BenefitAdmin)
|
aricchen/openHR
|
refs/heads/master
|
openerp/addons/board/board.py
|
18
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from operator import itemgetter
from textwrap import dedent
from openerp import tools
from openerp.osv import fields, osv
class board_board(osv.osv):
_name = 'board.board'
_description = "Board"
_auto = False
_columns = {}
@tools.cache()
def list(self, cr, uid, context=None):
Actions = self.pool.get('ir.actions.act_window')
Menus = self.pool.get('ir.ui.menu')
IrValues = self.pool.get('ir.values')
act_ids = Actions.search(cr, uid, [('res_model', '=', self._name)], context=context)
refs = ['%s,%s' % (Actions._name, act_id) for act_id in act_ids]
# cannot search "action" field on menu (non stored function field without search_fnct)
irv_ids = IrValues.search(cr, uid, [
('model', '=', 'ir.ui.menu'),
('key', '=', 'action'),
('key2', '=', 'tree_but_open'),
('value', 'in', refs),
], context=context)
menu_ids = map(itemgetter('res_id'), IrValues.read(cr, uid, irv_ids, ['res_id'], context=context))
menu_names = Menus.name_get(cr, uid, menu_ids, context=context)
return [dict(id=m[0], name=m[1]) for m in menu_names]
def _clear_list_cache(self):
self.list.clear_cache(self)
def create(self, cr, user, vals, context=None):
return 0
def fields_view_get(self, cr, user, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
"""
        Overrides orm fields_view_get.
        @return: Dictionary of fields, arch and toolbar.
"""
res = {}
res = super(board_board, self).fields_view_get(cr, user, view_id, view_type,
context, toolbar=toolbar, submenu=submenu)
CustView = self.pool.get('ir.ui.view.custom')
vids = CustView.search(cr, user, [('user_id', '=', user), ('ref_id', '=', view_id)], context=context)
if vids:
view_id = vids[0]
arch = CustView.browse(cr, user, view_id, context=context)
res['custom_view_id'] = view_id
res['arch'] = arch.arch
res['arch'] = self._arch_preprocessing(cr, user, res['arch'], context=context)
res['toolbar'] = {'print': [], 'action': [], 'relate': []}
return res
def _arch_preprocessing(self, cr, user, arch, context=None):
from lxml import etree
def remove_unauthorized_children(node):
for child in node.iterchildren():
if child.tag == 'action' and child.get('invisible'):
node.remove(child)
else:
child = remove_unauthorized_children(child)
return node
def encode(s):
if isinstance(s, unicode):
return s.encode('utf8')
return s
archnode = etree.fromstring(encode(arch))
return etree.tostring(remove_unauthorized_children(archnode), pretty_print=True)
class board_create(osv.osv_memory):
def board_create(self, cr, uid, ids, context=None):
assert len(ids) == 1
this = self.browse(cr, uid, ids[0], context=context)
view_arch = dedent("""<?xml version="1.0"?>
<form string="%s" version="7.0">
<board style="2-1">
<column/>
<column/>
</board>
</form>
""".strip() % (this.name,))
view_id = self.pool.get('ir.ui.view').create(cr, uid, {
'name': this.name,
'model': 'board.board',
'priority': 16,
'type': 'form',
'arch': view_arch,
}, context=context)
action_id = self.pool.get('ir.actions.act_window').create(cr, uid, {
'name': this.name,
'view_type': 'form',
'view_mode': 'form',
'res_model': 'board.board',
'usage': 'menu',
'view_id': view_id,
'help': dedent('''<div class="oe_empty_custom_dashboard">
<p>
<b>This dashboard is empty.</b>
</p><p>
To add the first report into this dashboard, go to any
menu, switch to list or graph view, and click <i>'Add to
Dashboard'</i> in the extended search options.
</p><p>
You can filter and group data before inserting into the
dashboard using the search options.
</p>
</div>
''')
}, context=context)
menu_id = self.pool.get('ir.ui.menu').create(cr, uid, {
'name': this.name,
'parent_id': this.menu_parent_id.id,
'action': 'ir.actions.act_window,%s' % (action_id,)
}, context=context)
self.pool.get('board.board')._clear_list_cache()
return {
'type': 'ir.actions.client',
'tag': 'reload',
'params': {
'menu_id': menu_id
},
}
def _default_menu_parent_id(self, cr, uid, context=None):
_, menu_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'base', 'menu_reporting_dashboard')
return menu_id
_name = "board.create"
_description = "Board Creation"
_columns = {
'name': fields.char('Board Name', size=64, required=True),
'menu_parent_id': fields.many2one('ir.ui.menu', 'Parent Menu', required=True),
}
_defaults = {
'menu_parent_id': _default_menu_parent_id,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Ms2ger/servo
|
refs/heads/master
|
tests/wpt/css-tests/css-text-decor-3_dev/xhtml1print/reference/support/generate-text-emphasis-ruby-tests.py
|
829
|
#!/usr/bin/env python
# - * - coding: UTF-8 - * -
"""
This script generates tests text-emphasis-ruby-001 ~ 004, which test
emphasis marks with ruby in four directions. It prints a list of all
the tests it generated, in Mozilla reftest.list format, to stdout.
"""
from __future__ import unicode_literals
TEST_FILE = 'text-emphasis-ruby-{:03}{}.html'
TEST_TEMPLATE = '''<!DOCTYPE html>
<meta charset="utf-8">
<title>CSS Test: text-emphasis and ruby, {wm}, {pos}</title>
<link rel="author" title="Xidorn Quan" href="https://www.upsuper.org">
<link rel="author" title="Mozilla" href="https://www.mozilla.org">
<link rel="help" href="https://drafts.csswg.org/css-text-decor-3/#text-emphasis-position-property">
<meta name="assert" content="emphasis marks are drawn outside the ruby">
<link rel="match" href="text-emphasis-ruby-{index:03}-ref.html">
<p>Pass if the emphasis marks are outside the ruby:</p>
<div style="line-height: 5; writing-mode: {wm}; ruby-position: {ruby_pos}; text-emphasis-position: {posval}">ルビ<span style="text-emphasis: circle">と<ruby>圏<rt>けん</rt>点<rt>てん</rt></ruby>を</span>同時</div>
'''
REF_FILE = 'text-emphasis-ruby-{:03}-ref.html'
REF_TEMPLATE = '''<!DOCTYPE html>
<meta charset="utf-8">
<title>CSS Reference: text-emphasis and ruby, {wm}, {pos}</title>
<link rel="author" title="Xidorn Quan" href="https://www.upsuper.org">
<link rel="author" title="Mozilla" href="https://www.mozilla.org">
<style> rtc {{ font-variant-east-asian: inherit; }} </style>
<p>Pass if the emphasis marks are outside the ruby:</p>
<div style="line-height: 5; writing-mode: {wm}; ruby-position: {posval}">ルビ<ruby>と<rtc>●</rtc>圏<rt>けん</rt><rtc>●</rtc>点<rt>てん</rt><rtc>●</rtc>を<rtc>●</rtc></ruby>同時</div>
'''
TEST_CASES = [
('top', 'horizontal-tb', 'over', [
('horizontal-tb', 'over right')]),
('bottom', 'horizontal-tb', 'under', [
('horizontal-tb', 'under right')]),
('right', 'vertical-rl', 'over', [
('vertical-rl', 'over right'),
('vertical-lr', 'over right')]),
('left', 'vertical-rl', 'under', [
('vertical-rl', 'over left'),
('vertical-lr', 'over left')]),
]
SUFFIXES = ['', 'a']
def write_file(filename, content):
with open(filename, 'wb') as f:
f.write(content.encode('UTF-8'))
print("# START tests from {}".format(__file__))
idx = 0
for pos, ref_wm, ruby_pos, subtests in TEST_CASES:
idx += 1
ref_file = REF_FILE.format(idx)
ref_content = REF_TEMPLATE.format(pos=pos, wm=ref_wm, posval=ruby_pos)
write_file(ref_file, ref_content)
suffix = iter(SUFFIXES)
for wm, posval in subtests:
test_file = TEST_FILE.format(idx, next(suffix))
test_content = TEST_TEMPLATE.format(
wm=wm, pos=pos, index=idx, ruby_pos=ruby_pos, posval=posval)
write_file(test_file, test_content)
print("== {} {}".format(test_file, ref_file))
print("# END tests from {}".format(__file__))
|
t794104/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/facts/__init__.py
|
172
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2017 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# Import from the compat API because Ansible 2.0-2.3 exposed module_utils.facts.ansible_facts
# and get_all_facts in the top-level namespace.
from ansible.module_utils.facts.compat import ansible_facts, get_all_facts # noqa
|
mezz64/home-assistant
|
refs/heads/dev
|
homeassistant/components/incomfort/binary_sensor.py
|
16
|
"""Support for an Intergas heater via an InComfort/InTouch Lan2RF gateway."""
from typing import Any, Dict, Optional
from homeassistant.components.binary_sensor import (
DOMAIN as BINARY_SENSOR_DOMAIN,
BinarySensorEntity,
)
from . import DOMAIN, IncomfortChild
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up an InComfort/InTouch binary_sensor device."""
if discovery_info is None:
return
client = hass.data[DOMAIN]["client"]
heaters = hass.data[DOMAIN]["heaters"]
async_add_entities([IncomfortFailed(client, h) for h in heaters])
class IncomfortFailed(IncomfortChild, BinarySensorEntity):
"""Representation of an InComfort Failed sensor."""
def __init__(self, client, heater) -> None:
"""Initialize the binary sensor."""
super().__init__()
self._unique_id = f"{heater.serial_no}_failed"
self.entity_id = f"{BINARY_SENSOR_DOMAIN}.{DOMAIN}_failed"
self._name = "Boiler Fault"
self._client = client
self._heater = heater
@property
def is_on(self) -> bool:
"""Return the status of the sensor."""
return self._heater.status["is_failed"]
@property
def device_state_attributes(self) -> Optional[Dict[str, Any]]:
"""Return the device state attributes."""
return {"fault_code": self._heater.status["fault_code"]}
|
NCIP/gdc-docs
|
refs/heads/develop
|
docs/API/Users_Guide/scripts/BAM_Slice.py
|
1
|
import requests
import json
'''
This script will not work until $TOKEN_FILE_PATH
is replaced with an actual path.
'''
token_file = "$TOKEN_FILE_PATH"
file_id = "11443f3c-9b8b-4e47-b5b7-529468fec098"
data_endpt = "https://api.gdc.cancer.gov/slicing/view/{}".format(file_id)
with open(token_file,"r") as token:
token_string = str(token.read().strip())
params = {"gencode": ["BRCA1", "BRCA2"]}
response = requests.post(data_endpt,
data = json.dumps(params),
headers = {
"Content-Type": "application/json",
"X-Auth-Token": token_string
})
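# Optional check, not in the original GDC example: fail fast if the slicing request was
# rejected (e.g. an invalid token), so that an error body is not written out as a BAM file.
if response.status_code != 200:
    raise RuntimeError(
        "BAM slicing request failed with status {}: {}".format(
            response.status_code, response.text))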
file_name = "brca_slices.bam"
with open(file_name, "wb") as output_file:
output_file.write(response.content)
|
sugarlabs/sugar-toolkit
|
refs/heads/master
|
src/sugar/graphics/panel.py
|
3
|
# Copyright (C) 2007, Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
"""
STABLE.
"""
import gtk
class Panel(gtk.VBox):
__gtype_name__ = 'SugarPanel'
def __init__(self):
gtk.VBox.__init__(self)
|
xbmc/xbmc-antiquated
|
refs/heads/master
|
xbmc/lib/libPython/Python/Lib/idlelib/OutputWindow.py
|
67
|
from Tkinter import *
from EditorWindow import EditorWindow
import re
import tkMessageBox
import IOBinding
class OutputWindow(EditorWindow):
"""An editor window that can serve as an output file.
Also the future base class for the Python shell window.
This class has no input facilities.
"""
def __init__(self, *args):
EditorWindow.__init__(self, *args)
self.text.bind("<<goto-file-line>>", self.goto_file_line)
# Customize EditorWindow
def ispythonsource(self, filename):
# No colorization needed
return 0
def short_title(self):
return "Output"
def maybesave(self):
# Override base class method -- don't ask any questions
if self.get_saved():
return "yes"
else:
return "no"
# Act as output file
def write(self, s, tags=(), mark="insert"):
# Tk assumes that byte strings are Latin-1;
# we assume that they are in the locale's encoding
if isinstance(s, str):
try:
s = unicode(s, IOBinding.encoding)
except UnicodeError:
# some other encoding; let Tcl deal with it
pass
self.text.insert(mark, s, tags)
self.text.see(mark)
self.text.update()
def writelines(self, l):
map(self.write, l)
def flush(self):
pass
# Our own right-button menu
rmenu_specs = [
("Go to file/line", "<<goto-file-line>>"),
]
file_line_pats = [
r'file "([^"]*)", line (\d+)',
r'([^\s]+)\((\d+)\)',
r'([^\s]+):\s*(\d+):',
]
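# Illustrative examples (not in the original) of lines the three patterns
# above match, given the re.IGNORECASE compile in goto_file_line():
#   File "spam.py", line 3    ->  ('spam.py', '3')
#   spam.py(3)                ->  ('spam.py', '3')
#   spam.py:3: some message   ->  ('spam.py', '3')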
file_line_progs = None
def goto_file_line(self, event=None):
if self.file_line_progs is None:
l = []
for pat in self.file_line_pats:
l.append(re.compile(pat, re.IGNORECASE))
self.file_line_progs = l
# x, y = self.event.x, self.event.y
# self.text.mark_set("insert", "@%d,%d" % (x, y))
line = self.text.get("insert linestart", "insert lineend")
result = self._file_line_helper(line)
if not result:
# Try the previous line. This is handy e.g. in tracebacks,
# where you tend to right-click on the displayed source line
line = self.text.get("insert -1line linestart",
"insert -1line lineend")
result = self._file_line_helper(line)
if not result:
tkMessageBox.showerror(
"No special line",
"The line you point at doesn't look like "
"a valid file name followed by a line number.",
master=self.text)
return
filename, lineno = result
edit = self.flist.open(filename)
edit.gotoline(lineno)
def _file_line_helper(self, line):
for prog in self.file_line_progs:
m = prog.search(line)
if m:
break
else:
return None
filename, lineno = m.group(1, 2)
try:
f = open(filename, "r")
f.close()
except IOError:
return None
try:
return filename, int(lineno)
except TypeError:
return None
# These classes are currently not used but might come in handy
class OnDemandOutputWindow:
tagdefs = {
# XXX Should use IdlePrefs.ColorPrefs
"stdout": {"foreground": "blue"},
"stderr": {"foreground": "#007700"},
}
def __init__(self, flist):
self.flist = flist
self.owin = None
def write(self, s, tags, mark):
if not self.owin:
self.setup()
self.owin.write(s, tags, mark)
def setup(self):
self.owin = owin = OutputWindow(self.flist)
text = owin.text
for tag, cnf in self.tagdefs.items():
if cnf:
text.tag_configure(tag, **cnf)
text.tag_raise('sel')
self.write = self.owin.write
#class PseudoFile:
#
# def __init__(self, owin, tags, mark="end"):
# self.owin = owin
# self.tags = tags
# self.mark = mark
# def write(self, s):
# self.owin.write(s, self.tags, self.mark)
# def writelines(self, l):
# map(self.write, l)
# def flush(self):
# pass
|
Dandandan/wikiprogramming
|
refs/heads/master
|
jsrepl/extern/python/unclosured/lib/python2.7/smtpd.py
|
76
|
#! /usr/bin/env python
"""An RFC 2821 smtp proxy.
Usage: %(program)s [options] [localhost:localport [remotehost:remoteport]]
Options:
--nosetuid
-n
This program generally tries to setuid `nobody', unless this flag is
set. The setuid call will fail if this program is not run as root (in
which case, use this flag).
--version
-V
Print the version number and exit.
--class classname
-c classname
Use `classname' as the concrete SMTP proxy class. Uses `PureProxy' by
default.
--debug
-d
Turn on debugging prints.
--help
-h
Print this message and exit.
Version: %(__version__)s
If localhost is not given then `localhost' is used, and if localport is not
given then 8025 is used. If remotehost is not given then `localhost' is used,
and if remoteport is not given, then 25 is used.
"""
# Overview:
#
# This file implements the minimal SMTP protocol as defined in RFC 821. It
# has a hierarchy of classes which implement the backend functionality for the
# smtpd. A number of classes are provided:
#
# SMTPServer - the base class for the backend. Raises NotImplementedError
# if you try to use it.
#
# DebuggingServer - simply prints each message it receives on stdout.
#
# PureProxy - Proxies all messages to a real smtpd which does final
# delivery. One known problem with this class is that it doesn't handle
# SMTP errors from the backend server at all. This should be fixed
# (contributions are welcome!).
#
# MailmanProxy - An experimental hack to work with GNU Mailman
# <www.list.org>. Using this server as your real incoming smtpd, your
# mailhost will automatically recognize and accept mail destined to Mailman
# lists when those lists are created. Every message not destined for a list
# gets forwarded to a real backend smtpd, as with PureProxy. Again, errors
# are not handled correctly yet.
#
# Please note that this script requires Python 2.0
#
# Author: Barry Warsaw <barry@python.org>
#
# TODO:
#
# - support mailbox delivery
# - alias files
# - ESMTP
# - handle error codes from the backend smtpd
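# Example invocations (illustrative; the hosts and ports are arbitrary):
#
#   python smtpd.py -n -c DebuggingServer localhost:8025 localhost:25
#   python smtpd.py -n -c PureProxy localhost:8025 mail.example.com:25
#
# Both listen on localhost:8025; the first prints every received message to
# stdout, the second relays messages to the given backend smtpd.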
import sys
import os
import errno
import getopt
import time
import socket
import asyncore
import asynchat
__all__ = ["SMTPServer","DebuggingServer","PureProxy","MailmanProxy"]
program = sys.argv[0]
__version__ = 'Python SMTP proxy version 0.2'
class Devnull:
def write(self, msg): pass
def flush(self): pass
DEBUGSTREAM = Devnull()
NEWLINE = '\n'
EMPTYSTRING = ''
COMMASPACE = ', '
def usage(code, msg=''):
print >> sys.stderr, __doc__ % globals()
if msg:
print >> sys.stderr, msg
sys.exit(code)
class SMTPChannel(asynchat.async_chat):
COMMAND = 0
DATA = 1
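# Note (added for clarity, not in the original): a channel starts in the
# COMMAND state; smtp_DATA() switches it to DATA and changes the terminator
# to '\r\n.\r\n', and found_terminator() switches it back to COMMAND once the
# message body has been collected.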
def __init__(self, server, conn, addr):
asynchat.async_chat.__init__(self, conn)
self.__server = server
self.__conn = conn
self.__addr = addr
self.__line = []
self.__state = self.COMMAND
self.__greeting = 0
self.__mailfrom = None
self.__rcpttos = []
self.__data = ''
self.__fqdn = socket.getfqdn()
try:
self.__peer = conn.getpeername()
except socket.error, err:
# a race condition may occur if the other end is closing
# before we can get the peername
self.close()
if err[0] != errno.ENOTCONN:
raise
return
print >> DEBUGSTREAM, 'Peer:', repr(self.__peer)
self.push('220 %s %s' % (self.__fqdn, __version__))
self.set_terminator('\r\n')
# Overrides base class for convenience
def push(self, msg):
asynchat.async_chat.push(self, msg + '\r\n')
# Implementation of base class abstract method
def collect_incoming_data(self, data):
self.__line.append(data)
# Implementation of base class abstract method
def found_terminator(self):
line = EMPTYSTRING.join(self.__line)
print >> DEBUGSTREAM, 'Data:', repr(line)
self.__line = []
if self.__state == self.COMMAND:
if not line:
self.push('500 Error: bad syntax')
return
method = None
i = line.find(' ')
if i < 0:
command = line.upper()
arg = None
else:
command = line[:i].upper()
arg = line[i+1:].strip()
method = getattr(self, 'smtp_' + command, None)
if not method:
self.push('502 Error: command "%s" not implemented' % command)
return
method(arg)
return
else:
if self.__state != self.DATA:
self.push('451 Internal confusion')
return
# Remove extraneous carriage returns and de-transparency according
# to RFC 821, Section 4.5.2.
data = []
for text in line.split('\r\n'):
if text and text[0] == '.':
data.append(text[1:])
else:
data.append(text)
self.__data = NEWLINE.join(data)
status = self.__server.process_message(self.__peer,
self.__mailfrom,
self.__rcpttos,
self.__data)
self.__rcpttos = []
self.__mailfrom = None
self.__state = self.COMMAND
self.set_terminator('\r\n')
if not status:
self.push('250 Ok')
else:
self.push(status)
# SMTP and ESMTP commands
def smtp_HELO(self, arg):
if not arg:
self.push('501 Syntax: HELO hostname')
return
if self.__greeting:
self.push('503 Duplicate HELO/EHLO')
else:
self.__greeting = arg
self.push('250 %s' % self.__fqdn)
def smtp_NOOP(self, arg):
if arg:
self.push('501 Syntax: NOOP')
else:
self.push('250 Ok')
def smtp_QUIT(self, arg):
# args is ignored
self.push('221 Bye')
self.close_when_done()
# factored
def __getaddr(self, keyword, arg):
address = None
keylen = len(keyword)
if arg[:keylen].upper() == keyword:
address = arg[keylen:].strip()
if not address:
pass
elif address[0] == '<' and address[-1] == '>' and address != '<>':
# Addresses can be in the form <person@dom.com> but watch out
# for null address, e.g. <>
address = address[1:-1]
return address
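# Illustrative examples (not in the original) of __getaddr() results:
#   __getaddr('FROM:', 'FROM:<a@example.com>')  ->  'a@example.com'
#   __getaddr('FROM:', 'FROM:<>')               ->  '<>'   (null sender kept as-is)
#   __getaddr('FROM:', 'TO:<a@example.com>')    ->  None   (keyword mismatch)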
def smtp_MAIL(self, arg):
print >> DEBUGSTREAM, '===> MAIL', arg
address = self.__getaddr('FROM:', arg) if arg else None
if not address:
self.push('501 Syntax: MAIL FROM:<address>')
return
if self.__mailfrom:
self.push('503 Error: nested MAIL command')
return
self.__mailfrom = address
print >> DEBUGSTREAM, 'sender:', self.__mailfrom
self.push('250 Ok')
def smtp_RCPT(self, arg):
print >> DEBUGSTREAM, '===> RCPT', arg
if not self.__mailfrom:
self.push('503 Error: need MAIL command')
return
address = self.__getaddr('TO:', arg) if arg else None
if not address:
self.push('501 Syntax: RCPT TO: <address>')
return
self.__rcpttos.append(address)
print >> DEBUGSTREAM, 'recips:', self.__rcpttos
self.push('250 Ok')
def smtp_RSET(self, arg):
if arg:
self.push('501 Syntax: RSET')
return
# Resets the sender, recipients, and data, but not the greeting
self.__mailfrom = None
self.__rcpttos = []
self.__data = ''
self.__state = self.COMMAND
self.push('250 Ok')
def smtp_DATA(self, arg):
if not self.__rcpttos:
self.push('503 Error: need RCPT command')
return
if arg:
self.push('501 Syntax: DATA')
return
self.__state = self.DATA
self.set_terminator('\r\n.\r\n')
self.push('354 End data with <CR><LF>.<CR><LF>')
class SMTPServer(asyncore.dispatcher):
def __init__(self, localaddr, remoteaddr):
self._localaddr = localaddr
self._remoteaddr = remoteaddr
asyncore.dispatcher.__init__(self)
try:
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
# try to re-use a server port if possible
self.set_reuse_addr()
self.bind(localaddr)
self.listen(5)
except:
# cleanup asyncore.socket_map before raising
self.close()
raise
else:
print >> DEBUGSTREAM, \
'%s started at %s\n\tLocal addr: %s\n\tRemote addr:%s' % (
self.__class__.__name__, time.ctime(time.time()),
localaddr, remoteaddr)
def handle_accept(self):
pair = self.accept()
if pair is not None:
conn, addr = pair
print >> DEBUGSTREAM, 'Incoming connection from %s' % repr(addr)
channel = SMTPChannel(self, conn, addr)
# API for "doing something useful with the message"
def process_message(self, peer, mailfrom, rcpttos, data):
"""Override this abstract method to handle messages from the client.
peer is a tuple containing (ipaddr, port) of the client that made the
socket connection to our smtp port.
mailfrom is the raw address the client claims the message is coming
from.
rcpttos is a list of raw addresses the client wishes to deliver the
message to.
data is a string containing the entire full text of the message,
headers (if supplied) and all. It has been `de-transparencied'
according to RFC 821, Section 4.5.2. In other words, a line
containing a `.' followed by other text has had the leading dot
removed.
This function should return None, for a normal `250 Ok' response;
otherwise it returns the desired response string in RFC 821 format.
"""
raise NotImplementedError
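# Illustrative subclass (not part of the stdlib module) showing the
# process_message() contract documented above: return None for '250 Ok', or
# an RFC 821 style response string to reject the message. The size limit is
# an arbitrary example value.
class SizeLimitingServer(SMTPServer):
    MAX_BYTES = 1024 * 1024

    def process_message(self, peer, mailfrom, rcpttos, data):
        if len(data) > self.MAX_BYTES:
            return '552 Requested mail action aborted: exceeded storage allocation'
        print >> DEBUGSTREAM, 'accepted %d bytes from %s' % (len(data), peer[0])
        return None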
class DebuggingServer(SMTPServer):
# Do something with the gathered message
def process_message(self, peer, mailfrom, rcpttos, data):
inheaders = 1
lines = data.split('\n')
print '---------- MESSAGE FOLLOWS ----------'
for line in lines:
# headers first
if inheaders and not line:
print 'X-Peer:', peer[0]
inheaders = 0
print line
print '------------ END MESSAGE ------------'
class PureProxy(SMTPServer):
def process_message(self, peer, mailfrom, rcpttos, data):
lines = data.split('\n')
# Look for the last header
i = 0
for line in lines:
if not line:
break
i += 1
lines.insert(i, 'X-Peer: %s' % peer[0])
data = NEWLINE.join(lines)
refused = self._deliver(mailfrom, rcpttos, data)
# TBD: what to do with refused addresses?
print >> DEBUGSTREAM, 'we got some refusals:', refused
def _deliver(self, mailfrom, rcpttos, data):
import smtplib
refused = {}
try:
s = smtplib.SMTP()
s.connect(self._remoteaddr[0], self._remoteaddr[1])
try:
refused = s.sendmail(mailfrom, rcpttos, data)
finally:
s.quit()
except smtplib.SMTPRecipientsRefused, e:
print >> DEBUGSTREAM, 'got SMTPRecipientsRefused'
refused = e.recipients
except (socket.error, smtplib.SMTPException), e:
print >> DEBUGSTREAM, 'got', e.__class__
# All recipients were refused. If the exception had an associated
# error code, use it. Otherwise, fake it with a non-triggering
# exception code.
errcode = getattr(e, 'smtp_code', -1)
errmsg = getattr(e, 'smtp_error', 'ignore')
for r in rcpttos:
refused[r] = (errcode, errmsg)
return refused
class MailmanProxy(PureProxy):
def process_message(self, peer, mailfrom, rcpttos, data):
from cStringIO import StringIO
from Mailman import Utils
from Mailman import Message
from Mailman import MailList
# If the message is to a Mailman mailing list, then we'll invoke the
# Mailman script directly, without going through the real smtpd.
# Otherwise we'll forward it to the local proxy for disposition.
listnames = []
for rcpt in rcpttos:
local = rcpt.lower().split('@')[0]
# We allow the following variations on the theme
# listname
# listname-admin
# listname-owner
# listname-request
# listname-join
# listname-leave
parts = local.split('-')
if len(parts) > 2:
continue
listname = parts[0]
if len(parts) == 2:
command = parts[1]
else:
command = ''
if not Utils.list_exists(listname) or command not in (
'', 'admin', 'owner', 'request', 'join', 'leave'):
continue
listnames.append((rcpt, listname, command))
# Remove all list recipients from rcpttos and forward what we're not
# going to take care of ourselves. Linear removal should be fine
# since we don't expect a large number of recipients.
for rcpt, listname, command in listnames:
rcpttos.remove(rcpt)
# If there's any non-list destined recipients left,
print >> DEBUGSTREAM, 'forwarding recips:', ' '.join(rcpttos)
if rcpttos:
refused = self._deliver(mailfrom, rcpttos, data)
# TBD: what to do with refused addresses?
print >> DEBUGSTREAM, 'we got refusals:', refused
# Now deliver directly to the list commands
mlists = {}
s = StringIO(data)
msg = Message.Message(s)
# These headers are required for the proper execution of Mailman. All
# MTAs in existence seem to add these if the original message doesn't
# have them.
if not msg.getheader('from'):
msg['From'] = mailfrom
if not msg.getheader('date'):
msg['Date'] = time.ctime(time.time())
for rcpt, listname, command in listnames:
print >> DEBUGSTREAM, 'sending message to', rcpt
mlist = mlists.get(listname)
if not mlist:
mlist = MailList.MailList(listname, lock=0)
mlists[listname] = mlist
# dispatch on the type of command
if command == '':
# post
msg.Enqueue(mlist, tolist=1)
elif command == 'admin':
msg.Enqueue(mlist, toadmin=1)
elif command == 'owner':
msg.Enqueue(mlist, toowner=1)
elif command == 'request':
msg.Enqueue(mlist, torequest=1)
elif command in ('join', 'leave'):
# TBD: this is a hack!
if command == 'join':
msg['Subject'] = 'subscribe'
else:
msg['Subject'] = 'unsubscribe'
msg.Enqueue(mlist, torequest=1)
class Options:
setuid = 1
classname = 'PureProxy'
def parseargs():
global DEBUGSTREAM
try:
opts, args = getopt.getopt(
sys.argv[1:], 'nVhc:d',
['class=', 'nosetuid', 'version', 'help', 'debug'])
except getopt.error, e:
usage(1, e)
options = Options()
for opt, arg in opts:
if opt in ('-h', '--help'):
usage(0)
elif opt in ('-V', '--version'):
print >> sys.stderr, __version__
sys.exit(0)
elif opt in ('-n', '--nosetuid'):
options.setuid = 0
elif opt in ('-c', '--class'):
options.classname = arg
elif opt in ('-d', '--debug'):
DEBUGSTREAM = sys.stderr
# parse the rest of the arguments
if len(args) < 1:
localspec = 'localhost:8025'
remotespec = 'localhost:25'
elif len(args) < 2:
localspec = args[0]
remotespec = 'localhost:25'
elif len(args) < 3:
localspec = args[0]
remotespec = args[1]
else:
usage(1, 'Invalid arguments: %s' % COMMASPACE.join(args))
# split into host/port pairs
i = localspec.find(':')
if i < 0:
usage(1, 'Bad local spec: %s' % localspec)
options.localhost = localspec[:i]
try:
options.localport = int(localspec[i+1:])
except ValueError:
usage(1, 'Bad local port: %s' % localspec)
i = remotespec.find(':')
if i < 0:
usage(1, 'Bad remote spec: %s' % remotespec)
options.remotehost = remotespec[:i]
try:
options.remoteport = int(remotespec[i+1:])
except ValueError:
usage(1, 'Bad remote port: %s' % remotespec)
return options
if __name__ == '__main__':
options = parseargs()
# Become nobody
if options.setuid:
try:
import pwd
except ImportError:
print >> sys.stderr, \
'Cannot import module "pwd"; try running with -n option.'
sys.exit(1)
nobody = pwd.getpwnam('nobody')[2]
try:
os.setuid(nobody)
except OSError, e:
if e.errno != errno.EPERM: raise
print >> sys.stderr, \
'Cannot setuid "nobody"; try running with -n option.'
sys.exit(1)
classname = options.classname
if "." in classname:
lastdot = classname.rfind(".")
mod = __import__(classname[:lastdot], globals(), locals(), [""])
classname = classname[lastdot+1:]
else:
import __main__ as mod
class_ = getattr(mod, classname)
proxy = class_((options.localhost, options.localport),
(options.remotehost, options.remoteport))
try:
asyncore.loop()
except KeyboardInterrupt:
pass
|
mpreisler/scap-security-guide-debian
|
refs/heads/master
|
scap-security-guide-0.1.21/OpenStack/utils/verify-references.py
|
3
|
#!/usr/bin/python
import sys
# always use shared/modules version
SHARED_MODULE_PATH = "../../shared/modules"
sys.path.insert(0, SHARED_MODULE_PATH)
import verify_references_module
if __name__ == "__main__":
verify_references_module.main()
|
passiweinberger/nupic
|
refs/heads/master
|
src/nupic/swarming/HypersearchV2.py
|
31
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import sys
import os
import time
import logging
import json
import hashlib
import itertools
import StringIO
import shutil
import tempfile
import copy
import pprint
# Explicit imports for names used below (numpy.inf, random.Random()).
import random
import numpy
from operator import itemgetter
from nupic.frameworks.opf import opfhelpers
from nupic.swarming.hypersearch.utils import sortedJSONDumpS, rApply, rCopy
from nupic.support.configuration import Configuration
from nupic.swarming.hypersearch.utils import clippedObj
from nupic.swarming.hypersearch.errorcodes import ErrorCodes
from nupic.swarming.hypersearch.experimentutils import InferenceType
from nupic.database.ClientJobsDAO import (
ClientJobsDAO, InvalidConnectionException)
from nupic.swarming.hypersearch.utils import (runModelGivenBaseAndParams,
runDummyModel)
from nupic.swarming.permutationhelpers import *
from nupic.swarming.exp_generator.ExpGenerator import expGenerator
def _flattenKeys(keys):
return '|'.join(keys)
class SwarmTerminator(object):
"""Class that records the performane of swarms in a sprint and makes
decisions about which swarms should stop running. This is a usful optimization
that identifies field combinations that no longer need to be run.
"""
MATURITY_WINDOW = None
MAX_GENERATIONS = None
_DEFAULT_MILESTONES = [1.0 / (x + 1) for x in xrange(12)]
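# Note (added for clarity, not in the original): the default milestones are
# [1.0, 0.5, 0.333, ...]; at generation g, _getTerminatedSwarms() terminates
# any swarm whose score exceeds (1 + milestones[g]) * bestScore for that
# generation.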
def __init__(self, milestones=None, logLevel=None):
# Set class constants.
self.MATURITY_WINDOW = int(Configuration.get(
"nupic.hypersearch.swarmMaturityWindow"))
self.MAX_GENERATIONS = int(Configuration.get(
"nupic.hypersearch.swarmMaxGenerations"))
if self.MAX_GENERATIONS < 0:
self.MAX_GENERATIONS = None
# Set up instance variables.
self._isTerminationEnabled = bool(int(Configuration.get(
'nupic.hypersearch.enableSwarmTermination')))
self.swarmBests = dict()
self.swarmScores = dict()
self.terminatedSwarms = set([])
self._logger = logging.getLogger(".".join(
['com.numenta', self.__class__.__module__, self.__class__.__name__]))
if milestones is not None:
self.milestones = milestones
else:
self.milestones = copy.deepcopy(self._DEFAULT_MILESTONES)
def recordDataPoint(self, swarmId, generation, errScore):
"""Record the best score for a swarm's generation index (x)
Returns list of swarmIds to terminate.
"""
terminatedSwarms = []
# Append score to existing swarm.
if swarmId in self.swarmScores:
entry = self.swarmScores[swarmId]
assert(len(entry) == generation)
entry.append(errScore)
entry = self.swarmBests[swarmId]
entry.append(min(errScore, entry[-1]))
assert(len(self.swarmBests[swarmId]) == len(self.swarmScores[swarmId]))
else:
# Create list of scores for a new swarm
assert (generation == 0)
self.swarmScores[swarmId] = [errScore]
self.swarmBests[swarmId] = [errScore]
# If the current swarm hasn't completed at least MATURITY_WINDOW generations,
# it should not be a candidate for maturation or termination. This prevents
# the initial allocation of particles in PSO from killing off a field
# combination too early.
if generation + 1 < self.MATURITY_WINDOW:
return terminatedSwarms
# If the swarm has completed more than MAX_GENERATIONS, it should be marked
# as mature, regardless of how its value is changing.
if self.MAX_GENERATIONS is not None and generation > self.MAX_GENERATIONS:
self._logger.info(
'Swarm %s has matured (more than %d generations). Stopping' %
(swarmId, self.MAX_GENERATIONS))
terminatedSwarms.append(swarmId)
if self._isTerminationEnabled:
terminatedSwarms.extend(self._getTerminatedSwarms(generation))
# Return which swarms to kill when we've reached maturity: if there has been
# no change in a swarm's best score for MATURITY_WINDOW generations, mark it
# dead.
cumulativeBestScores = self.swarmBests[swarmId]
if cumulativeBestScores[-1] == cumulativeBestScores[-self.MATURITY_WINDOW]:
self._logger.info('Swarm %s has matured (no change in %d generations). '
'Stopping...' % (swarmId, self.MATURITY_WINDOW))
terminatedSwarms.append(swarmId)
self.terminatedSwarms = self.terminatedSwarms.union(terminatedSwarms)
return terminatedSwarms
def numDataPoints(self, swarmId):
if swarmId in self.swarmScores:
return len(self.swarmScores[swarmId])
else:
return 0
def _getTerminatedSwarms(self, generation):
terminatedSwarms = []
generationScores = dict()
for swarm, scores in self.swarmScores.iteritems():
if len(scores) > generation and swarm not in self.terminatedSwarms:
generationScores[swarm] = scores[generation]
if len(generationScores) == 0:
return
bestScore = min(generationScores.values())
tolerance = self.milestones[generation]
for swarm, score in generationScores.iteritems():
if score > (1 + tolerance) * bestScore:
self._logger.info('Swarm %s is doing poorly at generation %d.\n'
'Current Score:%s \n'
'Best Score:%s \n'
'Tolerance:%s. Stopping...',
swarm, generation, score, bestScore, tolerance)
terminatedSwarms.append(swarm)
return terminatedSwarms
class ResultsDB(object):
"""This class holds all the information we have accumulated on completed
models, which particles were used, etc.
When we get updated results sent to us (via recordModelProgress), we
record it here for access later by various functions in this module.
"""
def __init__(self, hsObj):
""" Instantiate our results database
Parameters:
--------------------------------------------------------------------
hsObj: Reference to the HypersearchV2 instance
"""
self._hsObj = hsObj
# This list holds all the results we have so far on every model. In
# addition, we maintain multiple other data structures which provide
# faster access into portions of this list
self._allResults = []
# Models that completed with errors and all completed.
# These are used to determine when we should abort because of too many
# errors
self._errModels = set()
self._numErrModels = 0
self._completedModels = set()
self._numCompletedModels = 0
# Map of the model ID to index of result in _allResults
self._modelIDToIdx = dict()
# The global best result on the optimize metric so far, and the model ID
self._bestResult = numpy.inf
self._bestModelID = None
# This is a dict of dicts. The top level dict has the swarmId as the key.
# Each entry is a dict of genIdx: (modelId, errScore) entries.
self._swarmBestOverall = dict()
# For each swarm, we keep track of how many particles we have per generation
# The key is the swarmId, the value is a list of the number of particles
# at each generation
self._swarmNumParticlesPerGeneration = dict()
# The following variables are used to support the
# getMaturedSwarmGenerations() call.
#
# The _modifiedSwarmGens set contains the set of (swarmId, genIdx) tuples
# that have had results reported to them since the last time
# getMaturedSwarmGenerations() was called.
#
# The maturedSwarmGens contains (swarmId,genIdx) tuples, one for each
# swarm generation index which we have already detected has matured. This
# ensures that if by chance we get a rogue report from a model in a swarm
# generation index which we have already assumed was matured that we won't
# report on it again.
self._modifiedSwarmGens = set()
self._maturedSwarmGens = set()
# For each particle, we keep track of its best score (across all
# generations) and the position it was at when it got that score. The keys
# in this dict are the particleId, the values are (bestResult, position),
# where position is a dict with varName:position items in it.
self._particleBest = dict()
# For each particle, we keep track of its latest generation index.
self._particleLatestGenIdx = dict()
# For each swarm, we keep track of which models are in it. The key
# is the swarmId, the value is a list of indexes into self._allResults.
self._swarmIdToIndexes = dict()
# ParamsHash to index mapping
self._paramsHashToIndexes = dict()
def update(self, modelID, modelParams, modelParamsHash, metricResult,
completed, completionReason, matured, numRecords):
""" Insert a new entry or update an existing one. If this is an update
of an existing entry, then modelParams will be None
Parameters:
--------------------------------------------------------------------
modelID: globally unique modelID of this model
modelParams: params dict for this model, or None if this is just an update
of a model that was already reported on previously.
See the comments for the createModels() method for
a description of this dict.
modelParamsHash: hash of the modelParams dict, generated by the worker
that put it into the model database.
metricResult: value on the optimizeMetric for this model.
May be None if we have no results yet.
completed: True if the model has completed evaluation, False if it
is still running (and these are online results)
completionReason: One of the ClientJobsDAO.CMPL_REASON_XXX equates
matured: True if this model has matured
numRecords: Number of records that have been processed so far by this
model.
retval: Canonicalized result on the optimize metric
"""
# The modelParamsHash must always be provided - it can change after a
# model is inserted into the models table if it got detected as an
# orphan
assert (modelParamsHash is not None)
# We consider a model metricResult as "final" if it has completed or
# matured. By default, assume anything that has completed has matured
if completed:
matured = True
# Get the canonicalized optimize metric results. For this metric, lower
# is always better
if metricResult is not None and matured and \
completionReason in [ClientJobsDAO.CMPL_REASON_EOF,
ClientJobsDAO.CMPL_REASON_STOPPED]:
# Canonicalize the error score so that lower is better
if self._hsObj._maximize:
errScore = -1 * metricResult
else:
errScore = metricResult
if errScore < self._bestResult:
self._bestResult = errScore
self._bestModelID = modelID
self._hsObj.logger.info("New best model after %d evaluations: errScore "
"%g on model %s" % (len(self._allResults), self._bestResult,
self._bestModelID))
else:
errScore = numpy.inf
# If this model completed with an unacceptable completion reason, set the
# errScore to infinite and essentially make this model invisible to
# further queries
if completed and completionReason in [ClientJobsDAO.CMPL_REASON_ORPHAN]:
errScore = numpy.inf
hidden = True
else:
hidden = False
# Update our set of erred models and completed models. These are used
# to determine if we should abort the search because of too many errors
if completed:
self._completedModels.add(modelID)
self._numCompletedModels = len(self._completedModels)
if completionReason == ClientJobsDAO.CMPL_REASON_ERROR:
self._errModels.add(modelID)
self._numErrModels = len(self._errModels)
# Are we creating a new entry?
wasHidden = False
if modelID not in self._modelIDToIdx:
assert (modelParams is not None)
entry = dict(modelID=modelID, modelParams=modelParams,
modelParamsHash=modelParamsHash,
errScore=errScore, completed=completed,
matured=matured, numRecords=numRecords, hidden=hidden)
self._allResults.append(entry)
entryIdx = len(self._allResults) - 1
self._modelIDToIdx[modelID] = entryIdx
self._paramsHashToIndexes[modelParamsHash] = entryIdx
swarmId = modelParams['particleState']['swarmId']
if not hidden:
# Update the list of particles in each swarm
if swarmId in self._swarmIdToIndexes:
self._swarmIdToIndexes[swarmId].append(entryIdx)
else:
self._swarmIdToIndexes[swarmId] = [entryIdx]
# Update number of particles at each generation in this swarm
genIdx = modelParams['particleState']['genIdx']
numPsEntry = self._swarmNumParticlesPerGeneration.get(swarmId, [0])
while genIdx >= len(numPsEntry):
numPsEntry.append(0)
numPsEntry[genIdx] += 1
self._swarmNumParticlesPerGeneration[swarmId] = numPsEntry
# Replacing an existing one
else:
entryIdx = self._modelIDToIdx.get(modelID, None)
assert (entryIdx is not None)
entry = self._allResults[entryIdx]
wasHidden = entry['hidden']
# If the paramsHash changed, note that. This can happen for orphaned
# models
if entry['modelParamsHash'] != modelParamsHash:
self._paramsHashToIndexes.pop(entry['modelParamsHash'])
self._paramsHashToIndexes[modelParamsHash] = entryIdx
entry['modelParamsHash'] = modelParamsHash
# Get the model params, swarmId, and genIdx
modelParams = entry['modelParams']
swarmId = modelParams['particleState']['swarmId']
genIdx = modelParams['particleState']['genIdx']
# If this particle just became hidden, remove it from our swarm counts
if hidden and not wasHidden:
assert (entryIdx in self._swarmIdToIndexes[swarmId])
self._swarmIdToIndexes[swarmId].remove(entryIdx)
self._swarmNumParticlesPerGeneration[swarmId][genIdx] -= 1
# Update the entry for the latest info
entry['errScore'] = errScore
entry['completed'] = completed
entry['matured'] = matured
entry['numRecords'] = numRecords
entry['hidden'] = hidden
# Update the particle best errScore
particleId = modelParams['particleState']['id']
genIdx = modelParams['particleState']['genIdx']
if matured and not hidden:
(oldResult, pos) = self._particleBest.get(particleId, (numpy.inf, None))
if errScore < oldResult:
pos = Particle.getPositionFromState(modelParams['particleState'])
self._particleBest[particleId] = (errScore, pos)
# Update the particle latest generation index
prevGenIdx = self._particleLatestGenIdx.get(particleId, -1)
if not hidden and genIdx > prevGenIdx:
self._particleLatestGenIdx[particleId] = genIdx
elif hidden and not wasHidden and genIdx == prevGenIdx:
self._particleLatestGenIdx[particleId] = genIdx-1
# Update the swarm best score
if not hidden:
swarmId = modelParams['particleState']['swarmId']
if not swarmId in self._swarmBestOverall:
self._swarmBestOverall[swarmId] = []
bestScores = self._swarmBestOverall[swarmId]
while genIdx >= len(bestScores):
bestScores.append((None, numpy.inf))
if errScore < bestScores[genIdx][1]:
bestScores[genIdx] = (modelID, errScore)
# Update the self._modifiedSwarmGens flags to support the
# getMaturedSwarmGenerations() call.
if not hidden:
key = (swarmId, genIdx)
if not key in self._maturedSwarmGens:
self._modifiedSwarmGens.add(key)
return errScore
def getNumErrModels(self):
"""Return number of models that completed with errors.
Parameters:
---------------------------------------------------------------------
retval: # of models
"""
return self._numErrModels
def getErrModelIds(self):
"""Return list of models IDs that completed with errors.
Parameters:
---------------------------------------------------------------------
retval: list of model IDs
"""
return list(self._errModels)
def getNumCompletedModels(self):
"""Return total number of models that completed.
Parameters:
---------------------------------------------------------------------
retval: # of models that completed
"""
return self._numCompletedModels
def getModelIDFromParamsHash(self, paramsHash):
""" Return the modelID of the model with the given paramsHash, or
None if not found.
Parameters:
---------------------------------------------------------------------
paramsHash: paramsHash to look for
retval: modelId, or None if not found
"""
entryIdx = self._paramsHashToIndexes.get(paramsHash, None)
if entryIdx is not None:
return self._allResults[entryIdx]['modelID']
else:
return None
def numModels(self, swarmId=None, includeHidden=False):
"""Return the total # of models we have in our database (if swarmId is
None) or in a specific swarm.
Parameters:
---------------------------------------------------------------------
swarmId: A string representation of the sorted list of encoders
in this swarm. For example '__address_encoder.__gym_encoder'
includeHidden: If False, this will only return the number of models
that are not hidden (i.e. not orphaned, etc.)
retval: numModels
"""
# Count all models
if includeHidden:
if swarmId is None:
return len(self._allResults)
else:
return len(self._swarmIdToIndexes.get(swarmId, []))
# Only count non-hidden models
else:
if swarmId is None:
entries = self._allResults
else:
entries = [self._allResults[entryIdx]
for entryIdx in self._swarmIdToIndexes.get(swarmId,[])]
return len([entry for entry in entries if not entry['hidden']])
def bestModelIdAndErrScore(self, swarmId=None, genIdx=None):
"""Return the model ID of the model with the best result so far and
its score on the optimize metric. If swarm is None, then it returns
the global best, otherwise it returns the best for the given swarm
for all generations up to and including genIdx.
Parameters:
---------------------------------------------------------------------
swarmId: A string representation of the sorted list of encoders in this
swarm. For example '__address_encoder.__gym_encoder'
genIdx: consider the best in all generations up to and including this
generation if not None.
retval: (modelID, result)
"""
if swarmId is None:
return (self._bestModelID, self._bestResult)
else:
if swarmId not in self._swarmBestOverall:
return (None, numpy.inf)
# Get the best score, considering the appropriate generations
genScores = self._swarmBestOverall[swarmId]
bestModelId = None
bestScore = numpy.inf
for (i, (modelId, errScore)) in enumerate(genScores):
if genIdx is not None and i > genIdx:
break
if errScore < bestScore:
bestScore = errScore
bestModelId = modelId
return (bestModelId, bestScore)
def getParticleInfo(self, modelId):
"""Return particle info for a specific modelId.
Parameters:
---------------------------------------------------------------------
modelId: which model Id
retval: (particleState, modelId, errScore, completed, matured)
"""
entry = self._allResults[self._modelIDToIdx[modelId]]
return (entry['modelParams']['particleState'], modelId, entry['errScore'],
entry['completed'], entry['matured'])
def getParticleInfos(self, swarmId=None, genIdx=None, completed=None,
matured=None, lastDescendent=False):
"""Return a list of particleStates for all particles we know about in
the given swarm, their model Ids, and metric results.
Parameters:
---------------------------------------------------------------------
swarmId: A string representation of the sorted list of encoders in this
swarm. For example '__address_encoder.__gym_encoder'
genIdx: If not None, only return particles at this specific generation
index.
completed: If not None, only return particles of the given state (either
completed if 'completed' is True, or running if 'completed'
is False)
matured: If not None, only return particles of the given state (either
matured if 'matured' is True, or not matured if 'matured'
is False). Note that any model which has completed is also
considered matured.
lastDescendent: If True, only return particles that are the last descendent,
that is, the highest generation index for a given particle Id
retval: (particleStates, modelIds, errScores, completed, matured)
particleStates: list of particleStates
modelIds: list of modelIds
errScores: list of errScores, numpy.inf is plugged in
if we don't have a result yet
completed: list of completed booleans
matured: list of matured booleans
"""
# The indexes of all the models in this swarm. This list excludes hidden
# (orphaned) models.
if swarmId is not None:
entryIdxs = self._swarmIdToIndexes.get(swarmId, [])
else:
entryIdxs = range(len(self._allResults))
if len(entryIdxs) == 0:
return ([], [], [], [], [])
# Get the particles of interest
particleStates = []
modelIds = []
errScores = []
completedFlags = []
maturedFlags = []
for idx in entryIdxs:
entry = self._allResults[idx]
# If this entry is hidden (i.e. it was an orphaned model), it should
# not be in this list
if swarmId is not None:
assert (not entry['hidden'])
# Get info on this model
modelParams = entry['modelParams']
isCompleted = entry['completed']
isMatured = entry['matured']
particleState = modelParams['particleState']
particleGenIdx = particleState['genIdx']
particleId = particleState['id']
if genIdx is not None and particleGenIdx != genIdx:
continue
if completed is not None and (completed != isCompleted):
continue
if matured is not None and (matured != isMatured):
continue
if lastDescendent \
and (self._particleLatestGenIdx[particleId] != particleGenIdx):
continue
# Incorporate into return values
particleStates.append(particleState)
modelIds.append(entry['modelID'])
errScores.append(entry['errScore'])
completedFlags.append(isCompleted)
maturedFlags.append(isMatured)
return (particleStates, modelIds, errScores, completedFlags, maturedFlags)
def getOrphanParticleInfos(self, swarmId, genIdx):
"""Return a list of particleStates for all particles in the given
swarm generation that have been orphaned.
Parameters:
---------------------------------------------------------------------
swarmId: A string representation of the sorted list of encoders in this
swarm. For example '__address_encoder.__gym_encoder'
genIdx: If not None, only return particles at this specific generation
index.
retval: (particleStates, modelIds, errScores, completed, matured)
particleStates: list of particleStates
modelIds: list of modelIds
errScores: list of errScores, numpy.inf is plugged in
if we don't have a result yet
completed: list of completed booleans
matured: list of matured booleans
"""
entryIdxs = range(len(self._allResults))
if len(entryIdxs) == 0:
return ([], [], [], [], [])
# Get the particles of interest
particleStates = []
modelIds = []
errScores = []
completedFlags = []
maturedFlags = []
for idx in entryIdxs:
# Get info on this model
entry = self._allResults[idx]
if not entry['hidden']:
continue
modelParams = entry['modelParams']
if modelParams['particleState']['swarmId'] != swarmId:
continue
isCompleted = entry['completed']
isMatured = entry['matured']
particleState = modelParams['particleState']
particleGenIdx = particleState['genIdx']
particleId = particleState['id']
if genIdx is not None and particleGenIdx != genIdx:
continue
# Incorporate into return values
particleStates.append(particleState)
modelIds.append(entry['modelID'])
errScores.append(entry['errScore'])
completedFlags.append(isCompleted)
maturedFlags.append(isMatured)
return (particleStates, modelIds, errScores, completedFlags, maturedFlags)
def getMaturedSwarmGenerations(self):
"""Return a list of swarm generations that have completed and the
best (minimal) errScore seen for each of them.
Parameters:
---------------------------------------------------------------------
retval: list of tuples. Each tuple is of the form:
(swarmId, genIdx, bestErrScore)
"""
# Return results go in this list
result = []
# For each of the swarm generations which have had model result updates
# since the last time we were called, see which have completed.
modifiedSwarmGens = sorted(self._modifiedSwarmGens)
# Walk through them in order from lowest to highest generation index
for key in modifiedSwarmGens:
(swarmId, genIdx) = key
# Skip it if we've already reported on it. This should happen rarely, if
# ever. It means that some worker has started and completed a model in
# this generation after we've determined that the generation has ended.
if key in self._maturedSwarmGens:
self._modifiedSwarmGens.remove(key)
continue
# If the previous generation for this swarm is not complete yet, don't
# bother evaluating this one.
if (genIdx >= 1) and not (swarmId, genIdx-1) in self._maturedSwarmGens:
continue
# We found a swarm generation that had some results reported since last
# time, see if it's complete or not
(_, _, errScores, completedFlags, maturedFlags) = \
self.getParticleInfos(swarmId, genIdx)
maturedFlags = numpy.array(maturedFlags)
numMatured = maturedFlags.sum()
if numMatured >= self._hsObj._minParticlesPerSwarm \
and numMatured == len(maturedFlags):
errScores = numpy.array(errScores)
bestScore = errScores.min()
self._maturedSwarmGens.add(key)
self._modifiedSwarmGens.remove(key)
result.append((swarmId, genIdx, bestScore))
# Return results
return result
def firstNonFullGeneration(self, swarmId, minNumParticles):
""" Return the generation index of the first generation in the given
swarm that does not have numParticles particles in it, either still in the
running state or completed. This does not include orphaned particles.
Parameters:
---------------------------------------------------------------------
swarmId: A string representation of the sorted list of encoders in this
swarm. For example '__address_encoder.__gym_encoder'
minNumParticles: minimum number of particles required for a full
generation.
retval: generation index, or None if no particles at all.
"""
if not swarmId in self._swarmNumParticlesPerGeneration:
return None
numPsPerGen = self._swarmNumParticlesPerGeneration[swarmId]
numPsPerGen = numpy.array(numPsPerGen)
firstNonFull = numpy.where(numPsPerGen < minNumParticles)[0]
if len(firstNonFull) == 0:
return len(numPsPerGen)
else:
return firstNonFull[0]
def highestGeneration(self, swarmId):
""" Return the generation index of the highest generation in the given
swarm.
Parameters:
---------------------------------------------------------------------
swarmId: A string representation of the sorted list of encoders in this
swarm. For example '__address_encoder.__gym_encoder'
retval: generation index
"""
numPsPerGen = self._swarmNumParticlesPerGeneration[swarmId]
return len(numPsPerGen)-1
def getParticleBest(self, particleId):
""" Return the best score and position for a given particle. The position
is given as a dict, with varName:varPosition items in it.
Parameters:
---------------------------------------------------------------------
particleId: which particle
retval: (bestResult, bestPosition)
"""
return self._particleBest.get(particleId, (None, None))
def getResultsPerChoice(self, swarmId, maxGenIdx, varName):
""" Return a dict of the errors obtained on models that were run with
each value from a PermuteChoice variable.
For example, if a PermuteChoice variable has the following choices:
['a', 'b', 'c']
The dict will have 3 elements. The keys are the stringified choiceVars,
and each value is a tuple containing (choiceVar, errors) where choiceVar is
the original form of the choiceVar (before stringification) and errors is
the list of errors received from models that used the specific choice:
retval:
{'a': ('a', [0.1, 0.2, 0.3]), 'b': ('b', [0.5, 0.1, 0.6]), 'c': ('c', [])}
Parameters:
---------------------------------------------------------------------
swarmId: swarm Id of the swarm to retrieve info from
maxGenIdx: max generation index to consider from other models, ignored
if None
varName: which variable to retrieve
retval: list of the errors obtained from each choice.
"""
results = dict()
# Get all the completed particles in this swarm
(allParticles, _, resultErrs, _, _) = self.getParticleInfos(swarmId,
genIdx=None, matured=True)
for particleState, resultErr in itertools.izip(allParticles, resultErrs):
# Consider this generation?
if maxGenIdx is not None:
if particleState['genIdx'] > maxGenIdx:
continue
# Ignore unless this model completed successfully
if resultErr == numpy.inf:
continue
position = Particle.getPositionFromState(particleState)
varPosition = position[varName]
varPositionStr = str(varPosition)
if varPositionStr in results:
results[varPositionStr][1].append(resultErr)
else:
results[varPositionStr] = (varPosition, [resultErr])
return results
class Particle(object):
"""Construct a particle. Each particle evaluates one or more models
serially. Each model represents a position that the particle is evaluated
at.
Each position is a set of values chosen for each of the permutation variables.
The particle's best position is the value of the permutation variables when it
did best on the optimization metric.
Some permutation variables are treated like traditional particle swarm
variables - that is they have a position and velocity. Others are simply
choice variables, for example a list of strings. We follow a different
methodology for choosing each permutation variable value depending on its
type.
A particle belongs to 1 and only 1 swarm. A swarm is a collection of particles
that all share the same global best position. A swarm is identified by its
specific combination of fields. If we are evaluating multiple different field
combinations, then there will be multiple swarms. A Hypersearch Worker (HSW)
will only instantiate and run one particle at a time. When done running a
particle, another worker can pick it up, pick a new position for it, and run
it based on the particle state information which is stored in each model table
entry.
Each particle has a generationIdx. It starts out at generation #0. Every time
a model evaluation completes and the particle is moved to a different position
(to evaluate a different model), the generation index is incremented.
Every particle that is created has a unique particleId. The particleId
is a string formed as '<workerConnectionId>.<particleIdx>', where particleIdx
starts at 0 for each worker and increments by 1 every time a new particle
is created by that worker.
"""
_nextParticleID = 0
def __init__(self, hsObj, resultsDB, flattenedPermuteVars,
swarmId=None, newFarFrom=None, evolveFromState=None,
newFromClone=None, newParticleId=False):
""" Create a particle.
There are 3 fundamentally different methods of instantiating a particle:
1.) You can instantiate a new one from scratch, at generation index #0. This
particle gets a new particleId.
required: swarmId
optional: newFarFrom
must be None: evolveFromState, newFromClone
2.) You can instantiate one from savedState, in which case its generation
index is incremented (from the value stored in the saved state) and
its particleId remains the same.
required: evolveFromState
optional:
must be None: swarmId, newFromClone
3.) You can clone another particle, creating a new particle at the same
generationIdx but a different particleId. This new particle will end
up at exactly the same position as the one it was cloned from. If
you want to move it to the next position, or just jiggle it a bit, call
newPosition() or agitate() after instantiation.
required: newFromClone
optional:
must be None: swarmId, evolveFromState
Parameters:
--------------------------------------------------------------------
hsObj: The HypersearchV2 instance
resultsDB: the ResultsDB instance that holds all the model results
flattenedPermuteVars: dict() containing the (key, PermuteVariable) pairs
of the flattened permutation variables as read from the permutations
file.
swarmId: String that represents the encoder names of the encoders that are
to be included in this particle's model. Of the form
'encoder1.encoder2'.
Required for creation method #1.
newFarFrom: If not None, this is a list of other particleState dicts in the
swarm that we want to be as far away from as possible. Optional
argument for creation method #1.
evolveFromState: If not None, evolve an existing particle. This is a
dict containing the particle's state. Preserve the particleId, but
increment the generation index. Required for creation method #2.
newFromClone: If not None, clone this other particle's position and generation
index, with small random perturbations. This is a dict containing the
particle's state. Required for creation method #3.
newParticleId: Only applicable when newFromClone is True. Give the clone
a new particle ID.
"""
# Save constructor arguments
self._hsObj = hsObj
self.logger = hsObj.logger
self._resultsDB = resultsDB
# Seed the random number generator used for all the variables in this
# particle. We will seed it differently based on the construction method,
# below.
self._rng = random.Random()
self._rng.seed(42)
# Setup our variable set by taking what's in flattenedPermuteVars and
# stripping out vars that belong to encoders we are not using.
def _setupVars(flattenedPermuteVars):
allowedEncoderNames = self.swarmId.split('.')
self.permuteVars = copy.deepcopy(flattenedPermuteVars)
# Remove fields we don't want.
varNames = self.permuteVars.keys()
for varName in varNames:
# Remove encoders we're not using
if ':' in varName: # if an encoder
if varName.split(':')[0] not in allowedEncoderNames:
self.permuteVars.pop(varName)
continue
# All PermuteChoice variables need to know all prior results obtained
# with each choice.
if isinstance(self.permuteVars[varName], PermuteChoices):
if self._hsObj._speculativeParticles:
maxGenIdx = None
else:
maxGenIdx = self.genIdx-1
resultsPerChoice = self._resultsDB.getResultsPerChoice(
swarmId=self.swarmId, maxGenIdx=maxGenIdx, varName=varName)
self.permuteVars[varName].setResultsPerChoice(
resultsPerChoice.values())
# Method #1
# Create from scratch, optionally pushing away from others that already
# exist.
if swarmId is not None:
assert (evolveFromState is None)
assert (newFromClone is None)
# Save construction param
self.swarmId = swarmId
# Assign a new unique ID to this particle
self.particleId = "%s.%s" % (str(self._hsObj._workerID),
str(Particle._nextParticleID))
Particle._nextParticleID += 1
# Init the generation index
self.genIdx = 0
# Setup the variables to initial locations.
_setupVars(flattenedPermuteVars)
# Push away from other particles?
if newFarFrom is not None:
for varName in self.permuteVars.iterkeys():
otherPositions = []
for particleState in newFarFrom:
otherPositions.append(particleState['varStates'][varName]['position'])
self.permuteVars[varName].pushAwayFrom(otherPositions, self._rng)
# Give this particle a unique seed.
self._rng.seed(str(otherPositions))
# Method #2
# Instantiate from saved state, preserving particleId but incrementing
# generation index.
elif evolveFromState is not None:
assert (swarmId is None)
assert (newFarFrom is None)
assert (newFromClone is None)
# Setup other variables from saved state
self.particleId = evolveFromState['id']
self.genIdx = evolveFromState['genIdx'] + 1
self.swarmId = evolveFromState['swarmId']
# Setup the variables to initial locations.
_setupVars(flattenedPermuteVars)
# Override the position and velocity of each variable from
# saved state
self.initStateFrom(self.particleId, evolveFromState, newBest=True)
# Move it to the next position. We need the swarm best for this.
self.newPosition()
# Method #3
# Clone another particle, producing a new particle at the same genIdx with
# the same particleID. This is used to re-run an orphaned model.
elif newFromClone is not None:
assert (swarmId is None)
assert (newFarFrom is None)
assert (evolveFromState is None)
# Setup other variables from clone particle
self.particleId = newFromClone['id']
if newParticleId:
self.particleId = "%s.%s" % (str(self._hsObj._workerID),
str(Particle._nextParticleID))
Particle._nextParticleID += 1
self.genIdx = newFromClone['genIdx']
self.swarmId = newFromClone['swarmId']
# Setup the variables to initial locations.
_setupVars(flattenedPermuteVars)
# Override the position and velocity of each variable from
# the clone
self.initStateFrom(self.particleId, newFromClone, newBest=False)
else:
assert False, "invalid creation parameters"
# Log it
self.logger.debug("Created particle: %s" % (str(self)))
def __repr__(self):
return "Particle(swarmId=%s) [particleId=%s, genIdx=%d, " \
"permuteVars=\n%s]" % (self.swarmId, self.particleId,
self.genIdx, pprint.pformat(self.permuteVars, indent=4))
def getState(self):
"""Get the particle state as a dict. This is enough information to
instantiate this particle on another worker."""
varStates = dict()
for varName, var in self.permuteVars.iteritems():
varStates[varName] = var.getState()
return dict(id = self.particleId,
genIdx = self.genIdx,
swarmId = self.swarmId,
varStates = varStates)
def initStateFrom(self, particleId, particleState, newBest):
"""Init all of our variable positions, velocities, and optionally the best
result and best position from the given particle.
If newBest is true, we get the best result and position for this new
generation from the resultsDB. This is used when evolving a particle
because the bestResult and position stored in the saved state were the best
AT THE TIME THAT PARTICLE STARTED TO RUN and do not include the best since that
particle completed.
"""
# Get the updated best position and result?
if newBest:
(bestResult, bestPosition) = self._resultsDB.getParticleBest(particleId)
else:
bestResult = bestPosition = None
# Replace with the position and velocity of each variable from
# saved state
varStates = particleState['varStates']
for varName in varStates.keys():
varState = copy.deepcopy(varStates[varName])
if newBest:
varState['bestResult'] = bestResult
if bestPosition is not None:
varState['bestPosition'] = bestPosition[varName]
self.permuteVars[varName].setState(varState)
def copyEncoderStatesFrom(self, particleState):
"""Copy all encoder variables from particleState into this particle.
Parameters:
--------------------------------------------------------------
particleState: dict produced by a particle's getState() method
"""
# Set this to false if you don't want the variable to move anymore
# after we set the state
allowedToMove = True
for varName in particleState['varStates']:
if ':' in varName: # if an encoder
# If this particle doesn't include this field, don't copy it
if varName not in self.permuteVars:
continue
# Set the best position to the copied position
state = copy.deepcopy(particleState['varStates'][varName])
state['_position'] = state['position']
state['bestPosition'] = state['position']
if not allowedToMove:
state['velocity'] = 0
# Set the state now
self.permuteVars[varName].setState(state)
if allowedToMove:
# Let the particle move in both directions from the best position
# it found previously and set its initial velocity to a known
# fraction of the total distance.
self.permuteVars[varName].resetVelocity(self._rng)
def copyVarStatesFrom(self, particleState, varNames):
"""Copy specific variables from particleState into this particle.
Parameters:
--------------------------------------------------------------
particleState: dict produced by a particle's getState() method
varNames: which variables to copy
"""
# Set this to false if you don't want the variable to move anymore
# after we set the state
allowedToMove = True
for varName in particleState['varStates']:
if varName in varNames:
# If this particle doesn't include this field, don't copy it
if varName not in self.permuteVars:
continue
# Set the best position to the copied position
state = copy.deepcopy(particleState['varStates'][varName])
state['_position'] = state['position']
state['bestPosition'] = state['position']
if not allowedToMove:
state['velocity'] = 0
# Set the state now
self.permuteVars[varName].setState(state)
if allowedToMove:
# Let the particle move in both directions from the best position
# it found previously and set its initial velocity to a known
# fraction of the total distance.
self.permuteVars[varName].resetVelocity(self._rng)
def getPosition(self):
"""Return the position of this particle. This returns a dict() of key
value pairs where each key is the name of the flattened permutation
variable and the value is its chosen value.
Parameters:
--------------------------------------------------------------
retval: dict() of flattened permutation choices
"""
result = dict()
for (varName, value) in self.permuteVars.iteritems():
result[varName] = value.getPosition()
return result
@staticmethod
def getPositionFromState(pState):
"""Return the position of a particle given its state dict.
Parameters:
--------------------------------------------------------------
retval: dict() of particle position, keys are the variable names,
values are their positions
"""
result = dict()
for (varName, value) in pState['varStates'].iteritems():
result[varName] = value['position']
return result
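# Illustrative example (hypothetical variable names and values): given a
# state dict produced by getState(), getPositionFromState() just extracts
# each variable's current position:
#
#   pState = {'varStates': {
#       'modelParams|tpParams|minThreshold': {'position': 11, ...},
#       'modelParams|sensorParams|encoders|attendance:w': {'position': 21, ...}}}
#   Particle.getPositionFromState(pState)
#   --> {'modelParams|tpParams|minThreshold': 11,
#        'modelParams|sensorParams|encoders|attendance:w': 21}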
def agitate(self):
"""Agitate this particle so that it is likely to go to a new position.
Every time agitate is called, the particle is jiggled an even greater
amount.
Parameters:
--------------------------------------------------------------
retval: None
"""
for (varName, var) in self.permuteVars.iteritems():
var.agitate()
self.newPosition()
def newPosition(self, whichVars=None):
# TODO: incorporate data from choice variables....
# TODO: make sure we're calling this when appropriate.
"""Choose a new position based on results obtained so far from all other
particles.
Parameters:
--------------------------------------------------------------
whichVars: If not None, only move these variables
retval: new position
"""
# Get the global best position for this swarm generation
globalBestPosition = None
# If speculative particles are enabled, use the global best considering
# even particles in the current generation. This gives better results
# but does not provide repeatable results because it depends on
# worker timing
if self._hsObj._speculativeParticles:
genIdx = self.genIdx
else:
genIdx = self.genIdx - 1
if genIdx >= 0:
(bestModelId, _) = self._resultsDB.bestModelIdAndErrScore(self.swarmId, genIdx)
if bestModelId is not None:
(particleState, _, _, _, _) = self._resultsDB.getParticleInfo(bestModelId)
globalBestPosition = Particle.getPositionFromState(particleState)
# Update each variable
for (varName, var) in self.permuteVars.iteritems():
if whichVars is not None and varName not in whichVars:
continue
if globalBestPosition is None:
var.newPosition(None, self._rng)
else:
var.newPosition(globalBestPosition[varName], self._rng)
# get the new position
position = self.getPosition()
# Log the new position
if self.logger.getEffectiveLevel() <= logging.DEBUG:
msg = StringIO.StringIO()
print >> msg, "New particle position: \n%s" % (pprint.pformat(position,
indent=4))
print >> msg, "Particle variables:"
for (varName, var) in self.permuteVars.iteritems():
print >> msg, " %s: %s" % (varName, str(var))
self.logger.debug(msg.getvalue())
msg.close()
return position
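# A minimal sketch of the PSO-style update that each permute variable is
# expected to perform inside var.newPosition(); the exact coefficients,
# clipping and per-type handling live in the PermuteVariable subclasses, so
# the names below (inertia, cogRate, socRate) are illustrative only:
#
#   velocity = inertia * velocity \
#              + cogRate * rng.random() * (bestPosition - position) \
#              + socRate * rng.random() * (globalBestPosition - position)
#   position = position + velocity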
class HsState(object):
"""This class encapsulates the Hypersearch state which we share with all
other workers. This state gets serialized into a JSON dict and written to
the engWorkerState field of the job record.
Whenever a worker changes this state, it does an atomic setFieldIfEqual to
ensure it has the latest state, as updated by any other worker, as a base.
Here is an example snapshot of this state information:
swarms = {'a': {'status': 'completed', # 'active','completing','completed',
# or 'killed'
'bestModelId': <modelID>, # Only set for 'completed' swarms
'bestErrScore': <errScore>, # Only set for 'completed' swarms
'sprintIdx': 0,
},
'a.b': {'status': 'active',
'bestModelId': None,
'bestErrScore': None,
'sprintIdx': 1,
}
}
sprints = [{'status': 'completed', # 'active','completing','completed'
'bestModelId': <modelID>, # Only set for 'completed' sprints
'bestErrScore': <errScore>, # Only set for 'completed' sprints
},
{'status': 'completing',
'bestModelId': <None>,
'bestErrScore': <None>
}
{'status': 'active',
'bestModelId': None
'bestErrScore': None
}
]
"""
def __init__(self, hsObj):
""" Create our state object.
Parameters:
---------------------------------------------------------------------
hsObj: Reference to the HypersearchV2 instance that owns this state
"""
# Save constructor parameters
self._hsObj = hsObj
# Convenient access to the logger
self.logger = self._hsObj.logger
# This contains our current state, and local working changes
self._state = None
# This contains the state we last read from the database
self._priorStateJSON = None
# Set when we make a change to our state locally
self._dirty = False
# Read in the initial state
self.readStateFromDB()
def isDirty(self):
"""Return true if our local copy of the state has changed since the
last time we read from the DB.
"""
return self._dirty
def isSearchOver(self):
"""Return true if the search should be considered over."""
return self._state['searchOver']
def readStateFromDB(self):
"""Set our state to that obtained from the engWorkerState field of the
job record.
Parameters:
---------------------------------------------------------------------
stateJSON: JSON encoded state from job record
"""
self._priorStateJSON = self._hsObj._cjDAO.jobGetFields(self._hsObj._jobID,
['engWorkerState'])[0]
# Init if no prior state yet
if self._priorStateJSON is None:
swarms = dict()
# Fast Swarm, first and only sprint has one swarm for each field
# in fixedFields
if self._hsObj._fixedFields is not None:
print self._hsObj._fixedFields
encoderSet = []
for field in self._hsObj._fixedFields:
if field =='_classifierInput':
continue
encoderName = self.getEncoderKeyFromName(field)
assert encoderName in self._hsObj._encoderNames, "The field '%s' " \
" specified in the fixedFields list is not present in this " \
" model." % (field)
encoderSet.append(encoderName)
encoderSet.sort()
swarms['.'.join(encoderSet)] = {
'status': 'active',
'bestModelId': None,
'bestErrScore': None,
'sprintIdx': 0,
}
# Temporal prediction search, first sprint has N swarms of 1 field each,
# the predicted field may or may not be that one field.
elif self._hsObj._searchType == HsSearchType.temporal:
for encoderName in self._hsObj._encoderNames:
swarms[encoderName] = {
'status': 'active',
'bestModelId': None,
'bestErrScore': None,
'sprintIdx': 0,
}
# Classification prediction search, first sprint has N swarms of 1 field
# each where this field can NOT be the predicted field.
elif self._hsObj._searchType == HsSearchType.classification:
for encoderName in self._hsObj._encoderNames:
if encoderName == self._hsObj._predictedFieldEncoder:
continue
swarms[encoderName] = {
'status': 'active',
'bestModelId': None,
'bestErrScore': None,
'sprintIdx': 0,
}
# Legacy temporal. This is either a model that uses reconstruction or
# an older multi-step model that doesn't have a separate
# 'classifierOnly' encoder for the predicted field. Here, the predicted
# field must ALWAYS be present and the first sprint tries the predicted
# field only
elif self._hsObj._searchType == HsSearchType.legacyTemporal:
swarms[self._hsObj._predictedFieldEncoder] = {
'status': 'active',
'bestModelId': None,
'bestErrScore': None,
'sprintIdx': 0,
}
else:
raise RuntimeError("Unsupported search type: %s" % \
(self._hsObj._searchType))
# Initialize the state.
self._state = dict(
# The last time the state was updated by a worker.
lastUpdateTime = time.time(),
# Set from within setSwarmState() if we detect that the sprint we just
# completed did worse than a prior sprint. This stores the index of
# the last good sprint.
lastGoodSprint = None,
# Set from within setSwarmState() if lastGoodSprint has been set and none
# of the remaining good sprints are still active.
searchOver = False,
# This is a summary of the active swarms - this information can also
# be obtained from the swarms entry that follows, but is summarized here
# for easier reference when viewing the state as presented by
# log messages and prints of the hsState data structure (by
# permutations_runner).
activeSwarms = swarms.keys(),
# All the swarms that have been created so far.
swarms = swarms,
# All the sprints that have completed or are in progress.
sprints = [{'status': 'active',
'bestModelId': None,
'bestErrScore': None}],
# The list of encoders we have "blacklisted" because they
# performed so poorly.
blackListedEncoders = [],
)
# This will do nothing if the value of engWorkerState is not still None.
self._hsObj._cjDAO.jobSetFieldIfEqual(
self._hsObj._jobID, 'engWorkerState', json.dumps(self._state), None)
self._priorStateJSON = self._hsObj._cjDAO.jobGetFields(
self._hsObj._jobID, ['engWorkerState'])[0]
assert (self._priorStateJSON is not None)
# Read state from the database
self._state = json.loads(self._priorStateJSON)
self._dirty = False
def writeStateToDB(self):
"""Update the state in the job record with our local changes (if any).
If we don't have the latest state in our priorStateJSON, then re-load
in the latest state and return False. If we were successful writing out
our changes, return True
Parameters:
---------------------------------------------------------------------
retval: True if we were successful writing out our changes
False if our priorState is not the latest that was in the DB.
In this case, we will re-load our state from the DB
"""
# If no changes, do nothing
if not self._dirty:
return True
# Set the update time
self._state['lastUpdateTime'] = time.time()
newStateJSON = json.dumps(self._state)
success = self._hsObj._cjDAO.jobSetFieldIfEqual(self._hsObj._jobID,
'engWorkerState', str(newStateJSON), str(self._priorStateJSON))
if success:
self.logger.debug("Success changing hsState to: \n%s " % \
(pprint.pformat(self._state, indent=4)))
self._priorStateJSON = newStateJSON
# If no success, read in the current state from the DB
else:
self.logger.debug("Failed to change hsState to: \n%s " % \
(pprint.pformat(self._state, indent=4)))
self._priorStateJSON = self._hsObj._cjDAO.jobGetFields(self._hsObj._jobID,
['engWorkerState'])[0]
self._state = json.loads(self._priorStateJSON)
self.logger.info("New hsState has been set by some other worker to: "
" \n%s" % (pprint.pformat(self._state, indent=4)))
return success
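# Typical caller pattern for the optimistic write above (a sketch, not the
# module's actual worker code): re-apply the local change and retry until
# the compare-and-swap against the prior JSON succeeds.
#
#   hsState.setSwarmState(swarmId, 'completed')    # marks the state dirty
#   while not hsState.writeStateToDB():
#     # another worker won the race; our state was re-read from the DB,
#     # so apply the change again on top of the fresh state and retry
#     hsState.setSwarmState(swarmId, 'completed')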
def getEncoderNameFromKey(self, key):
""" Given an encoder dictionary key, get the encoder name.
Encoders are a sub-dict within model params, and in HSv2, their key
is structured like this for example:
'modelParams|sensorParams|encoders|home_winloss'
The encoderName is the last word in the | separated key name
"""
return key.split('|')[-1]
def getEncoderKeyFromName(self, name):
""" Given an encoder name, get the key.
Encoders are a sub-dict within model params, and in HSv2, their key
is structured like this for example:
'modelParams|sensorParams|encoders|home_winloss'
The encoderName is the last word in the | separated key name
"""
return 'modelParams|sensorParams|encoders|%s' % (name)
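# Illustrative round trip between the two helpers above:
#   getEncoderKeyFromName('home_winloss')
#     --> 'modelParams|sensorParams|encoders|home_winloss'
#   getEncoderNameFromKey('modelParams|sensorParams|encoders|home_winloss')
#     --> 'home_winloss'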
def getFieldContributions(self):
"""Return the field contributions statistics.
Parameters:
---------------------------------------------------------------------
retval: Dictionary where the keys are the field names and the values
are how much each field contributed to the best score.
"""
# In the fast swarm, there is only 1 sprint and field contributions are
# not defined
if self._hsObj._fixedFields is not None:
return dict(), dict()
# Get the predicted field encoder name
predictedEncoderName = self._hsObj._predictedFieldEncoder
# -----------------------------------------------------------------------
# Collect all the single field scores
fieldScores = []
for swarmId, info in self._state['swarms'].iteritems():
encodersUsed = swarmId.split('.')
if len(encodersUsed) != 1:
continue
field = self.getEncoderNameFromKey(encodersUsed[0])
bestScore = info['bestErrScore']
# If the bestScore is None, this swarm hasn't completed yet (this could
# happen if we're exiting because of maxModels), so look up the best
# score so far
if bestScore is None:
(_modelId, bestScore) = \
self._hsObj._resultsDB.bestModelIdAndErrScore(swarmId)
fieldScores.append((bestScore, field))
# -----------------------------------------------------------------------
# If we only have 1 field that was tried in the first sprint, then use that
# as the base and get the contributions from the fields in the next sprint.
if self._hsObj._searchType == HsSearchType.legacyTemporal:
assert(len(fieldScores)==1)
(baseErrScore, baseField) = fieldScores[0]
for swarmId, info in self._state['swarms'].iteritems():
encodersUsed = swarmId.split('.')
if len(encodersUsed) != 2:
continue
fields = [self.getEncoderNameFromKey(name) for name in encodersUsed]
fields.remove(baseField)
fieldScores.append((info['bestErrScore'], fields[0]))
# The first sprint tried a bunch of fields, pick the worst performing one
# (within the top self._hsObj._maxBranching ones) as the base
else:
fieldScores.sort(reverse=True)
# If maxBranching was specified, pick the worst performing field within
# the top maxBranching+1 fields as our base, which will give that field
# a contribution of 0.
if self._hsObj._maxBranching > 0 \
and len(fieldScores) > self._hsObj._maxBranching:
baseErrScore = fieldScores[-self._hsObj._maxBranching-1][0]
else:
baseErrScore = fieldScores[0][0]
# -----------------------------------------------------------------------
# Prepare and return the fieldContributions dict
pctFieldContributionsDict = dict()
absFieldContributionsDict = dict()
# If we have no base score, can't compute field contributions. This can
# happen when we exit early due to maxModels or being cancelled
if baseErrScore is not None:
# If the base error score is 0, we can't compute a percent difference
# off of it, so move it to a very small float
if abs(baseErrScore) < 0.00001:
baseErrScore = 0.00001
for (errScore, field) in fieldScores:
if errScore is not None:
pctBetter = (baseErrScore - errScore) * 100.0 / baseErrScore
else:
pctBetter = 0.0
errScore = baseErrScore # for absFieldContribution
pctFieldContributionsDict[field] = pctBetter
absFieldContributionsDict[field] = baseErrScore - errScore
self.logger.debug("FieldContributions: %s" % (pctFieldContributionsDict))
return pctFieldContributionsDict, absFieldContributionsDict
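# Worked example of the contribution math above (hypothetical scores):
# with baseErrScore = 10.0 and a single-field swarm whose best errScore is
# 8.0, that field gets
#   pct = (10.0 - 8.0) * 100.0 / 10.0 = 20.0   (20% better than the base)
#   abs = 10.0 - 8.0 = 2.0
# A field that scores worse than the base gets a negative pct contribution
# and may later be filtered out against minFieldContribution.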
def getAllSwarms(self, sprintIdx):
"""Return the list of all swarms in the given sprint.
Parameters:
---------------------------------------------------------------------
retval: list of all swarm Ids in the given sprint
"""
swarmIds = []
for swarmId, info in self._state['swarms'].iteritems():
if info['sprintIdx'] == sprintIdx:
swarmIds.append(swarmId)
return swarmIds
def getActiveSwarms(self, sprintIdx=None):
"""Return the list of active swarms in the given sprint. These are swarms
which still need new particles created in them.
Parameters:
---------------------------------------------------------------------
sprintIdx: which sprint to query. If None, get active swarms from all
sprints
retval: list of active swarm Ids in the given sprint
"""
swarmIds = []
for swarmId, info in self._state['swarms'].iteritems():
if sprintIdx is not None and info['sprintIdx'] != sprintIdx:
continue
if info['status'] == 'active':
swarmIds.append(swarmId)
return swarmIds
def getNonKilledSwarms(self, sprintIdx):
"""Return the list of swarms in the given sprint that were not killed.
This is called when we are trying to figure out which encoders to carry
forward to the next sprint. We don't want to carry forward encoder
combinations which were obviously bad (in killed swarms).
Parameters:
---------------------------------------------------------------------
retval: list of non-killed swarm Ids in the given sprint
"""
swarmIds = []
for swarmId, info in self._state['swarms'].iteritems():
if info['sprintIdx'] == sprintIdx and info['status'] != 'killed':
swarmIds.append(swarmId)
return swarmIds
def getCompletedSwarms(self):
"""Return the list of all completed swarms.
Parameters:
---------------------------------------------------------------------
retval: list of completed swarm Ids
"""
swarmIds = []
for swarmId, info in self._state['swarms'].iteritems():
if info['status'] == 'completed':
swarmIds.append(swarmId)
return swarmIds
def getCompletingSwarms(self):
"""Return the list of all completing swarms.
Parameters:
---------------------------------------------------------------------
retval: list of completing swarm Ids
"""
swarmIds = []
for swarmId, info in self._state['swarms'].iteritems():
if info['status'] == 'completing':
swarmIds.append(swarmId)
return swarmIds
def bestModelInCompletedSwarm(self, swarmId):
"""Return the best model ID and it's errScore from the given swarm.
If the swarm has not completed yet, the bestModelID will be None.
Parameters:
---------------------------------------------------------------------
retval: (modelId, errScore)
"""
swarmInfo = self._state['swarms'][swarmId]
return (swarmInfo['bestModelId'],
swarmInfo['bestErrScore'])
def bestModelInCompletedSprint(self, sprintIdx):
"""Return the best model ID and it's errScore from the given sprint.
If the sprint has not completed yet, the bestModelID will be None.
Parameters:
---------------------------------------------------------------------
retval: (modelId, errScore)
"""
sprintInfo = self._state['sprints'][sprintIdx]
return (sprintInfo['bestModelId'],
sprintInfo['bestErrScore'])
def bestModelInSprint(self, sprintIdx):
"""Return the best model ID and it's errScore from the given sprint,
which may still be in progress. This returns the best score from all models
in the sprint which have matured so far.
Parameters:
---------------------------------------------------------------------
retval: (modelId, errScore)
"""
# Get all the swarms in this sprint
swarms = self.getAllSwarms(sprintIdx)
# Get the best model and score from each swarm
bestModelId = None
bestErrScore = numpy.inf
for swarmId in swarms:
(modelId, errScore) = self._hsObj._resultsDB.bestModelIdAndErrScore(swarmId)
if errScore < bestErrScore:
bestModelId = modelId
bestErrScore = errScore
return (bestModelId, bestErrScore)
def setSwarmState(self, swarmId, newStatus):
"""Change the given swarm's state to 'newState'. If 'newState' is
'completed', then bestModelId and bestErrScore must be provided.
Parameters:
---------------------------------------------------------------------
swarmId: swarm Id
newStatus: new status, either 'active', 'completing', 'completed', or
'killed'
"""
assert (newStatus in ['active', 'completing', 'completed', 'killed'])
# Set the swarm status
swarmInfo = self._state['swarms'][swarmId]
if swarmInfo['status'] == newStatus:
return
# If some other worker noticed it as completed, setting it to completing
# is obviously old information....
if swarmInfo['status'] == 'completed' and newStatus == 'completing':
return
self._dirty = True
swarmInfo['status'] = newStatus
if newStatus == 'completed':
(modelId, errScore) = self._hsObj._resultsDB.bestModelIdAndErrScore(swarmId)
swarmInfo['bestModelId'] = modelId
swarmInfo['bestErrScore'] = errScore
# If no longer active, remove it from the activeSwarms entry
if newStatus != 'active' and swarmId in self._state['activeSwarms']:
self._state['activeSwarms'].remove(swarmId)
# If new status is 'killed', kill off any running particles in that swarm
if newStatus=='killed':
self._hsObj.killSwarmParticles(swarmId)
# In case speculative particles are enabled, make sure we generate a new
# swarm at this time if all of the swarms in the current sprint have
completed. This will ensure that we don't mark the sprint as completed
# before we've created all the possible swarms.
sprintIdx = swarmInfo['sprintIdx']
self.isSprintActive(sprintIdx)
# Update the sprint status. Check all the swarms that belong to this sprint.
# If they are all completed, the sprint is completed.
sprintInfo = self._state['sprints'][sprintIdx]
statusCounts = dict(active=0, completing=0, completed=0, killed=0)
bestModelIds = []
bestErrScores = []
for info in self._state['swarms'].itervalues():
if info['sprintIdx'] != sprintIdx:
continue
statusCounts[info['status']] += 1
if info['status'] == 'completed':
bestModelIds.append(info['bestModelId'])
bestErrScores.append(info['bestErrScore'])
if statusCounts['active'] > 0:
sprintStatus = 'active'
elif statusCounts['completing'] > 0:
sprintStatus = 'completing'
else:
sprintStatus = 'completed'
sprintInfo['status'] = sprintStatus
# If the sprint is complete, get the best model from all of its swarms and
# store that as the sprint best
if sprintStatus == 'completed':
if len(bestErrScores) > 0:
whichIdx = numpy.array(bestErrScores).argmin()
sprintInfo['bestModelId'] = bestModelIds[whichIdx]
sprintInfo['bestErrScore'] = bestErrScores[whichIdx]
else:
# This sprint was empty, most likely because all particles were
# killed. Give it a huge error score
sprintInfo['bestModelId'] = 0
sprintInfo['bestErrScore'] = numpy.inf
# See if our best err score got NO BETTER as compared to a previous
# sprint. If so, stop exploring subsequent sprints (lastGoodSprint
# is no longer None).
bestPrior = numpy.inf
for idx in range(sprintIdx):
if self._state['sprints'][idx]['status'] == 'completed':
(_, errScore) = self.bestModelInCompletedSprint(idx)
if errScore is None:
errScore = numpy.inf
else:
errScore = numpy.inf
if errScore < bestPrior:
bestPrior = errScore
if sprintInfo['bestErrScore'] >= bestPrior:
self._state['lastGoodSprint'] = sprintIdx-1
# If ALL sprints up to the last good one are done, the search is now over
if self._state['lastGoodSprint'] is not None \
and not self.anyGoodSprintsActive():
self._state['searchOver'] = True
def anyGoodSprintsActive(self):
"""Return True if there are any more good sprints still being explored.
A 'good' sprint is one that is earlier than where we detected an increase
in error from sprint to subsequent sprint.
"""
if self._state['lastGoodSprint'] is not None:
goodSprints = self._state['sprints'][0:self._state['lastGoodSprint']+1]
else:
goodSprints = self._state['sprints']
for sprint in goodSprints:
if sprint['status'] == 'active':
anyActiveSprints = True
break
else:
anyActiveSprints = False
return anyActiveSprints
def isSprintCompleted(self, sprintIdx):
"""Return True if the given sprint has completed."""
numExistingSprints = len(self._state['sprints'])
if sprintIdx >= numExistingSprints:
return False
return (self._state['sprints'][sprintIdx]['status'] == 'completed')
def killUselessSwarms(self):
"""See if we can kill off some speculative swarms. If an earlier sprint
has finally completed, we can now tell which fields should *really* be present
in the sprints we've already started due to speculation, and kill off the
swarms that should not have been included.
"""
# Get number of existing sprints
numExistingSprints = len(self._state['sprints'])
# Should we bother killing useless swarms?
if self._hsObj._searchType == HsSearchType.legacyTemporal:
if numExistingSprints <= 2:
return
else:
if numExistingSprints <= 1:
return
# Form completedSwarms as a list of tuples, each tuple contains:
# (swarmName, swarmState, swarmBestErrScore)
# ex. completedSwarms:
# [('a', {...}, 1.4),
# ('b', {...}, 2.0),
# ('c', {...}, 3.0)]
completedSwarms = self.getCompletedSwarms()
completedSwarms = [(swarm, self._state["swarms"][swarm],
self._state["swarms"][swarm]["bestErrScore"]) \
for swarm in completedSwarms]
# Form the completedMatrix. Each row corresponds to a sprint. Each row
# contains the list of swarm tuples that belong to that sprint, sorted
# by best score. Each swarm tuple contains (swarmName, swarmState,
# swarmBestErrScore).
# ex. completedMatrix:
# [(('a', {...}, 1.4), ('b', {...}, 2.0), ('c', {...}, 3.0)),
# (('a.b', {...}, 3.0), ('b.c', {...}, 4.0))]
completedMatrix = [[] for i in range(numExistingSprints)]
for swarm in completedSwarms:
completedMatrix[swarm[1]["sprintIdx"]].append(swarm)
for sprint in completedMatrix:
sprint.sort(key=itemgetter(2))
# Form activeSwarms as a list of tuples, each tuple contains:
# (swarmName, swarmState, swarmBestErrScore)
# Include all activeSwarms and completingSwarms
# ex. activeSwarms:
# [('d', {...}, 1.4),
# ('e', {...}, 2.0),
# ('f', {...}, 3.0)]
activeSwarms = self.getActiveSwarms()
# Append the completing swarms
activeSwarms.extend(self.getCompletingSwarms())
activeSwarms = [(swarm, self._state["swarms"][swarm],
self._state["swarms"][swarm]["bestErrScore"]) \
for swarm in activeSwarms]
# Form the activeMatrix. Each row corresponds to a sprint. Each row
# contains the list of swarm tuples that belong to that sprint, sorted
# by best score. Each swarm tuple contains (swarmName, swarmState,
# swarmBestErrScore)
# ex. activeMatrix:
# [(('d', {...}, 1.4), ('e', {...}, 2.0), ('f', {...}, 3.0)),
# (('d.e', {...}, 3.0), ('e.f', {...}, 4.0))]
activeMatrix = [[] for i in range(numExistingSprints)]
for swarm in activeSwarms:
activeMatrix[swarm[1]["sprintIdx"]].append(swarm)
for sprint in activeMatrix:
sprint.sort(key=itemgetter(2))
# Figure out which active swarms to kill
toKill = []
for i in range(1, numExistingSprints):
for swarm in activeMatrix[i]:
curSwarmEncoders = swarm[0].split(".")
# If the previous sprint is complete, get its best swarm and kill all active
# swarms in this sprint that are not supersets of it
if(len(activeMatrix[i-1])==0):
# If we are trying all possible 3 field combinations, don't kill any
# off in sprint 2
if i==2 and (self._hsObj._tryAll3FieldCombinations or \
self._hsObj._tryAll3FieldCombinationsWTimestamps):
pass
else:
bestInPrevious = completedMatrix[i-1][0]
bestEncoders = bestInPrevious[0].split('.')
for encoder in bestEncoders:
if encoder not in curSwarmEncoders:
toKill.append(swarm)
# If two or more encoder sets in the previous sprint are complete and are
# worse than at least one active swarm in that sprint, we could also remove
# any combinations that contain any pair of them, since they cannot yield
# the best encoder set (currently commented out below).
#elif(len(completedMatrix[i-1])>1):
# for completedSwarm in completedMatrix[i-1]:
# activeMatrix[i-1][0][2]<completed
# Mark the bad swarms as killed
if len(toKill) > 0:
print "ParseMe: Killing encoders:" + str(toKill)
for swarm in toKill:
self.setSwarmState(swarm[0], "killed")
return
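# Illustrative example (hypothetical swarm names): if sprint 0 completed
# with 'b' as its best single-field swarm, speculative sprint-1 swarms such
# as 'a.c' (which do not contain 'b') end up in toKill and are marked
# 'killed', while supersets of the winner such as 'a.b' and 'b.c' keep
# running.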
def isSprintActive(self, sprintIdx):
"""If the given sprint exists and is active, return active=True.
If the sprint does not exist yet, this call will create it (and return
active=True). If it already exists, but is completing or complete, return
active=False.
If sprintIdx is past the end of the possible sprints, return
active=False, noMoreSprints=True
IMPORTANT: When speculative particles are enabled, this call has some
special processing to handle speculative sprints:
* When creating a new speculative sprint (creating sprint N before
sprint N-1 has completed), it initially puts only ONE swarm into
the sprint.
* Every time it is asked if sprint N is active, it also checks to see if
it is time to add another swarm to the sprint, and adds a new swarm if
appropriate before returning active=True
* We decide it is time to add a new swarm to a speculative sprint when ALL
of the currently active swarms in the sprint have all the workers they
need (number of running (not mature) particles is _minParticlesPerSwarm).
This means that we have capacity to run additional particles in a new
swarm.
It is expected that the sprints will be checked IN ORDER from 0 on up. (It
is an error not to) The caller should always try to allocate from the first
active sprint it finds. If it can't, then it can call this again to
find/create the next active sprint.
Parameters:
---------------------------------------------------------------------
retval: (active, noMoreSprints)
active: True if the given sprint is active
noMoreSprints: True if there are no more sprints possible
"""
while True:
numExistingSprints = len(self._state['sprints'])
# If this sprint already exists, see if it is active
if sprintIdx <= numExistingSprints-1:
# With speculation off, it's simple, just return whether or not the
# asked for sprint has active status
if not self._hsObj._speculativeParticles:
active = (self._state['sprints'][sprintIdx]['status'] == 'active')
return (active, False)
# With speculation on, if the sprint is still marked active, we also
# need to see if it's time to add a new swarm to it.
else:
active = (self._state['sprints'][sprintIdx]['status'] == 'active')
if not active:
return (active, False)
# See if all of the existing swarms are at capacity (have all the
# workers they need):
activeSwarmIds = self.getActiveSwarms(sprintIdx)
swarmSizes = [self._hsObj._resultsDB.getParticleInfos(swarmId,
matured=False)[0] for swarmId in activeSwarmIds]
notFullSwarms = [len(swarm) for swarm in swarmSizes \
if len(swarm) < self._hsObj._minParticlesPerSwarm]
# If some swarms have room, return that the sprint is active.
if len(notFullSwarms) > 0:
return (True, False)
# If the existing swarms are at capacity, we will fall through to the
# logic below which tries to add a new swarm to the sprint.
# Stop creating new sprints?
if self._state['lastGoodSprint'] is not None:
return (False, True)
# if fixedFields is set, we are running a fast swarm and only run sprint0
if self._hsObj._fixedFields is not None:
return (False, True)
# ----------------------------------------------------------------------
# Get the best model (if there is one) from the prior sprint. That gives
# us the base encoder set for the next sprint. For sprint zero, make sure
# we don't index sprintIdx-1, which would wrap around to the last sprint.
if sprintIdx > 0 \
and self._state['sprints'][sprintIdx-1]['status'] == 'completed':
(bestModelId, _) = self.bestModelInCompletedSprint(sprintIdx-1)
(particleState, _, _, _, _) = self._hsObj._resultsDB.getParticleInfo(
bestModelId)
bestSwarmId = particleState['swarmId']
baseEncoderSets = [bestSwarmId.split('.')]
# If there is no best model yet, then use all encoder sets from the prior
# sprint that were not killed
else:
bestSwarmId = None
particleState = None
# Build up more combinations, using ALL of the sets in the current
# sprint.
baseEncoderSets = []
for swarmId in self.getNonKilledSwarms(sprintIdx-1):
baseEncoderSets.append(swarmId.split('.'))
# ----------------------------------------------------------------------
# Which encoders should we add to the current base set?
encoderAddSet = []
# If we have constraints on how many fields we carry forward into
# subsequent sprints (either nupic.hypersearch.max.field.branching or
# nupic.hypersearch.min.field.contribution was set), then be more
# picky about which fields we add in.
limitFields = False
if self._hsObj._maxBranching > 0 \
or self._hsObj._minFieldContribution >= 0:
if self._hsObj._searchType == HsSearchType.temporal or \
self._hsObj._searchType == HsSearchType.classification:
if sprintIdx >= 1:
limitFields = True
baseSprintIdx = 0
elif self._hsObj._searchType == HsSearchType.legacyTemporal:
if sprintIdx >= 2:
limitFields = True
baseSprintIdx = 1
else:
raise RuntimeError("Unimplemented search type %s" % \
(self._hsObj._searchType))
# Only add top _maxBranching encoders to the swarms?
if limitFields:
# Get field contributions to filter added fields
pctFieldContributions, absFieldContributions = \
self.getFieldContributions()
toRemove = []
self.logger.debug("FieldContributions min: %s" % \
(self._hsObj._minFieldContribution))
for fieldname in pctFieldContributions:
if pctFieldContributions[fieldname] < self._hsObj._minFieldContribution:
self.logger.debug("FieldContributions removing: %s" % (fieldname))
toRemove.append(self.getEncoderKeyFromName(fieldname))
else:
self.logger.debug("FieldContributions keeping: %s" % (fieldname))
# Grab the top maxBranching base sprint swarms.
swarms = self._state["swarms"]
sprintSwarms = [(swarm, swarms[swarm]["bestErrScore"]) \
for swarm in swarms if swarms[swarm]["sprintIdx"] == baseSprintIdx]
sprintSwarms = sorted(sprintSwarms, key=itemgetter(1))
if self._hsObj._maxBranching > 0:
sprintSwarms = sprintSwarms[0:self._hsObj._maxBranching]
# Create encoder set to generate further swarms.
for swarm in sprintSwarms:
swarmEncoders = swarm[0].split(".")
for encoder in swarmEncoders:
if encoder not in encoderAddSet:
encoderAddSet.append(encoder)
encoderAddSet = [encoder for encoder in encoderAddSet \
if not str(encoder) in toRemove]
# If no limit on the branching or min contribution, simply use all of the
# encoders.
else:
encoderAddSet = self._hsObj._encoderNames
# -----------------------------------------------------------------------
# Build up the new encoder combinations for the next sprint.
newSwarmIds = set()
# See if the caller wants to try more extensive field combinations with
# 3 fields.
if (self._hsObj._searchType == HsSearchType.temporal \
or self._hsObj._searchType == HsSearchType.legacyTemporal) \
and sprintIdx == 2 \
and (self._hsObj._tryAll3FieldCombinations or \
self._hsObj._tryAll3FieldCombinationsWTimestamps):
if self._hsObj._tryAll3FieldCombinations:
newEncoders = set(self._hsObj._encoderNames)
if self._hsObj._predictedFieldEncoder in newEncoders:
newEncoders.remove(self._hsObj._predictedFieldEncoder)
else:
# Just make sure the timestamp encoders are part of the mix
newEncoders = set(encoderAddSet)
if self._hsObj._predictedFieldEncoder in newEncoders:
newEncoders.remove(self._hsObj._predictedFieldEncoder)
for encoder in self._hsObj._encoderNames:
if encoder.endswith('_timeOfDay') or encoder.endswith('_weekend') \
or encoder.endswith('_dayOfWeek'):
newEncoders.add(encoder)
allCombos = list(itertools.combinations(newEncoders, 2))
for combo in allCombos:
newSet = list(combo)
newSet.append(self._hsObj._predictedFieldEncoder)
newSet.sort()
newSwarmId = '.'.join(newSet)
if newSwarmId not in self._state['swarms']:
newSwarmIds.add(newSwarmId)
# If this is a speculative sprint, only add the first encoder;
# otherwise add all of them.
if (len(self.getActiveSwarms(sprintIdx-1)) > 0):
break
# Else, we only build up by adding 1 new encoder to the best combination(s)
# we've seen from the prior sprint
else:
for baseEncoderSet in baseEncoderSets:
for encoder in encoderAddSet:
if encoder not in self._state['blackListedEncoders'] \
and encoder not in baseEncoderSet:
newSet = list(baseEncoderSet)
newSet.append(encoder)
newSet.sort()
newSwarmId = '.'.join(newSet)
if newSwarmId not in self._state['swarms']:
newSwarmIds.add(newSwarmId)
# If this is a speculative sprint, only add the first encoder;
# otherwise add all of them.
if (len(self.getActiveSwarms(sprintIdx-1)) > 0):
break
# ----------------------------------------------------------------------
# Sort the new swarm Ids
newSwarmIds = sorted(newSwarmIds)
# If no more swarms can be found for this sprint...
if len(newSwarmIds) == 0:
# if sprint is not an empty sprint return that it is active but do not
# add anything to it.
if len(self.getAllSwarms(sprintIdx)) > 0:
return (True, False)
# If this is an empty sprint and we couldn't find any new swarms to
# add (only bad fields are remaining), the search is over
else:
return (False, True)
# Add this sprint and the swarms that are in it to our state
self._dirty = True
# Add in the new sprint if necessary
if len(self._state["sprints"]) == sprintIdx:
self._state['sprints'].append({'status': 'active',
'bestModelId': None,
'bestErrScore': None})
# Add in the new swarm(s) to the sprint
for swarmId in newSwarmIds:
self._state['swarms'][swarmId] = {'status': 'active',
'bestModelId': None,
'bestErrScore': None,
'sprintIdx': sprintIdx}
# Update the list of active swarms
self._state['activeSwarms'] = self.getActiveSwarms()
# Try to set new state
success = self.writeStateToDB()
# Return result if successful
if success:
return (True, False)
# No success, loop back with the updated state and try again
class HsSearchType(object):
"""This class enumerates the types of search we can perform."""
temporal = 'temporal'
legacyTemporal = 'legacyTemporal'
classification = 'classification'
class HypersearchV2(object):
"""The v2 Hypersearch implementation. This is one example of a Hypersearch
implementation that can be used by the HypersearchWorker. Other implementations
just have to implement the following methods:
createModels()
recordModelProgress()
getPermutationVariables()
getComplexVariableLabelLookupDict()
This implementation uses a hybrid of Particle Swarm Optimization (PSO) and
the old "ronamatic" logic from Hypersearch V1. Variables which are lists of
choices (i.e. string values, integer values that represent different
categories) are searched using the ronamatic logic whereas floats and
integers that represent a range of values are searched using PSO.
For prediction experiments, this implementation starts out evaluating only
single encoder models that encode the predicted field. This is the first
"sprint". Once it finds the optimum set of variables for that, it starts to
build up by adding in combinations of 2 fields (the second "sprint"), where
one of them is the predicted field. Once the top 2-field combination(s) are
discovered, it starts to build up on those by adding in a 3rd field, etc.
Each new set of field combinations is called a sprint.
For classification experiments, this implementation starts out evaluating two
encoder models, where one of the encoders is the classified field. This is the
first "sprint". Once it finds the optimum set of variables for that, it starts
to build up by evauating combinations of 3 fields (the second "sprint"), where
two of them are the best 2 fields found in the first sprint (one of those of
course being the classified field). Once the top 3-field combination(s) are
discovered, it starts to build up on those by adding in a 4th field, etc.
In classification models, the classified field, although it has an encoder, is
not sent "into" the network. Rather, the encoded value just goes directly to
the classifier as the classifier input.
At any one time, there are 1 or more swarms being evaluated at the same time -
each swarm representing a certain field combination within the sprint. We try
to load balance the swarms and have the same number of models evaluated for
each swarm at any one time. Each swarm contains N particles, and we also try
to keep N >= some mininum number. Each position of a particle corresponds to a
model.
When a worker is ready to evaluate a new model, it first picks the swarm with
the least number of models so far (least number of evaluated particle
positions). If that swarm does not have the min number of particles in it yet,
or does not yet have a particle created by this worker, the worker will create
a new particle, else it will choose another particle from that swarm that it
had created in the past which has the least number of evaluated positions so
far.
"""
def __init__(self, searchParams, workerID=None, cjDAO=None, jobID=None,
logLevel=None):
"""Instantiate the HyperseachV2 instance.
Parameters:
----------------------------------------------------------------------
searchParams: a dict of the job's search parameters. The format is:
persistentJobGUID: REQUIRED.
Persistent, globally-unique identifier for this job
for use in constructing persistent model checkpoint
keys. MUST be compatible with S3 key-naming rules, but
MUST NOT contain forward slashes. This GUID is
expected to retain its global uniqueness across
clusters and cluster software updates (unlike the
record IDs in the Engine's jobs table, which recycle
upon table schema change and software update). In the
future, this may also be instrumental for checkpoint
garbage collection.
permutationsPyFilename:
OPTIONAL - path to permutations.py file
permutationsPyContents:
OPTIONAL - JSON encoded string with
contents of permutations.py file
descriptionPyContents:
OPTIONAL - JSON encoded string with
contents of base description.py file
description: OPTIONAL - JSON description of the search
createCheckpoints: OPTIONAL - Whether to create checkpoints
useTerminators OPTIONAL - True or False (default config.xml). When set
to False, the model and swarm terminators
are disabled
maxModels: OPTIONAL - max # of models to generate
NOTE: This is a deprecated location for this
setting. Now, it should be specified through
the maxModels variable within the permutations
file, or maxModels in the JSON description
dummyModel: OPTIONAL - Either (True/False) or a dict of parameters
for a dummy model. If this key is absent,
a real model is trained.
See utils.py/OPFDummyModel runner for the
schema of the dummy parameters
speculativeParticles OPTIONAL - True or False (default obtained from
nupic.hypersearch.speculative.particles.default
configuration property). See note below.
NOTE: The caller must provide just ONE of the following to describe the
hypersearch:
1.) permutationsPyFilename
OR 2.) permutationsPyContents & descriptionPyContents
OR 3.) description
The schema for the description element can be found at:
"py/nupic/frameworks/opf/expGenerator/experimentDescriptionSchema.json"
NOTE about speculativeParticles: If true (not 0), hypersearch workers will
go ahead and create and run particles in subsequent sprints and
generations before the current generation or sprint has been completed. If
false, a worker will wait in a sleep loop until the current generation or
sprint has finished before choosing the next particle position or going
into the next sprint. When true, the best model can be found faster, but
results are less repeatable due to the randomness of when each worker
completes each particle. This property can be overridden via the
speculativeParticles element of the Hypersearch job params.
workerID: our unique Hypersearch worker ID
cjDAO: ClientJobsDB Data Access Object
jobID: job ID for this hypersearch job
logLevel: override logging level to this value, if not None
"""
# Instantiate our logger
self.logger = logging.getLogger(".".join( ['com.numenta',
self.__class__.__module__, self.__class__.__name__]))
# Override log level?
if logLevel is not None:
self.logger.setLevel(logLevel)
# This is how to check the logging level
#if self.logger.getEffectiveLevel() <= logging.DEBUG:
# print "at debug level"
# Init random seed
random.seed(42)
# Save the search info
self._searchParams = searchParams
self._workerID = workerID
self._cjDAO = cjDAO
self._jobID = jobID
# Log search params
self.logger.info("searchParams: \n%s" % (pprint.pformat(
clippedObj(searchParams))))
self._createCheckpoints = self._searchParams.get('createCheckpoints',
False)
self._maxModels = self._searchParams.get('maxModels', None)
if self._maxModels == -1:
self._maxModels = None
self._predictionCacheMaxRecords = self._searchParams.get('predictionCacheMaxRecords', None)
# Speculative particles?
self._speculativeParticles = self._searchParams.get('speculativeParticles',
bool(int(Configuration.get(
'nupic.hypersearch.speculative.particles.default'))))
self._speculativeWaitSecondsMax = float(Configuration.get(
'nupic.hypersearch.speculative.particles.sleepSecondsMax'))
# Maximum Field Branching
self._maxBranching= int(Configuration.get(
'nupic.hypersearch.max.field.branching'))
# Minimum Field Contribution
self._minFieldContribution= float(Configuration.get(
'nupic.hypersearch.min.field.contribution'))
# This gets set if we detect that the job got cancelled
self._jobCancelled = False
# Use terminators (typically set by permutations_runner.py)
if 'useTerminators' in self._searchParams:
useTerminators = self._searchParams['useTerminators']
useTerminators = str(int(useTerminators))
Configuration.set('nupic.hypersearch.enableModelTermination', useTerminators)
Configuration.set('nupic.hypersearch.enableModelMaturity', useTerminators)
Configuration.set('nupic.hypersearch.enableSwarmTermination', useTerminators)
# Special test mode?
if 'NTA_TEST_exitAfterNModels' in os.environ:
self._maxModels = int(os.environ['NTA_TEST_exitAfterNModels'])
self._dummyModel = self._searchParams.get('dummyModel', None)
# Holder for temporary directory, if any, that needs to be cleaned up
# in our close() method.
self._tempDir = None
try:
# Get the permutations info. This can be either:
# 1.) JSON encoded search description (this will be used to generate a
# permutations.py and description.py files using ExpGenerator)
# 2.) path to a pre-generated permutations.py file. The description.py is
# assumed to be in the same directory
# 3.) contents of the permutations.py and description.py files.
if 'description' in self._searchParams:
if ('permutationsPyFilename' in self._searchParams or
'permutationsPyContents' in self._searchParams or
'descriptionPyContents' in self._searchParams):
raise RuntimeError(
"Either 'description', 'permutationsPyFilename' or"
"'permutationsPyContents' & 'permutationsPyContents' should be "
"specified, but not two or more of these at once.")
# Calculate training period for anomaly models
searchParamObj = self._searchParams
anomalyParams = searchParamObj['description'].get('anomalyParams',
dict())
# This is used in case searchParamObj['description']['anomalyParams']
# is set to None.
if anomalyParams is None:
anomalyParams = dict()
if (('autoDetectWaitRecords' not in anomalyParams) or
(anomalyParams['autoDetectWaitRecords'] is None)):
streamDef = self._getStreamDef(searchParamObj['description'])
from nupic.data.stream_reader import StreamReader
try:
streamReader = StreamReader(streamDef, isBlocking=False,
maxTimeout=0, eofOnTimeout=True)
anomalyParams['autoDetectWaitRecords'] = \
streamReader.getDataRowCount()
except Exception:
anomalyParams['autoDetectWaitRecords'] = None
self._searchParams['description']['anomalyParams'] = anomalyParams
# Call the experiment generator to generate the permutations and base
# description file.
outDir = self._tempDir = tempfile.mkdtemp()
expGenerator([
'--description=%s' % (
json.dumps(self._searchParams['description'])),
'--version=v2',
'--outDir=%s' % (outDir)])
# Get the name of the permutations script.
permutationsScript = os.path.join(outDir, 'permutations.py')
elif 'permutationsPyFilename' in self._searchParams:
if ('description' in self._searchParams or
'permutationsPyContents' in self._searchParams or
'descriptionPyContents' in self._searchParams):
raise RuntimeError(
"Either 'description', 'permutationsPyFilename' or "
"'permutationsPyContents' & 'permutationsPyContents' should be "
"specified, but not two or more of these at once.")
permutationsScript = self._searchParams['permutationsPyFilename']
elif 'permutationsPyContents' in self._searchParams:
if ('description' in self._searchParams or
'permutationsPyFilename' in self._searchParams):
raise RuntimeError(
"Either 'description', 'permutationsPyFilename' or"
"'permutationsPyContents' & 'permutationsPyContents' should be "
"specified, but not two or more of these at once.")
assert ('descriptionPyContents' in self._searchParams)
# Generate the permutations.py and description.py files
outDir = self._tempDir = tempfile.mkdtemp()
permutationsScript = os.path.join(outDir, 'permutations.py')
fd = open(permutationsScript, 'w')
fd.write(self._searchParams['permutationsPyContents'])
fd.close()
fd = open(os.path.join(outDir, 'description.py'), 'w')
fd.write(self._searchParams['descriptionPyContents'])
fd.close()
else:
raise RuntimeError ("Either 'description' or 'permutationsScript' must be"
"specified")
# Get the base path of the experiment and read in the base description
self._basePath = os.path.dirname(permutationsScript)
self._baseDescription = open(os.path.join(self._basePath,
'description.py')).read()
self._baseDescriptionHash = hashlib.md5(self._baseDescription).digest()
# Read the model config to figure out the inference type
modelDescription, _ = opfhelpers.loadExperiment(self._basePath)
# Read info from permutations file. This sets up the following member
# variables:
# _predictedField
# _permutations
# _flattenedPermutations
# _encoderNames
# _reportKeys
# _filterFunc
# _optimizeKey
# _maximize
# _dummyModelParamsFunc
self._readPermutationsFile(permutationsScript, modelDescription)
# Fill in and save the base description and permutations file contents
# if they haven't already been filled in by another worker
if self._cjDAO is not None:
updated = self._cjDAO.jobSetFieldIfEqual(jobID=self._jobID,
fieldName='genBaseDescription',
curValue=None,
newValue = self._baseDescription)
if updated:
permContents = open(permutationsScript).read()
self._cjDAO.jobSetFieldIfEqual(jobID=self._jobID,
fieldName='genPermutations',
curValue=None,
newValue = permContents)
# if user provided an artificialMetric, force use of the dummy model
if self._dummyModelParamsFunc is not None:
if self._dummyModel is None:
self._dummyModel = dict()
# If at DEBUG log level, print out permutations info to the log
if self.logger.getEffectiveLevel() <= logging.DEBUG:
msg = StringIO.StringIO()
print >> msg, "Permutations file specifications: "
info = dict()
for key in ['_predictedField', '_permutations',
'_flattenedPermutations', '_encoderNames',
'_reportKeys', '_optimizeKey', '_maximize']:
info[key] = getattr(self, key)
print >> msg, pprint.pformat(info)
self.logger.debug(msg.getvalue())
msg.close()
# Instantiate our database to hold the results we received so far
self._resultsDB = ResultsDB(self)
# Instantiate the Swarm Terminator
self._swarmTerminator = SwarmTerminator()
# Initial hypersearch state
self._hsState = None
# The Max # of attempts we will make to create a unique model before
# giving up.
self._maxUniqueModelAttempts = int(Configuration.get(
'nupic.hypersearch.maxUniqueModelAttempts'))
# The max amount of time allowed before a model is considered orphaned.
self._modelOrphanIntervalSecs = float(Configuration.get(
'nupic.hypersearch.modelOrphanIntervalSecs'))
# The max percent of models that can complete with errors
self._maxPctErrModels = float(Configuration.get(
'nupic.hypersearch.maxPctErrModels'))
except:
# Clean up our temporary directory, if any
if self._tempDir is not None:
shutil.rmtree(self._tempDir)
self._tempDir = None
raise
return
def _getStreamDef(self, modelDescription):
"""
Generate a stream definition based on the given model description.
"""
#--------------------------------------------------------------------------
# Generate the string containing the aggregation settings.
aggregationPeriod = {
'days': 0,
'hours': 0,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0,
}
# Honor any overrides provided in the stream definition
aggFunctionsDict = {}
if 'aggregation' in modelDescription['streamDef']:
for key in aggregationPeriod.keys():
if key in modelDescription['streamDef']['aggregation']:
aggregationPeriod[key] = modelDescription['streamDef']['aggregation'][key]
if 'fields' in modelDescription['streamDef']['aggregation']:
for (fieldName, func) in modelDescription['streamDef']['aggregation']['fields']:
aggFunctionsDict[fieldName] = str(func)
# Do we have any aggregation at all?
hasAggregation = False
for v in aggregationPeriod.values():
if v != 0:
hasAggregation = True
break
# Convert the aggFunctionsDict to a list
aggFunctionList = aggFunctionsDict.items()
aggregationInfo = dict(aggregationPeriod)
aggregationInfo['fields'] = aggFunctionList
streamDef = copy.deepcopy(modelDescription['streamDef'])
streamDef['aggregation'] = copy.deepcopy(aggregationInfo)
return streamDef
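# Illustrative input/output for the helper above (hypothetical stream
# definition): if modelDescription['streamDef'] contains
#   'aggregation': {'hours': 1, 'fields': [('consumption', 'mean')]}
# the returned copy carries a fully populated aggregation dict such as
#   {'days': 0, 'hours': 1, 'minutes': 0, ..., 'years': 0,
#    'fields': [('consumption', 'mean')]}
# with every period that was not overridden left at 0.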
def __del__(self):
"""Destructor; NOTE: this is not guaranteed to be called (bugs like
circular references could prevent it from being called).
"""
self.close()
return
def close(self):
"""Deletes temporary system objects/files. """
if self._tempDir is not None and os.path.isdir(self._tempDir):
self.logger.debug("Removing temporary directory %r", self._tempDir)
shutil.rmtree(self._tempDir)
self._tempDir = None
return
def _readPermutationsFile(self, filename, modelDescription):
"""
Read the permutations file and initialize the following member variables:
_predictedField: field name of the field we are trying to
predict
_permutations: Dict containing the full permutations dictionary.
_flattenedPermutations: Dict containing the flattened version of
_permutations. The keys leading to the value in the dict are joined
with a period to create the new key and permute variables within
encoders are pulled out of the encoder.
_encoderNames: keys from self._permutations of only the encoder
variables.
_reportKeys: The 'report' list from the permutations file.
This is a list of the items from each experiment's pickled
results file that should be included in the final report. The
format of each item is a string of key names separated by colons,
each key being one level deeper into the experiment results
dict. For example, 'key1:key2'.
_filterFunc: a user-supplied function that can be used to
filter out specific permutation combinations.
_optimizeKey: which report key to optimize for
_maximize: True if we should try and maximize the optimizeKey
metric. False if we should minimize it.
_dummyModelParamsFunc: a user-supplied function that can be used to
artificially generate CLA model results. When supplied,
the model is not actually run through the OPF, but instead is run
through a "Dummy Model" (nupic.swarming.ModelRunner.
OPFDummyModelRunner). This function returns the params dict used
to control various options in the dummy model (the returned metric,
the execution time, etc.). This is used for hypersearch algorithm
development.
Parameters:
---------------------------------------------------------
filename: Name of permutations file
retval: None
"""
# Open and execute the permutations file
vars = {}
execfile(filename, globals(), vars)
# Read in misc info.
self._reportKeys = vars.get('report', [])
self._filterFunc = vars.get('permutationFilter', None)
self._dummyModelParamsFunc = vars.get('dummyModelParams', None)
self._predictedField = None # default
self._predictedFieldEncoder = None # default
self._fixedFields = None # default
# The fastSwarm variable, if present, contains the params from a best
# model from a previous swarm. If present, use info from that to seed
# a fast swarm
self._fastSwarmModelParams = vars.get('fastSwarmModelParams', None)
if self._fastSwarmModelParams is not None:
encoders = self._fastSwarmModelParams['structuredParams']['modelParams']\
['sensorParams']['encoders']
self._fixedFields = []
for fieldName in encoders:
if encoders[fieldName] is not None:
self._fixedFields.append(fieldName)
if 'fixedFields' in vars:
self._fixedFields = vars['fixedFields']
# Get min number of particles per swarm from either permutations file or
# config.
self._minParticlesPerSwarm = vars.get('minParticlesPerSwarm')
if self._minParticlesPerSwarm is None:
self._minParticlesPerSwarm = Configuration.get(
'nupic.hypersearch.minParticlesPerSwarm')
self._minParticlesPerSwarm = int(self._minParticlesPerSwarm)
# Enable logic to kill off speculative swarms when an earlier sprint
# has found that it contains poorly performing field combination?
self._killUselessSwarms = vars.get('killUselessSwarms', True)
# The caller can request that the predicted field ALWAYS be included ("yes")
# or optionally include ("auto"). The setting of "no" is N/A and ignored
# because in that case the encoder for the predicted field will not even
# be present in the permutations file.
# When set to "yes", this will force the first sprint to try the predicted
# field only (the legacy mode of swarming).
# When set to "auto", the first sprint tries all possible fields (one at a
# time).
self._inputPredictedField = vars.get("inputPredictedField", "yes")
# Try all possible 3-field combinations? Normally, we start with the best
# 2-field combination as a base. When this flag is set though, we try
# all possible 3-field combinations which takes longer but can find a
# better model.
self._tryAll3FieldCombinations = vars.get('tryAll3FieldCombinations', False)
# Always include timestamp fields in the 3-field swarms?
# This is a less compute intensive version of tryAll3FieldCombinations.
# Instead of trying ALL possible 3 field combinations, it just ensures
# that the timestamp fields (dayOfWeek, timeOfDay, weekend) are never left
# out when generating the 3-field swarms.
self._tryAll3FieldCombinationsWTimestamps = vars.get(
'tryAll3FieldCombinationsWTimestamps', False)
# Allow the permutations file to override minFieldContribution. This would
# be set to a negative number for large swarms so that you don't disqualify
# a field in an early sprint just because it did poorly there. Sometimes,
# a field that did poorly in an early sprint could help accuracy when
# added in a later sprint
minFieldContribution = vars.get('minFieldContribution', None)
if minFieldContribution is not None:
self._minFieldContribution = minFieldContribution
# Allow the permutations file to override maxBranching.
maxBranching = vars.get('maxFieldBranching', None)
if maxBranching is not None:
self._maxBranching = maxBranching
# Read in the optimization info.
if 'maximize' in vars:
self._optimizeKey = vars['maximize']
self._maximize = True
elif 'minimize' in vars:
self._optimizeKey = vars['minimize']
self._maximize = False
else:
raise RuntimeError("Permutations file '%s' does not include a maximize"
" or minimize metric.")
# The permutations file is the new location for maxModels. The old location,
# in the jobParams is deprecated.
maxModels = vars.get('maxModels')
if maxModels is not None:
if self._maxModels is None:
self._maxModels = maxModels
else:
raise RuntimeError('It is an error to specify maxModels both in the job'
' params AND in the permutations file.')
# Figure out what kind of search this is:
#
# If it's a temporal prediction search:
# the first sprint has 1 swarm, with just the predicted field
# elif it's a spatial prediction search:
# the first sprint has N swarms, each with predicted field + one
# other field.
# elif it's a classification search:
# the first sprint has N swarms, each with 1 field
inferenceType = modelDescription['modelParams']['inferenceType']
if not InferenceType.validate(inferenceType):
raise ValueError("Invalid inference type %s" %inferenceType)
if inferenceType in [InferenceType.TemporalMultiStep,
InferenceType.NontemporalMultiStep]:
# If it does not have a separate encoder for the predicted field that
# goes to the classifier, it is a legacy multi-step network
classifierOnlyEncoder = None
for encoder in modelDescription["modelParams"]["sensorParams"]\
["encoders"].values():
if encoder.get("classifierOnly", False) \
and encoder["fieldname"] == vars.get('predictedField', None):
classifierOnlyEncoder = encoder
break
if classifierOnlyEncoder is None or self._inputPredictedField=="yes":
# If we don't have a separate encoder for the classifier (legacy
# MultiStep) or the caller explicitly wants to include the predicted
# field, then use the legacy temporal search methodology.
self._searchType = HsSearchType.legacyTemporal
else:
self._searchType = HsSearchType.temporal
elif inferenceType in [InferenceType.TemporalNextStep,
InferenceType.TemporalAnomaly]:
self._searchType = HsSearchType.legacyTemporal
elif inferenceType in (InferenceType.TemporalClassification,
InferenceType.NontemporalClassification):
self._searchType = HsSearchType.classification
else:
raise RuntimeError("Unsupported inference type: %s" % inferenceType)
# Get the predicted field. Note that even classification experiments
# have a "predicted" field - which is the field that contains the
# classification value.
self._predictedField = vars.get('predictedField', None)
if self._predictedField is None:
raise RuntimeError("Permutations file '%s' does not have the required"
" 'predictedField' variable" % filename)
# Read in and validate the permutations dict
if 'permutations' not in vars:
raise RuntimeError("Permutations file '%s' does not define permutations" % filename)
if not isinstance(vars['permutations'], dict):
      raise RuntimeError("Permutations file '%s' defines a permutations variable "
                         "but it is not a dict" % filename)
self._encoderNames = []
self._permutations = vars['permutations']
self._flattenedPermutations = dict()
def _flattenPermutations(value, keys):
if ':' in keys[-1]:
        raise RuntimeError("The permutation variable '%s' contains a ':' "
                           "character, which is not allowed." % keys[-1])
flatKey = _flattenKeys(keys)
if isinstance(value, PermuteEncoder):
self._encoderNames.append(flatKey)
# If this is the encoder for the predicted field, save its name.
if value.fieldName == self._predictedField:
self._predictedFieldEncoder = flatKey
# Store the flattened representations of the variables within the
# encoder.
for encKey, encValue in value.kwArgs.iteritems():
if isinstance(encValue, PermuteVariable):
self._flattenedPermutations['%s:%s' % (flatKey, encKey)] = encValue
elif isinstance(value, PermuteVariable):
self._flattenedPermutations[flatKey] = value
else:
if isinstance(value, PermuteVariable):
          self._flattenedPermutations[flatKey] = value
rApply(self._permutations, _flattenPermutations)
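    # For illustration (hypothetical names): after the rApply() above,
    # self._flattenedPermutations is keyed by flattened variable paths such as
    # 'modelParams|inferenceType' for top-level permuted variables, and
    # '<encoderFlatKey>:<encoderParam>' for variables nested inside a
    # PermuteEncoder (see the '%s:%s' join above).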
def getExpectedNumModels(self):
"""Computes the number of models that are expected to complete as part of
this instances's HyperSearch.
NOTE: This is compute-intensive for HyperSearches with a huge number of
combinations.
NOTE/TODO: THIS ONLY WORKS FOR RONOMATIC: This method is exposed for the
               benefit of permutations_runner.py for use in progress
reporting.
Parameters:
---------------------------------------------------------
retval: The total number of expected models, if known; -1 if unknown
"""
return -1
def getModelNames(self):
"""Generates a list of model names that are expected to complete as part of
this instances's HyperSearch.
NOTE: This is compute-intensive for HyperSearches with a huge number of
combinations.
NOTE/TODO: THIS ONLY WORKS FOR RONOMATIC: This method is exposed for the
               benefit of permutations_runner.py.
Parameters:
---------------------------------------------------------
retval: List of model names for this HypersearchV2 instance, or
                None if not applicable
"""
return None
def getPermutationVariables(self):
"""Returns a dictionary of permutation variables.
Parameters:
---------------------------------------------------------
retval: A dictionary of permutation variables; keys are
flat permutation variable names and each value is
a sub-class of PermuteVariable.
"""
return self._flattenedPermutations
def getComplexVariableLabelLookupDict(self):
"""Generates a lookup dictionary of permutation variables whose values
are too complex for labels, so that artificial labels have to be generated
for them.
Parameters:
---------------------------------------------------------
retval: A look-up dictionary of permutation
variables whose values are too complex for labels, so
artificial labels were generated instead (e.g., "Choice0",
"Choice1", etc.); the key is the name of the complex variable
and the value is:
dict(labels=<list_of_labels>, values=<list_of_values>).
"""
raise NotImplementedError
  def getOptimizationMetricInfo(self):
    """Retrieves the optimization key name and optimization function.
Parameters:
---------------------------------------------------------
retval: (optimizationMetricKey, maximize)
optimizationMetricKey: which report key to optimize for
maximize: True if we should try and maximize the optimizeKey
metric. False if we should minimize it.
"""
return (self._optimizeKey, self._maximize)
def _checkForOrphanedModels (self):
"""If there are any models that haven't been updated in a while, consider
them dead, and mark them as hidden in our resultsDB. We also change the
paramsHash and particleHash of orphaned models so that we can
re-generate that particle and/or model again if we desire.
Parameters:
----------------------------------------------------------------------
retval:
"""
self.logger.debug("Checking for orphaned models older than %s" % \
(self._modelOrphanIntervalSecs))
while True:
orphanedModelId = self._cjDAO.modelAdoptNextOrphan(self._jobID,
self._modelOrphanIntervalSecs)
if orphanedModelId is None:
return
self.logger.info("Removing orphaned model: %d" % (orphanedModelId))
# Change the model hash and params hash as stored in the models table so
# that we can insert a new model with the same paramsHash
for attempt in range(100):
paramsHash = hashlib.md5("OrphanParams.%d.%d" % (orphanedModelId,
attempt)).digest()
particleHash = hashlib.md5("OrphanParticle.%d.%d" % (orphanedModelId,
attempt)).digest()
try:
self._cjDAO.modelSetFields(orphanedModelId,
dict(engParamsHash=paramsHash,
engParticleHash=particleHash))
success = True
        except Exception:
success = False
if success:
break
if not success:
raise RuntimeError("Unexpected failure to change paramsHash and "
"particleHash of orphaned model")
# Mark this model as complete, with reason "orphaned"
self._cjDAO.modelSetCompleted(modelID=orphanedModelId,
completionReason=ClientJobsDAO.CMPL_REASON_ORPHAN,
completionMsg="Orphaned")
# Update our results DB immediately, rather than wait for the worker
      # to inform us. This ensures that the getParticleInfos() calls we make
# below don't include this particle. Setting the metricResult to None
# sets it to worst case
self._resultsDB.update(modelID=orphanedModelId,
modelParams=None,
modelParamsHash=paramsHash,
metricResult=None,
completed = True,
completionReason = ClientJobsDAO.CMPL_REASON_ORPHAN,
matured = True,
numRecords = 0)
def _hsStatePeriodicUpdate(self, exhaustedSwarmId=None):
"""
Periodically, check to see if we should remove a certain field combination
from evaluation (because it is doing so poorly) or move on to the next
sprint (add in more fields).
This method is called from _getCandidateParticleAndSwarm(), which is called
right before we try and create a new model to run.
Parameters:
-----------------------------------------------------------------------
    exhaustedSwarmId: If not None, force a change to the current set of active
                      swarms by removing this swarm. This is used in situations
                      where we can't find any new unique models to create in
                      this swarm. In these situations, we update the hypersearch
                      state regardless of the timestamp of the last time another
                      worker updated it.
"""
if self._hsState is None:
self._hsState = HsState(self)
# Read in current state from the DB
self._hsState.readStateFromDB()
# This will hold the list of completed swarms that we find
completedSwarms = set()
# Mark the exhausted swarm as completing/completed, if any
if exhaustedSwarmId is not None:
self.logger.info("Removing swarm %s from the active set "
"because we can't find any new unique particle "
"positions" % (exhaustedSwarmId))
# Is it completing or completed?
(particles, _, _, _, _) = self._resultsDB.getParticleInfos(
swarmId=exhaustedSwarmId, matured=False)
if len(particles) > 0:
exhaustedSwarmStatus = 'completing'
else:
exhaustedSwarmStatus = 'completed'
# Kill all swarms that don't need to be explored based on the most recent
# information.
if self._killUselessSwarms:
self._hsState.killUselessSwarms()
# For all swarms that were in the 'completing' state, see if they have
# completed yet.
#
# Note that we are not quite sure why this doesn't automatically get handled
# when we receive notification that a model finally completed in a swarm.
# But, we ARE running into a situation, when speculativeParticles is off,
# where we have one or more swarms in the 'completing' state even though all
# models have since finished. This logic will serve as a failsafe against
# this situation.
completingSwarms = self._hsState.getCompletingSwarms()
for swarmId in completingSwarms:
# Is it completed?
(particles, _, _, _, _) = self._resultsDB.getParticleInfos(
swarmId=swarmId, matured=False)
if len(particles) == 0:
completedSwarms.add(swarmId)
# Are there any swarms we can remove (because they have matured)?
completedSwarmGens = self._resultsDB.getMaturedSwarmGenerations()
priorCompletedSwarms = self._hsState.getCompletedSwarms()
for (swarmId, genIdx, errScore) in completedSwarmGens:
# Don't need to report it if the swarm already completed
if swarmId in priorCompletedSwarms:
continue
completedList = self._swarmTerminator.recordDataPoint(
swarmId=swarmId, generation=genIdx, errScore=errScore)
# Update status message
statusMsg = "Completed generation #%d of swarm '%s' with a best" \
" errScore of %g" % (genIdx, swarmId, errScore)
if len(completedList) > 0:
statusMsg = "%s. Matured swarm(s): %s" % (statusMsg, completedList)
self.logger.info(statusMsg)
self._cjDAO.jobSetFields (jobID=self._jobID,
fields=dict(engStatus=statusMsg),
useConnectionID=False,
ignoreUnchanged=True)
# Special test mode to check which swarms have terminated
if 'NTA_TEST_recordSwarmTerminations' in os.environ:
while True:
resultsStr = self._cjDAO.jobGetFields(self._jobID, ['results'])[0]
if resultsStr is None:
results = {}
else:
results = json.loads(resultsStr)
if not 'terminatedSwarms' in results:
results['terminatedSwarms'] = {}
for swarm in completedList:
if swarm not in results['terminatedSwarms']:
results['terminatedSwarms'][swarm] = (genIdx,
self._swarmTerminator.swarmScores[swarm])
newResultsStr = json.dumps(results)
if newResultsStr == resultsStr:
break
updated = self._cjDAO.jobSetFieldIfEqual(jobID=self._jobID,
fieldName='results',
curValue=resultsStr,
newValue = json.dumps(results))
if updated:
break
if len(completedList) > 0:
for name in completedList:
self.logger.info("Swarm matured: %s. Score at generation %d: "
"%s" % (name, genIdx, errScore))
completedSwarms = completedSwarms.union(completedList)
if len(completedSwarms)==0 and (exhaustedSwarmId is None):
return
# We need to mark one or more swarms as completed, keep trying until
# successful, or until some other worker does it for us.
while True:
if exhaustedSwarmId is not None:
self._hsState.setSwarmState(exhaustedSwarmId, exhaustedSwarmStatus)
# Mark the completed swarms as completed
for swarmId in completedSwarms:
self._hsState.setSwarmState(swarmId, 'completed')
# If nothing changed, we're done
if not self._hsState.isDirty():
return
# Update the shared Hypersearch state now
# This will do nothing and return False if some other worker beat us to it
success = self._hsState.writeStateToDB()
if success:
# Go through and cancel all models that are still running, except for
# the best model. Once the best model changes, the one that used to be
# best (and has matured) will notice that and stop itself at that point.
jobResultsStr = self._cjDAO.jobGetFields(self._jobID, ['results'])[0]
if jobResultsStr is not None:
jobResults = json.loads(jobResultsStr)
bestModelId = jobResults.get('bestModel', None)
else:
bestModelId = None
for swarmId in list(completedSwarms):
(_, modelIds, _, _, _) = self._resultsDB.getParticleInfos(
swarmId=swarmId, completed=False)
if bestModelId in modelIds:
modelIds.remove(bestModelId)
if len(modelIds) == 0:
continue
          self.logger.info("Killing the following models in swarm '%s' because"
                           " the swarm is being terminated: %s" % (swarmId,
                                                                   str(modelIds)))
for modelId in modelIds:
self._cjDAO.modelSetFields(modelId,
dict(engStop=ClientJobsDAO.STOP_REASON_KILLED),
ignoreUnchanged = True)
return
# We were not able to change the state because some other worker beat us
# to it.
# Get the new state, and try again to apply our changes.
self._hsState.readStateFromDB()
self.logger.debug("New hsState has been set by some other worker to: "
" \n%s" % (pprint.pformat(self._hsState._state, indent=4)))
def _getCandidateParticleAndSwarm (self, exhaustedSwarmId=None):
"""Find or create a candidate particle to produce a new model.
At any one time, there is an active set of swarms in the current sprint, where
each swarm in the sprint represents a particular combination of fields.
Ideally, we should try to balance the number of models we have evaluated for
each swarm at any time.
This method will see how many models have been evaluated for each active
swarm in the current active sprint(s) and then try and choose a particle
from the least represented swarm in the first possible active sprint, with
the following constraints/rules:
for each active sprint:
for each active swarm (preference to those with least# of models so far):
1.) The particle will be created from new (generation #0) if there are not
already self._minParticlesPerSwarm particles in the swarm.
2.) Find the first gen that has a completed particle and evolve that
particle to the next generation.
3.) If we got to here, we know that we have satisfied the min# of
particles for the swarm, and they are all currently running (probably at
various generation indexes). Go onto the next swarm
If we couldn't find a swarm to allocate a particle in, go onto the next
sprint and start allocating particles there....
Parameters:
----------------------------------------------------------------
exhaustedSwarmId: If not None, force a change to the current set of active
swarms by marking this swarm as either 'completing' or
                      'completed'. If there are still models being evaluated in
                      it, mark it as 'completing', else 'completed'. This is
used in situations where we can't find any new unique
models to create in this swarm. In these situations, we
force an update to the hypersearch state so no other
                      worker wastes time trying to use this swarm.
retval: (exit, particle, swarm)
exit: If true, this worker is ready to exit (particle and
swarm will be None)
particle: Which particle to run
swarm: which swarm the particle is in
NOTE: When particle and swarm are None and exit is False, it
means that we need to wait for one or more other worker(s) to
finish their respective models before we can pick a particle
to run. This will generally only happen when speculativeParticles
is set to False.
"""
# Cancel search?
jobCancel = self._cjDAO.jobGetFields(self._jobID, ['cancel'])[0]
if jobCancel:
self._jobCancelled = True
# Did a worker cancel the job because of an error?
(workerCmpReason, workerCmpMsg) = self._cjDAO.jobGetFields(self._jobID,
['workerCompletionReason', 'workerCompletionMsg'])
if workerCmpReason == ClientJobsDAO.CMPL_REASON_SUCCESS:
self.logger.info("Exiting due to job being cancelled")
self._cjDAO.jobSetFields(self._jobID,
dict(workerCompletionMsg="Job was cancelled"),
useConnectionID=False, ignoreUnchanged=True)
else:
self.logger.error("Exiting because some worker set the "
"workerCompletionReason to %s. WorkerCompletionMsg: %s" %
(workerCmpReason, workerCmpMsg))
return (True, None, None)
# Perform periodic updates on the Hypersearch state.
if self._hsState is not None:
priorActiveSwarms = self._hsState.getActiveSwarms()
else:
priorActiveSwarms = None
# Update the HypersearchState, checking for matured swarms, and marking
# the passed in swarm as exhausted, if any
self._hsStatePeriodicUpdate(exhaustedSwarmId=exhaustedSwarmId)
# The above call may have modified self._hsState['activeSwarmIds']
# Log the current set of active swarms
activeSwarms = self._hsState.getActiveSwarms()
if activeSwarms != priorActiveSwarms:
self.logger.info("Active swarms changed to %s (from %s)" % (activeSwarms,
priorActiveSwarms))
self.logger.debug("Active swarms: %s" % (activeSwarms))
# If too many model errors were detected, exit
totalCmpModels = self._resultsDB.getNumCompletedModels()
if totalCmpModels > 5:
numErrs = self._resultsDB.getNumErrModels()
if (float(numErrs) / totalCmpModels) > self._maxPctErrModels:
# Get one of the errors
errModelIds = self._resultsDB.getErrModelIds()
resInfo = self._cjDAO.modelsGetResultAndStatus([errModelIds[0]])[0]
modelErrMsg = resInfo.completionMsg
cmpMsg = "%s: Exiting due to receiving too many models failing" \
" from exceptions (%d out of %d). \nModel Exception: %s" % \
(ErrorCodes.tooManyModelErrs, numErrs, totalCmpModels,
modelErrMsg)
self.logger.error(cmpMsg)
# Cancel the entire job now, if it has not already been cancelled
workerCmpReason = self._cjDAO.jobGetFields(self._jobID,
['workerCompletionReason'])[0]
if workerCmpReason == ClientJobsDAO.CMPL_REASON_SUCCESS:
self._cjDAO.jobSetFields(
self._jobID,
fields=dict(
cancel=True,
workerCompletionReason = ClientJobsDAO.CMPL_REASON_ERROR,
workerCompletionMsg = cmpMsg),
useConnectionID=False,
ignoreUnchanged=True)
return (True, None, None)
# If HsState thinks the search is over, exit. It is seeing if the results
# on the sprint we just completed are worse than a prior sprint.
if self._hsState.isSearchOver():
cmpMsg = "Exiting because results did not improve in most recently" \
" completed sprint."
self.logger.info(cmpMsg)
self._cjDAO.jobSetFields(self._jobID,
dict(workerCompletionMsg=cmpMsg),
useConnectionID=False, ignoreUnchanged=True)
return (True, None, None)
# Search successive active sprints, until we can find a candidate particle
# to work with
sprintIdx = -1
while True:
# Is this sprint active?
sprintIdx += 1
(active, eos) = self._hsState.isSprintActive(sprintIdx)
# If no more sprints to explore:
if eos:
# If any prior ones are still being explored, finish up exploring them
if self._hsState.anyGoodSprintsActive():
self.logger.info("No more sprints to explore, waiting for prior"
" sprints to complete")
return (False, None, None)
# Else, we're done
else:
cmpMsg = "Exiting because we've evaluated all possible field " \
"combinations"
self._cjDAO.jobSetFields(self._jobID,
dict(workerCompletionMsg=cmpMsg),
useConnectionID=False, ignoreUnchanged=True)
self.logger.info(cmpMsg)
return (True, None, None)
if not active:
if not self._speculativeParticles:
if not self._hsState.isSprintCompleted(sprintIdx):
            self.logger.info("Waiting for all particles in sprint %d to complete"
                             " before evolving any more particles" % (sprintIdx))
return (False, None, None)
continue
# ====================================================================
# Look for swarms that have particle "holes" in their generations. That is,
# an earlier generation with less than minParticlesPerSwarm. This can
      # happen if a model that was started earlier got orphaned. If we detect
# this, start a new particle in that generation.
swarmIds = self._hsState.getActiveSwarms(sprintIdx)
for swarmId in swarmIds:
firstNonFullGenIdx = self._resultsDB.firstNonFullGeneration(
swarmId=swarmId,
minNumParticles=self._minParticlesPerSwarm)
if firstNonFullGenIdx is None:
continue
if firstNonFullGenIdx < self._resultsDB.highestGeneration(swarmId):
self.logger.info("Cloning an earlier model in generation %d of swarm "
"%s (sprintIdx=%s) to replace an orphaned model" % (
firstNonFullGenIdx, swarmId, sprintIdx))
# Clone a random orphaned particle from the incomplete generation
(allParticles, allModelIds, errScores, completed, matured) = \
self._resultsDB.getOrphanParticleInfos(swarmId, firstNonFullGenIdx)
if len(allModelIds) > 0:
# We have seen instances where we get stuck in a loop incessantly
# trying to clone earlier models (NUP-1511). My best guess is that
# we've already successfully cloned each of the orphaned models at
# least once, but still need at least one more. If we don't create
# a new particleID, we will never be able to instantiate another
# model (since particleID hash is a unique key in the models table).
# So, on 1/8/2013 this logic was changed to create a new particleID
# whenever we clone an orphan.
newParticleId = True
self.logger.info("Cloning an orphaned model")
# If there is no orphan, clone one of the other particles. We can
# have no orphan if this was a speculative generation that only
# continued particles completed in the prior generation.
else:
newParticleId = True
self.logger.info("No orphans found, so cloning a non-orphan")
(allParticles, allModelIds, errScores, completed, matured) = \
self._resultsDB.getParticleInfos(swarmId=swarmId,
genIdx=firstNonFullGenIdx)
# Clone that model
modelId = random.choice(allModelIds)
self.logger.info("Cloning model %r" % (modelId))
(particleState, _, _, _, _) = self._resultsDB.getParticleInfo(modelId)
particle = Particle(hsObj = self,
resultsDB = self._resultsDB,
flattenedPermuteVars=self._flattenedPermutations,
newFromClone=particleState,
newParticleId=newParticleId)
return (False, particle, swarmId)
# ====================================================================
# Sort the swarms in priority order, trying the ones with the least
# number of models first
swarmSizes = numpy.array([self._resultsDB.numModels(x) for x in swarmIds])
swarmSizeAndIdList = zip(swarmSizes, swarmIds)
swarmSizeAndIdList.sort()
for (_, swarmId) in swarmSizeAndIdList:
# -------------------------------------------------------------------
# 1.) The particle will be created from new (at generation #0) if there
# are not already self._minParticlesPerSwarm particles in the swarm.
(allParticles, allModelIds, errScores, completed, matured) = (
self._resultsDB.getParticleInfos(swarmId))
if len(allParticles) < self._minParticlesPerSwarm:
particle = Particle(hsObj=self,
resultsDB=self._resultsDB,
flattenedPermuteVars=self._flattenedPermutations,
swarmId=swarmId,
newFarFrom=allParticles)
# Jam in the best encoder state found from the first sprint
bestPriorModel = None
if sprintIdx >= 1:
(bestPriorModel, errScore) = self._hsState.bestModelInSprint(0)
if bestPriorModel is not None:
self.logger.info("Best model and errScore from previous sprint(%d):"
" %s, %g" % (0, str(bestPriorModel), errScore))
(baseState, modelId, errScore, completed, matured) \
= self._resultsDB.getParticleInfo(bestPriorModel)
particle.copyEncoderStatesFrom(baseState)
# Copy the best inference type from the earlier sprint
particle.copyVarStatesFrom(baseState, ['modelParams|inferenceType'])
# It's best to jiggle the best settings from the prior sprint, so
# compute a new position starting from that previous best
# Only jiggle the vars we copied from the prior model
whichVars = []
for varName in baseState['varStates']:
if ':' in varName:
whichVars.append(varName)
particle.newPosition(whichVars)
self.logger.debug("Particle after incorporating encoder vars from best "
"model in previous sprint: \n%s" % (str(particle)))
return (False, particle, swarmId)
# -------------------------------------------------------------------
# 2.) Look for a completed particle to evolve
# Note that we use lastDescendent. We only want to evolve particles that
# are at their most recent generation index.
(readyParticles, readyModelIds, readyErrScores, _, _) = (
self._resultsDB.getParticleInfos(swarmId, genIdx=None,
matured=True, lastDescendent=True))
# If we have at least 1 ready particle to evolve...
if len(readyParticles) > 0:
readyGenIdxs = [x['genIdx'] for x in readyParticles]
sortedGenIdxs = sorted(set(readyGenIdxs))
genIdx = sortedGenIdxs[0]
# Now, genIdx has the generation of the particle we want to run,
# Get a particle from that generation and evolve it.
useParticle = None
for particle in readyParticles:
if particle['genIdx'] == genIdx:
useParticle = particle
break
# If speculativeParticles is off, we don't want to evolve a particle
# into the next generation until all particles in the current
# generation have completed.
if not self._speculativeParticles:
(particles, _, _, _, _) = self._resultsDB.getParticleInfos(
swarmId, genIdx=genIdx, matured=False)
if len(particles) > 0:
continue
particle = Particle(hsObj=self,
resultsDB=self._resultsDB,
flattenedPermuteVars=self._flattenedPermutations,
evolveFromState=useParticle)
return (False, particle, swarmId)
# END: for (swarmSize, swarmId) in swarmSizeAndIdList:
# No success in this swarm, onto next swarm
# ====================================================================
# We couldn't find a particle in this sprint ready to evolve. If
# speculative particles is OFF, we have to wait for one or more other
# workers to finish up their particles before we can do anything.
if not self._speculativeParticles:
self.logger.info("Waiting for one or more of the %s swarms "
"to complete a generation before evolving any more particles" \
% (str(swarmIds)))
return (False, None, None)
# END: while True:
# No success in this sprint, into next sprint
def _okToExit(self):
"""Test if it's OK to exit this worker. This is only called when we run
out of prospective new models to evaluate. This method sees if all models
have matured yet. If not, it will sleep for a bit and return False. This
will indicate to the hypersearch worker that we should keep running, and
check again later. This gives this worker a chance to pick up and adopt any
model which may become orphaned by another worker before it matures.
If all models have matured, this method will send a STOP message to all
    matured, running models (presumably, there will be just one - the model
which thinks it's the best) before returning True.
"""
# Send an update status periodically to the JobTracker so that it doesn't
# think this worker is dead.
print >> sys.stderr, "reporter:status:In hypersearchV2: _okToExit"
# Any immature models still running?
if not self._jobCancelled:
(_, modelIds, _, _, _) = self._resultsDB.getParticleInfos(matured=False)
if len(modelIds) > 0:
        self.logger.info("Ready to end hypersearch, but not all models have " \
"matured yet. Sleeping a bit to wait for all models " \
"to mature.")
# Sleep for a bit, no need to check for orphaned models very often
time.sleep(5.0 * random.random())
return False
# All particles have matured, send a STOP signal to any that are still
# running.
(_, modelIds, _, _, _) = self._resultsDB.getParticleInfos(completed=False)
for modelId in modelIds:
self.logger.info("Stopping model %d because the search has ended" \
% (modelId))
self._cjDAO.modelSetFields(modelId,
dict(engStop=ClientJobsDAO.STOP_REASON_STOPPED),
ignoreUnchanged = True)
# Update the HsState to get the accurate field contributions.
self._hsStatePeriodicUpdate()
pctFieldContributions, absFieldContributions = \
self._hsState.getFieldContributions()
# Update the results field with the new field contributions.
jobResultsStr = self._cjDAO.jobGetFields(self._jobID, ['results'])[0]
if jobResultsStr is not None:
jobResults = json.loads(jobResultsStr)
else:
jobResults = {}
# Update the fieldContributions field.
if pctFieldContributions != jobResults.get('fieldContributions', None):
jobResults['fieldContributions'] = pctFieldContributions
jobResults['absoluteFieldContributions'] = absFieldContributions
isUpdated = self._cjDAO.jobSetFieldIfEqual(self._jobID,
fieldName='results',
curValue=jobResultsStr,
newValue=json.dumps(jobResults))
if isUpdated:
self.logger.info('Successfully updated the field contributions:%s',
pctFieldContributions)
else:
self.logger.info('Failed updating the field contributions, ' \
'another hypersearch worker must have updated it')
return True
def killSwarmParticles(self, swarmID):
(_, modelIds, _, _, _) = self._resultsDB.getParticleInfos(
swarmId=swarmID, completed=False)
for modelId in modelIds:
      self.logger.info("Killing the following models in swarm '%s' because"
                       " the swarm is being terminated: %s" % (swarmID,
                                                               str(modelIds)))
self._cjDAO.modelSetFields(
modelId, dict(engStop=ClientJobsDAO.STOP_REASON_KILLED),
ignoreUnchanged=True)
def createModels(self, numModels=1):
"""Create one or more new models for evaluation. These should NOT be models
that we already know are in progress (i.e. those that have been sent to us
via recordModelProgress). We return a list of models to the caller
(HypersearchWorker) and if one can be successfully inserted into
the models table (i.e. it is not a duplicate) then HypersearchWorker will
turn around and call our runModel() method, passing in this model. If it
is a duplicate, HypersearchWorker will call this method again. A model
is a duplicate if either the modelParamsHash or particleHash is
identical to another entry in the model table.
The numModels is provided by HypersearchWorker as a suggestion as to how
many models to generate. This particular implementation only ever returns 1
model.
Before choosing some new models, we first do a sweep for any models that
    may have been abandoned by failed workers. If/when we detect an abandoned
model, we mark it as complete and orphaned and hide it from any subsequent
queries to our ResultsDB. This effectively considers it as if it never
existed. We also change the paramsHash and particleHash in the model record
of the models table so that we can create another model with the same
params and particle status and run it (which we then do immediately).
The modelParamsHash returned for each model should be a hash (max allowed
size of ClientJobsDAO.hashMaxSize) that uniquely identifies this model by
    its params and the optional particleHash should be a hash of the particleId
and generation index. Every model that gets placed into the models database,
either by this worker or another worker, will have these hashes computed for
it. The recordModelProgress gets called for every model in the database and
the hash is used to tell which, if any, are the same as the ones this worker
generated.
NOTE: We check first ourselves for possible duplicates using the paramsHash
before we return a model. If HypersearchWorker failed to insert it (because
some other worker beat us to it), it will turn around and call our
recordModelProgress with that other model so that we now know about it. It
will then call createModels() again.
This methods returns an exit boolean and the model to evaluate. If there is
    no model to evaluate, we may return False for exit because we want to stay
alive for a while, waiting for all other models to finish. This gives us
a chance to detect and pick up any possibly orphaned model by another
worker.
Parameters:
----------------------------------------------------------------------
numModels: number of models to generate
retval: (exit, models)
exit: true if this worker should exit.
models: list of tuples, one for each model. Each tuple contains:
(modelParams, modelParamsHash, particleHash)
modelParams is a dictionary containing the following elements:
structuredParams: dictionary containing all variables for
this model, with encoders represented as a dict within
                  this dict (or None if they are not included).
particleState: dictionary containing the state of this
particle. This includes the position and velocity of
                  each of its variables, the particleId, and the particle
generation index. It contains the following keys:
id: The particle Id of the particle we are using to
generate/track this model. This is a string of the
                      form <hypersearchWorkerId>.<particleIdx>
genIdx: the particle's generation index. This starts at 0
and increments every time we move the particle to a
new position.
swarmId: The swarmId, which is a string of the form
<encoder>.<encoder>... that describes this swarm
varStates: dict of the variable states. The key is the
variable name, the value is a dict of the variable's
position, velocity, bestPosition, bestResult, etc.
"""
# Check for and mark orphaned models
self._checkForOrphanedModels()
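    # For illustration only (all values below are hypothetical), each tuple
    # appended to modelResults below follows the structure described in the
    # docstring:
    #   modelParams = dict(
    #     structuredParams={...},   # permuted vars; encoders as nested dicts or None
    #     particleState=dict(
    #       id='<hypersearchWorkerId>.<particleIdx>',
    #       genIdx=0,
    #       swarmId='<encoder>.<encoder>',
    #       varStates={...}))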
modelResults = []
for _ in xrange(numModels):
candidateParticle = None
      # If we've reached the max # of models to evaluate, we're done.
if (self._maxModels is not None and
(self._resultsDB.numModels() - self._resultsDB.getNumErrModels()) >=
self._maxModels):
return (self._okToExit(), [])
# If we don't already have a particle to work on, get a candidate swarm and
# particle to work with. If None is returned for the particle it means
# either that the search is over (if exitNow is also True) or that we need
# to wait for other workers to finish up their models before we can pick
# another particle to run (if exitNow is False).
if candidateParticle is None:
(exitNow, candidateParticle, candidateSwarm) = (
self._getCandidateParticleAndSwarm())
if candidateParticle is None:
if exitNow:
return (self._okToExit(), [])
else:
# Send an update status periodically to the JobTracker so that it doesn't
# think this worker is dead.
print >> sys.stderr, "reporter:status:In hypersearchV2: speculativeWait"
time.sleep(self._speculativeWaitSecondsMax * random.random())
return (False, [])
useEncoders = candidateSwarm.split('.')
numAttempts = 0
# Loop until we can create a unique model that we haven't seen yet.
while True:
# If this is the Nth attempt with the same candidate, agitate it a bit
# to find a new unique position for it.
if numAttempts >= 1:
self.logger.debug("Agitating particle to get unique position after %d "
"failed attempts in a row" % (numAttempts))
candidateParticle.agitate()
# Create the hierarchical params expected by the base description. Note
# that this is where we incorporate encoders that have no permuted
# values in them.
position = candidateParticle.getPosition()
structuredParams = dict()
def _buildStructuredParams(value, keys):
flatKey = _flattenKeys(keys)
# If it's an encoder, either put in None if it's not used, or replace
# all permuted constructor params with the actual position.
if flatKey in self._encoderNames:
if flatKey in useEncoders:
# Form encoder dict, substituting in chosen permutation values.
return value.getDict(flatKey, position)
# Encoder not used.
else:
return None
# Regular top-level variable.
elif flatKey in position:
return position[flatKey]
# Fixed override of a parameter in the base description.
else:
return value
structuredParams = rCopy(self._permutations,
_buildStructuredParams,
discardNoneKeys=False)
# Create the modelParams.
modelParams = dict(
structuredParams=structuredParams,
particleState = candidateParticle.getState()
)
# And the hashes.
m = hashlib.md5()
m.update(sortedJSONDumpS(structuredParams))
m.update(self._baseDescriptionHash)
paramsHash = m.digest()
particleInst = "%s.%s" % (modelParams['particleState']['id'],
modelParams['particleState']['genIdx'])
particleHash = hashlib.md5(particleInst).digest()
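        # A model counts as a duplicate if either paramsHash or particleHash
        # matches an existing row in the models table (see this method's
        # docstring), which is why both the structured params and the particle
        # instance are hashed here.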
# Increase attempt counter
numAttempts += 1
# If this is a new one, and passes the filter test, exit with it.
        # TODO: There is currently a problem with this filter's implementation as
# it relates to self._maxUniqueModelAttempts. When there is a filter in
# effect, we should try a lot more times before we decide we have
# exhausted the parameter space for this swarm. The question is, how many
# more times?
if self._filterFunc and not self._filterFunc(structuredParams):
valid = False
else:
valid = True
if valid and self._resultsDB.getModelIDFromParamsHash(paramsHash) is None:
break
# If we've exceeded the max allowed number of attempts, mark this swarm
# as completing or completed, so we don't try and allocate any more new
# particles to it, and pick another.
if numAttempts >= self._maxUniqueModelAttempts:
(exitNow, candidateParticle, candidateSwarm) \
= self._getCandidateParticleAndSwarm(
exhaustedSwarmId=candidateSwarm)
if candidateParticle is None:
if exitNow:
return (self._okToExit(), [])
else:
time.sleep(self._speculativeWaitSecondsMax * random.random())
return (False, [])
numAttempts = 0
useEncoders = candidateSwarm.split('.')
# Log message
if self.logger.getEffectiveLevel() <= logging.DEBUG:
self.logger.debug("Submitting new potential model to HypersearchWorker: \n%s"
% (pprint.pformat(modelParams, indent=4)))
modelResults.append((modelParams, paramsHash, particleHash))
return (False, modelResults)
def recordModelProgress(self, modelID, modelParams, modelParamsHash, results,
completed, completionReason, matured, numRecords):
"""Record or update the results for a model. This is called by the
HSW whenever it gets results info for another model, or updated results
on a model that is still running.
The first time this is called for a given modelID, the modelParams will
contain the params dict for that model and the modelParamsHash will contain
the hash of the params. Subsequent updates of the same modelID will
have params and paramsHash values of None (in order to save overhead).
    The Hypersearch object should save these results into its own working
memory into some table, which it then uses to determine what kind of
new models to create next time createModels() is called.
Parameters:
----------------------------------------------------------------------
modelID: ID of this model in models table
modelParams: params dict for this model, or None if this is just an update
of a model that it already previously reported on.
See the comments for the createModels() method for a
description of this dict.
modelParamsHash: hash of the modelParams dict, generated by the worker
that put it into the model database.
results: tuple containing (allMetrics, optimizeMetric). Each is a
                 dict containing metricName:result pairs.
                 May be None if we have no results yet.
completed: True if the model has completed evaluation, False if it
is still running (and these are online results)
completionReason: One of the ClientJobsDAO.CMPL_REASON_XXX equates
matured: True if this model has matured. In most cases, once a
model matures, it will complete as well. The only time a
model matures and does not complete is if it's currently
the best model and we choose to keep it running to generate
predictions.
numRecords: Number of records that have been processed so far by this
model.
"""
if results is None:
metricResult = None
else:
metricResult = results[1].values()[0]
# Update our database.
errScore = self._resultsDB.update(modelID=modelID,
modelParams=modelParams,modelParamsHash=modelParamsHash,
metricResult=metricResult, completed=completed,
completionReason=completionReason, matured=matured,
numRecords=numRecords)
# Log message.
self.logger.debug('Received progress on model %d: completed: %s, '
'cmpReason: %s, numRecords: %d, errScore: %s' ,
modelID, completed, completionReason, numRecords, errScore)
# Log best so far.
(bestModelID, bestResult) = self._resultsDB.bestModelIdAndErrScore()
self.logger.debug('Best err score seen so far: %s on model %s' % \
(bestResult, bestModelID))
def runModel(self, modelID, jobID, modelParams, modelParamsHash,
jobsDAO, modelCheckpointGUID):
"""Run the given model.
This runs the model described by 'modelParams'. Periodically, it updates
the results seen on the model to the model database using the databaseAO
(database Access Object) methods.
Parameters:
-------------------------------------------------------------------------
modelID: ID of this model in models table
jobID: ID for this hypersearch job in the jobs table
modelParams: parameters of this specific model
modelParams is a dictionary containing the name/value
pairs of each variable we are permuting over. Note that
variables within an encoder spec have their name
                     structure as:
                       <encoderName>.<encoderVarName>
modelParamsHash: hash of modelParamValues
    jobsDAO:         jobs data access object - the interface to the jobs
database where model information is stored
modelCheckpointGUID: A persistent, globally-unique identifier for
constructing the model checkpoint key
"""
# We're going to make an assumption that if we're not using streams, that
# we also don't need checkpoints saved. For now, this assumption is OK
# (if there are no streams, we're typically running on a single machine
# and just save models to files) but we may want to break this out as
# a separate controllable parameter in the future
if not self._createCheckpoints:
modelCheckpointGUID = None
# Register this model in our database
self._resultsDB.update(modelID=modelID,
modelParams=modelParams,
modelParamsHash=modelParamsHash,
metricResult = None,
completed = False,
completionReason = None,
matured = False,
numRecords = 0)
# Get the structured params, which we pass to the base description
structuredParams = modelParams['structuredParams']
if self.logger.getEffectiveLevel() <= logging.DEBUG:
self.logger.debug("Running Model. \nmodelParams: %s, \nmodelID=%s, " % \
(pprint.pformat(modelParams, indent=4), modelID))
# Record time.clock() so that we can report on cpu time
cpuTimeStart = time.clock()
# Run the experiment. This will report the results back to the models
# database for us as well.
logLevel = self.logger.getEffectiveLevel()
try:
if self._dummyModel is None or self._dummyModel is False:
(cmpReason, cmpMsg) = runModelGivenBaseAndParams(
modelID=modelID,
jobID=jobID,
baseDescription=self._baseDescription,
params=structuredParams,
predictedField=self._predictedField,
reportKeys=self._reportKeys,
optimizeKey=self._optimizeKey,
jobsDAO=jobsDAO,
modelCheckpointGUID=modelCheckpointGUID,
logLevel=logLevel,
predictionCacheMaxRecords=self._predictionCacheMaxRecords)
else:
dummyParams = dict(self._dummyModel)
dummyParams['permutationParams'] = structuredParams
if self._dummyModelParamsFunc is not None:
permInfo = dict(structuredParams)
          permInfo['generation'] = modelParams['particleState']['genIdx']
dummyParams.update(self._dummyModelParamsFunc(permInfo))
(cmpReason, cmpMsg) = runDummyModel(
modelID=modelID,
jobID=jobID,
params=dummyParams,
predictedField=self._predictedField,
reportKeys=self._reportKeys,
optimizeKey=self._optimizeKey,
jobsDAO=jobsDAO,
modelCheckpointGUID=modelCheckpointGUID,
logLevel=logLevel,
predictionCacheMaxRecords=self._predictionCacheMaxRecords)
# Write out the completion reason and message
jobsDAO.modelSetCompleted(modelID,
completionReason = cmpReason,
completionMsg = cmpMsg,
cpuTime = time.clock() - cpuTimeStart)
except InvalidConnectionException, e:
self.logger.warn("%s", e)
|
matiasmenares/Shuffle
|
refs/heads/master
|
core/terminal.py
|
1
|
import sys
import os
import urllib
import json
import requests
from core.server import Server
class Terminal:
def __init__(self,url,password):
self.url = url
self.password = password
self.server = Server(url,password)
def terminal(self,send,cookie):
command = self.command(send)
        if command is False:
return self.execute(send,cookie)
def command(self,send):
if send == "exit":
            print "\n#> Connection Closed by user."
sys.exit(2)
else:
return False
def execute(self,cmd,cookie):
if self.server.beat():
return json.loads(requests.post(self.url, data = {'pass': self.password,'cmd': cmd} ,cookies=cookie).text)
def loop(self):
server = self.server.connection()
if server['response']:
            print "#> Connection Established, Enjoy!\n"
while True:
info = self.server.info(server['cookie'])
send = raw_input(info['server_name']+"@"+info['user']+"["+info['pwd'].rstrip('\n')+"]"+info['user_bash']+">")
terminal = self.terminal(send,server['cookie'])
print terminal['command']
elif server['response'] == False:
print "robot@shuffle[~]$> Response: "+server['msg']
else:
print "robot@shuffle[~]$> Connection fail."
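# Minimal usage sketch (not part of the original module; the URL and password
# below are placeholders for a reachable shell endpoint):
if __name__ == "__main__":
    terminal = Terminal("http://target.example/shell.php", "secret")
    terminal.loop()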
|
plotly/plotly.py
|
refs/heads/master
|
packages/python/plotly/plotly/validators/funnelarea/domain/_row.py
|
1
|
import _plotly_utils.basevalidators
class RowValidator(_plotly_utils.basevalidators.IntegerValidator):
def __init__(self, plotly_name="row", parent_name="funnelarea.domain", **kwargs):
super(RowValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", 0),
**kwargs
)
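# Usage sketch (not part of the original file; assumes the validate_coerce()
# entry point that plotly validators inherit from their base class):
#   RowValidator().validate_coerce(2)    # -> 2
#   RowValidator().validate_coerce(-1)   # rejected, since min defaults to 0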
|
azaghal/ansible
|
refs/heads/devel
|
test/units/utils/test_shlex.py
|
197
|
# (c) 2015, Marius Gedminas <marius@gedmin.as>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import unittest
from ansible.utils.shlex import shlex_split
class TestSplit(unittest.TestCase):
def test_trivial(self):
self.assertEqual(shlex_split("a b c"), ["a", "b", "c"])
def test_unicode(self):
self.assertEqual(shlex_split(u"a b \u010D"), [u"a", u"b", u"\u010D"])
def test_quoted(self):
self.assertEqual(shlex_split('"a b" c'), ["a b", "c"])
def test_comments(self):
self.assertEqual(shlex_split('"a b" c # d', comments=True), ["a b", "c"])
def test_error(self):
self.assertRaises(ValueError, shlex_split, 'a "b')
|
rfhk/awo-custom
|
refs/heads/8.0
|
stock_transfer_lot_filter/__init__.py
|
6
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) Rooms For (Hong Kong) Limited T/A OSCG (<http://www.openerp-asia.net>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import stock
import stock_transfer_detail
|
RevelSystems/django
|
refs/heads/master
|
tests/sites_tests/tests.py
|
27
|
from __future__ import unicode_literals
from django.apps import apps
from django.conf import settings
from django.contrib.sites import models
from django.contrib.sites.management import create_default_site
from django.contrib.sites.middleware import CurrentSiteMiddleware
from django.contrib.sites.models import Site, clear_site_cache
from django.contrib.sites.requests import RequestSite
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.db.models.signals import post_migrate
from django.http import HttpRequest
from django.test import TestCase, modify_settings, override_settings
from django.test.utils import captured_stdout
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.sites'})
class SitesFrameworkTests(TestCase):
multi_db = True
def setUp(self):
self.site = Site(
id=settings.SITE_ID,
domain="example.com",
name="example.com",
)
self.site.save()
def test_site_manager(self):
# Make sure that get_current() does not return a deleted Site object.
s = Site.objects.get_current()
self.assertIsInstance(s, Site)
s.delete()
self.assertRaises(ObjectDoesNotExist, Site.objects.get_current)
def test_site_cache(self):
# After updating a Site object (e.g. via the admin), we shouldn't return a
# bogus value from the SITE_CACHE.
site = Site.objects.get_current()
self.assertEqual("example.com", site.name)
s2 = Site.objects.get(id=settings.SITE_ID)
s2.name = "Example site"
s2.save()
site = Site.objects.get_current()
self.assertEqual("Example site", site.name)
def test_delete_all_sites_clears_cache(self):
# When all site objects are deleted the cache should also
# be cleared and get_current() should raise a DoesNotExist.
self.assertIsInstance(Site.objects.get_current(), Site)
Site.objects.all().delete()
self.assertRaises(Site.DoesNotExist, Site.objects.get_current)
@override_settings(ALLOWED_HOSTS=['example.com'])
def test_get_current_site(self):
# Test that the correct Site object is returned
request = HttpRequest()
request.META = {
"SERVER_NAME": "example.com",
"SERVER_PORT": "80",
}
site = get_current_site(request)
self.assertIsInstance(site, Site)
self.assertEqual(site.id, settings.SITE_ID)
# Test that an exception is raised if the sites framework is installed
# but there is no matching Site
site.delete()
self.assertRaises(ObjectDoesNotExist, get_current_site, request)
# A RequestSite is returned if the sites framework is not installed
with self.modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'}):
site = get_current_site(request)
self.assertIsInstance(site, RequestSite)
self.assertEqual(site.name, "example.com")
@override_settings(SITE_ID='', ALLOWED_HOSTS=['example.com'])
def test_get_current_site_no_site_id(self):
request = HttpRequest()
request.META = {
"SERVER_NAME": "example.com",
"SERVER_PORT": "80",
}
del settings.SITE_ID
site = get_current_site(request)
self.assertEqual(site.name, "example.com")
def test_domain_name_with_whitespaces(self):
# Regression for #17320
        # Domain names are not allowed to contain whitespace characters
site = Site(name="test name", domain="test test")
self.assertRaises(ValidationError, site.full_clean)
site.domain = "test\ttest"
self.assertRaises(ValidationError, site.full_clean)
site.domain = "test\ntest"
self.assertRaises(ValidationError, site.full_clean)
def test_clear_site_cache(self):
request = HttpRequest()
request.META = {
"SERVER_NAME": "example.com",
"SERVER_PORT": "80",
}
self.assertEqual(models.SITE_CACHE, {})
get_current_site(request)
expected_cache = {self.site.id: self.site}
self.assertEqual(models.SITE_CACHE, expected_cache)
with self.settings(SITE_ID=''):
get_current_site(request)
expected_cache.update({self.site.domain: self.site})
self.assertEqual(models.SITE_CACHE, expected_cache)
clear_site_cache(Site, instance=self.site, using='default')
self.assertEqual(models.SITE_CACHE, {})
@override_settings(SITE_ID='')
def test_clear_site_cache_domain(self):
site = Site.objects.create(name='example2.com', domain='example2.com')
request = HttpRequest()
request.META = {
"SERVER_NAME": "example2.com",
"SERVER_PORT": "80",
}
get_current_site(request) # prime the models.SITE_CACHE
expected_cache = {site.domain: site}
self.assertEqual(models.SITE_CACHE, expected_cache)
# Site exists in 'default' database so using='other' shouldn't clear.
clear_site_cache(Site, instance=site, using='other')
self.assertEqual(models.SITE_CACHE, expected_cache)
# using='default' should clear.
clear_site_cache(Site, instance=site, using='default')
self.assertEqual(models.SITE_CACHE, {})
class JustOtherRouter(object):
def allow_migrate(self, db, app_label, **hints):
return db == 'other'
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.sites'})
class CreateDefaultSiteTests(TestCase):
multi_db = True
def setUp(self):
self.app_config = apps.get_app_config('sites')
# Delete the site created as part of the default migration process.
Site.objects.all().delete()
def test_basic(self):
"""
#15346, #15573 - create_default_site() creates an example site only if
none exist.
"""
with captured_stdout() as stdout:
create_default_site(self.app_config)
self.assertEqual(Site.objects.count(), 1)
self.assertIn("Creating example.com", stdout.getvalue())
with captured_stdout() as stdout:
create_default_site(self.app_config)
self.assertEqual(Site.objects.count(), 1)
self.assertEqual("", stdout.getvalue())
@override_settings(DATABASE_ROUTERS=[JustOtherRouter()])
def test_multi_db_with_router(self):
"""
#16353, #16828 - The default site creation should respect db routing.
"""
create_default_site(self.app_config, using='default', verbosity=0)
create_default_site(self.app_config, using='other', verbosity=0)
self.assertFalse(Site.objects.using('default').exists())
self.assertTrue(Site.objects.using('other').exists())
def test_multi_db(self):
create_default_site(self.app_config, using='default', verbosity=0)
create_default_site(self.app_config, using='other', verbosity=0)
self.assertTrue(Site.objects.using('default').exists())
self.assertTrue(Site.objects.using('other').exists())
def test_save_another(self):
"""
#17415 - Another site can be created right after the default one.
On some backends the sequence needs to be reset after saving with an
explicit ID. Test that there isn't a sequence collisions by saving
another site. This test is only meaningful with databases that use
sequences for automatic primary keys such as PostgreSQL and Oracle.
"""
create_default_site(self.app_config, verbosity=0)
Site(domain='example2.com', name='example2.com').save()
def test_signal(self):
"""
#23641 - Sending the ``post_migrate`` signal triggers creation of the
default site.
"""
post_migrate.send(sender=self.app_config, app_config=self.app_config, verbosity=0)
self.assertTrue(Site.objects.exists())
@override_settings(SITE_ID=35696)
def test_custom_site_id(self):
"""
#23945 - The configured ``SITE_ID`` should be respected.
"""
create_default_site(self.app_config, verbosity=0)
self.assertEqual(Site.objects.get().pk, 35696)
@override_settings() # Restore original ``SITE_ID`` afterwards.
def test_no_site_id(self):
"""
#24488 - The pk should default to 1 if no ``SITE_ID`` is configured.
"""
del settings.SITE_ID
create_default_site(self.app_config, verbosity=0)
self.assertEqual(Site.objects.get().pk, 1)
class MiddlewareTest(TestCase):
def test_request(self):
""" Makes sure that the request has correct `site` attribute. """
middleware = CurrentSiteMiddleware()
request = HttpRequest()
middleware.process_request(request)
self.assertEqual(request.site.id, settings.SITE_ID)
|