| repo_name | ref | path | copies | content |
|---|---|---|---|---|
xtao/code | refs/heads/master | vilya/models/ngit/submodule.py | 3 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
class Submodule(object):
def __init__(self, url, path):
self._url = url
self._path = path
        if 'code.dapps.douban.com/' in url:
            self._host = 'code'
        elif 'github.com/' in url:
            self._host = 'github'
else:
self._host = 'other'
def as_dic(self):
return {
"type": "submodule",
"url": self._url,
"path": self._path,
"host": self._host
}
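
# A minimal usage sketch (hypothetical URL and path, assuming the
# substring checks above):
#
#   sub = Submodule('https://github.com/xtao/code.git', 'deps/code')
#   sub.as_dic()
#   # -> {'type': 'submodule', 'url': '...', 'path': 'deps/code',
#   #     'host': 'github'}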
|
mitodl/xanalytics | refs/heads/master | pytest/hookspec.py | 3 | """ hook specifications for pytest plugins, invoked from main.py and builtin plugins. """
# -------------------------------------------------------------------------
# Initialization
# -------------------------------------------------------------------------
def pytest_addhooks(pluginmanager):
"""called at plugin load time to allow adding new hooks via a call to
pluginmanager.registerhooks(module)."""
def pytest_namespace():
"""return dict of name->object to be made globally available in
the pytest namespace. This hook is called before command line options
are parsed.
"""
def pytest_cmdline_parse(pluginmanager, args):
"""return initialized config object, parsing the specified args. """
pytest_cmdline_parse.firstresult = True
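# Hooks marked with ``firstresult = True`` are called until the first
# implementation returns a non-None value; that value becomes the result.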
def pytest_cmdline_preparse(config, args):
"""(deprecated) modify command line arguments before option parsing. """
def pytest_addoption(parser):
"""register argparse-style options and ini-style config values.
This function must be implemented in a :ref:`plugin <pluginorder>` and is
called once at the beginning of a test run.
:arg parser: To add command line options, call
:py:func:`parser.addoption(...) <_pytest.config.Parser.addoption>`.
To add ini-file values call :py:func:`parser.addini(...)
<_pytest.config.Parser.addini>`.
Options can later be accessed through the
:py:class:`config <_pytest.config.Config>` object, respectively:
- :py:func:`config.getoption(name) <_pytest.config.Config.getoption>` to
retrieve the value of a command line option.
- :py:func:`config.getini(name) <_pytest.config.Config.getini>` to retrieve
a value read from an ini-style file.
The config object is passed around on many internal objects via the ``.config``
attribute or can be retrieved as the ``pytestconfig`` fixture or accessed
via (deprecated) ``pytest.config``.
"""
def pytest_cmdline_main(config):
""" called for performing the main command line action. The default
implementation will invoke the configure hooks and runtest_mainloop. """
pytest_cmdline_main.firstresult = True
def pytest_load_initial_conftests(args, early_config, parser):
""" implements the loading of initial conftest files ahead
of command line option parsing. """
def pytest_configure(config):
""" called after command line options have been parsed
and all plugins and initial conftest files been loaded.
"""
def pytest_unconfigure(config):
""" called before test process is exited. """
def pytest_runtestloop(session):
""" called for performing the main runtest loop
(after collection finished). """
pytest_runtestloop.firstresult = True
# -------------------------------------------------------------------------
# collection hooks
# -------------------------------------------------------------------------
def pytest_collection(session):
""" perform the collection protocol for the given session. """
pytest_collection.firstresult = True
def pytest_collection_modifyitems(session, config, items):
""" called after collection has been performed, may filter or re-order
the items in-place."""
def pytest_collection_finish(session):
""" called after collection has been performed and modified. """
def pytest_ignore_collect(path, config):
""" return True to prevent considering this path for collection.
This hook is consulted for all files and directories prior to calling
more specific hooks.
"""
pytest_ignore_collect.firstresult = True
def pytest_collect_directory(path, parent):
""" called before traversing a directory for collection files. """
pytest_collect_directory.firstresult = True
def pytest_collect_file(path, parent):
""" return collection Node or None for the given path. Any new node
needs to have the specified ``parent`` as a parent."""
# logging hooks for collection
def pytest_collectstart(collector):
""" collector starts collecting. """
def pytest_itemcollected(item):
""" we just collected a test item. """
def pytest_collectreport(report):
""" collector finished collecting. """
def pytest_deselected(items):
""" called for test items deselected by keyword. """
def pytest_make_collect_report(collector):
""" perform ``collector.collect()`` and return a CollectReport. """
pytest_make_collect_report.firstresult = True
# -------------------------------------------------------------------------
# Python test function related hooks
# -------------------------------------------------------------------------
def pytest_pycollect_makemodule(path, parent):
""" return a Module collector or None for the given path.
This hook will be called for each matching test module path.
The pytest_collect_file hook needs to be used if you want to
create test modules for files that do not match as a test module.
"""
pytest_pycollect_makemodule.firstresult = True
def pytest_pycollect_makeitem(collector, name, obj):
""" return custom item/collector for a python object in a module, or None. """
pytest_pycollect_makeitem.firstresult = True
def pytest_pyfunc_call(pyfuncitem):
""" call underlying test function. """
pytest_pyfunc_call.firstresult = True
def pytest_generate_tests(metafunc):
""" generate (multiple) parametrized calls to a test function."""
# -------------------------------------------------------------------------
# generic runtest related hooks
# -------------------------------------------------------------------------
def pytest_itemstart(item, node=None):
""" (deprecated, use pytest_runtest_logstart). """
def pytest_runtest_protocol(item, nextitem):
""" implements the runtest_setup/call/teardown protocol for
the given test item, including capturing exceptions and calling
reporting hooks.
:arg item: test item for which the runtest protocol is performed.
:arg nextitem: the scheduled-to-be-next test item (or None if this
is the end my friend). This argument is passed on to
:py:func:`pytest_runtest_teardown`.
:return boolean: True if no further hook implementations should be invoked.
"""
pytest_runtest_protocol.firstresult = True
def pytest_runtest_logstart(nodeid, location):
""" signal the start of running a single test item. """
def pytest_runtest_setup(item):
""" called before ``pytest_runtest_call(item)``. """
def pytest_runtest_call(item):
""" called to execute the test ``item``. """
def pytest_runtest_teardown(item, nextitem):
""" called after ``pytest_runtest_call``.
:arg nextitem: the scheduled-to-be-next test item (None if no further
test item is scheduled). This argument can be used to
perform exact teardowns, i.e. calling just enough finalizers
so that nextitem only needs to call setup-functions.
"""
def pytest_runtest_makereport(item, call):
""" return a :py:class:`_pytest.runner.TestReport` object
for the given :py:class:`pytest.Item` and
:py:class:`_pytest.runner.CallInfo`.
"""
pytest_runtest_makereport.firstresult = True
def pytest_runtest_logreport(report):
""" process a test setup/call/teardown report relating to
the respective phase of executing a test. """
# -------------------------------------------------------------------------
# test session related hooks
# -------------------------------------------------------------------------
def pytest_sessionstart(session):
""" before session.main() is called. """
def pytest_sessionfinish(session, exitstatus):
""" whole test run finishes. """
# -------------------------------------------------------------------------
# hooks for customising the assert methods
# -------------------------------------------------------------------------
def pytest_assertrepr_compare(config, op, left, right):
"""return explanation for comparisons in failing assert expressions.
Return None for no custom explanation, otherwise return a list
of strings. The strings will be joined by newlines but any newlines
*in* a string will be escaped. Note that all but the first line will
    be indented slightly; the intention is for the first line to be a summary.
"""
# -------------------------------------------------------------------------
# hooks for influencing reporting (invoked from _pytest_terminal)
# -------------------------------------------------------------------------
def pytest_report_header(config, startdir):
""" return a string to be displayed as header info for terminal reporting."""
def pytest_report_teststatus(report):
""" return result-category, shortletter and verbose word for reporting."""
pytest_report_teststatus.firstresult = True
def pytest_terminal_summary(terminalreporter):
""" add additional section in terminal summary reporting. """
def pytest_logwarning(message, code, nodeid, fslocation):
""" process a warning specified by a message, a code string,
a nodeid and fslocation (both of which may be None
    if the warning is not tied to a particular node/location)."""
# -------------------------------------------------------------------------
# doctest hooks
# -------------------------------------------------------------------------
def pytest_doctest_prepare_content(content):
""" return processed content for a given doctest"""
pytest_doctest_prepare_content.firstresult = True
# -------------------------------------------------------------------------
# error handling and internal debugging hooks
# -------------------------------------------------------------------------
def pytest_plugin_registered(plugin, manager):
""" a new pytest plugin got registered. """
def pytest_internalerror(excrepr, excinfo):
""" called for internal errors. """
def pytest_keyboard_interrupt(excinfo):
""" called for keyboard interrupt. """
def pytest_exception_interact(node, call, report):
""" (experimental, new in 2.4) called when
an exception was raised which can potentially be
interactively handled.
This hook is only called if an exception was raised
that is not an internal exception like "skip.Exception".
"""
def pytest_enter_pdb():
""" called upon pdb.set_trace()"""
|
quamilek/django | refs/heads/master | tests/migrations/test_migrations_conflict/0001_initial.py | 2995 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
operations = [
migrations.CreateModel(
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=255)),
("slug", models.SlugField(null=True)),
("age", models.IntegerField(default=0)),
("silly_field", models.BooleanField(default=False)),
],
),
migrations.CreateModel(
"Tribble",
[
("id", models.AutoField(primary_key=True)),
("fluffy", models.BooleanField(default=True)),
],
)
]
|
ATIX-AG/ansible | refs/heads/devel | lib/ansible/modules/commands/psexec.py | 28 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018, Jordan Borean <jborean93@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: psexec
short_description: Runs commands on a remote Windows host based on the PsExec
model
version_added: "2.6"
description:
- Runs a remote command from a Linux host to a Windows host without WinRM being
set up.
- Can be run on the Ansible controller to bootstrap Windows hosts to get them
ready for WinRM.
options:
hostname:
description:
- The remote Windows host to connect to, can be either an IP address or a
hostname.
required: yes
connection_username:
description:
- The username to use when connecting to the remote Windows host.
- This user must be a member of the C(Administrators) group of the Windows
host.
- Required if the Kerberos requirements are not installed or the username
is a local account to the Windows host.
- Can be omitted to use the default Kerberos principal ticket in the
local credential cache if the Kerberos library is installed.
- If I(process_username) is not specified, then the remote process will run
under a Network Logon under this account.
connection_password:
description:
    - The password for I(connection_username).
- Required if the Kerberos requirements are not installed or the username
is a local account to the Windows host.
- Can be omitted to use a Kerberos principal ticket for the principal set
      by I(connection_username) if the Kerberos library is installed and the
ticket has already been retrieved with the C(kinit) command before.
port:
description:
- The port that the remote SMB service is listening on.
default: 445
encrypt:
description:
- Will use SMB encryption to encrypt the SMB messages sent to and from the
host.
    - This requires the SMB 3 protocol, which is only supported from Windows
      Server 2012 or Windows 8; older versions like Windows 7 or Windows Server
      2008 (R2) must set this to C(no) and use no encryption.
    - When setting to C(no), the packets are in plaintext and can be seen by
      anyone sniffing the network; any process options are included in this.
type: bool
default: 'yes'
connection_timeout:
description:
- The timeout in seconds to wait when receiving the initial SMB negotiate
response from the server.
default: 60
executable:
description:
- The executable to run on the Windows host.
required: yes
arguments:
description:
- Any arguments as a single string to use when running the executable.
working_directory:
description:
- Changes the working directory set when starting the process.
default: C:\Windows\System32
asynchronous:
description:
- Will run the command as a detached process and the module returns
      immediately after starting the process while the process continues to
run in the background.
- The I(stdout) and I(stderr) return values will be null when this is set
to C(yes).
- The I(stdin) option does not work with this type of process.
    - The I(rc) return value is not set when this is C(yes).
type: bool
default: 'no'
load_profile:
description:
- Runs the remote command with the user's profile loaded.
type: bool
default: 'yes'
process_username:
description:
- The user to run the process as.
- This can be set to run the process under an Interactive logon of the
specified account which bypasses limitations of a Network logon used when
this isn't specified.
- If omitted then the process is run under the same account as
I(connection_username) with a Network logon.
    - Set to C(System) to run as the builtin SYSTEM account; no password is
      required with this account.
- If I(encrypt) is C(no), the username and password are sent as a simple
XOR scrambled byte string that is not encrypted. No special tools are
      required to get the username and password, just knowledge of the protocol.
process_password:
description:
- The password for I(process_username).
- Required if I(process_username) is defined and not C(System).
integrity_level:
description:
- The integrity level of the process when I(process_username) is defined
and is not equal to C(System).
    - When C(default), the default integrity level based on the system setup
      is used.
- When C(elevated), the command will be run with Administrative rights.
- When C(limited), the command will be forced to run with
non-Administrative rights.
choices:
- limited
- default
- elevated
default: default
interactive:
description:
- Will run the process as an interactive process that shows a process
Window of the Windows session specified by I(interactive_session).
- The I(stdout) and I(stderr) return values will be null when this is set
to C(yes).
- The I(stdin) option does not work with this type of process.
type: bool
default: 'no'
interactive_session:
description:
- The Windows session ID to use when displaying the interactive process on
the remote Windows host.
- This is only valid when I(interactive) is C(yes).
- The default is C(0) which is the console session of the Windows host.
default: 0
priority:
description:
- Set the command's priority on the Windows host.
- See U(https://msdn.microsoft.com/en-us/library/windows/desktop/ms683211.aspx)
for more details.
choices:
- above_normal
- below_normal
- high
- idle
- normal
- realtime
default: normal
show_ui_on_logon_screen:
description:
- Shows the process UI on the Winlogon secure desktop when
I(process_username) is C(System).
type: bool
default: 'no'
process_timeout:
description:
- The timeout in seconds that is placed upon the running process.
- A value of C(0) means no timeout.
default: 0
stdin:
description:
- Data to send on the stdin pipe once the process has started.
- This option has no effect when I(interactive) or I(asynchronous) is
C(yes).
requirements:
- pypsexec
- smbprotocol[kerberos] for optional Kerberos authentication
notes:
- This module requires the Windows host to have SMB configured and enabled,
and port 445 opened on the firewall.
- This module will wait until the process is finished unless I(asynchronous)
is C(yes), ensure the process is run as a non-interactive command to avoid
infinite hangs waiting for input.
- The I(connection_username) must be a member of the local Administrator group
of the Windows host. For non-domain joined hosts, the
C(LocalAccountTokenFilterPolicy) should be set to C(1) to ensure this works,
see U(https://support.microsoft.com/en-us/help/951016/description-of-user-account-control-and-remote-restrictions-in-windows).
- For more information on this module and the various host requirements, see
U(https://github.com/jborean93/pypsexec).
author:
- Jordan Borean (@jborean93)
'''
EXAMPLES = r'''
- name: run a cmd.exe command
psexec:
hostname: server
connection_username: username
connection_password: password
executable: cmd.exe
arguments: /c echo Hello World
- name: run a PowerShell command
psexec:
hostname: server.domain.local
connection_username: username@DOMAIN.LOCAL
connection_password: password
executable: powershell.exe
arguments: Write-Host Hello World
- name: send data through stdin
psexec:
hostname: 192.168.1.2
connection_username: username
connection_password: password
executable: powershell.exe
arguments: '-'
stdin: |
Write-Host Hello World
Write-Error Error Message
exit 0
- name: Run the process as a different user
psexec:
hostname: server
    connection_username: username
connection_password: password
executable: whoami.exe
arguments: /all
process_username: anotheruser
process_password: anotherpassword
- name: Run the process asynchronously
psexec:
hostname: server
connection_username: username
connection_password: password
executable: cmd.exe
arguments: /c rmdir C:\temp
asynchronous: yes
- name: Use Kerberos authentication for the connection (requires smbprotocol[kerberos])
psexec:
hostname: host.domain.local
connection_username: user@DOMAIN.LOCAL
executable: C:\some\path\to\executable.exe
arguments: /s
- name: Disable encryption to work with Windows 7/Server 2008 (R2)
psexec:
    hostname: windows-pc
connection_username: Administrator
connection_password: Password01
encrypt: no
integrity_level: elevated
process_username: Administrator
process_password: Password01
executable: powershell.exe
arguments: (New-Object -ComObject Microsoft.Update.Session).CreateUpdateInstaller().IsBusy
- name: Download and run ConfigureRemotingForAnsible.ps1 to setup WinRM
psexec:
hostname: windows-pc
connection_username: Administrator
connection_password: Password01
encrypt: yes
executable: powershell.exe
arguments: '-'
stdin: |
$ErrorActionPreference = "Stop"
$sec_protocols = [Net.ServicePointManager]::SecurityProtocol -bor [Net.SecurityProtocolType]::SystemDefault
$sec_protocols = $sec_protocols -bor [Net.SecurityProtocolType]::Tls12
[Net.ServicePointManager]::SecurityProtocol = $sec_protocols
$url = "https://github.com/ansible/ansible/raw/devel/examples/scripts/ConfigureRemotingForAnsible.ps1"
Invoke-Expression ((New-Object Net.WebClient).DownloadString($url))
exit
'''
RETURN = r'''
msg:
description: Any exception details when trying to run the process
returned: module failed
type: str
sample: 'Received exception from remote PAExec service: Failed to start "invalid.exe". The system cannot find the file specified. [Err=0x2, 2]'
stdout:
description: The stdout from the remote process
returned: success and interactive or asynchronous is 'no'
type: str
sample: Hello World
stderr:
description: The stderr from the remote process
returned: success and interactive or asynchronous is 'no'
type: str
sample: Error [10] running process
pid:
description: The process ID of the asynchronous process that was created
returned: success and asynchronous is 'yes'
type: int
sample: 719
rc:
description: The return code of the remote process
returned: success and asynchronous is 'no'
type: int
sample: 0
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_bytes, to_text
PYPSEXEC_IMP_ERR = None
try:
from pypsexec import client
from pypsexec.exceptions import PypsexecException, PAExecException, \
PDUException, SCMRException
from pypsexec.paexec import ProcessPriority
from smbprotocol.exceptions import SMBException, SMBAuthenticationError, \
SMBResponseException
HAS_PYPSEXEC = True
except ImportError as exc:
PYPSEXEC_IMP_ERR = exc
HAS_PYPSEXEC = False
KERBEROS_IMP_ERR = None
try:
import gssapi
# GSSAPI extension required for Kerberos Auth in SMB
from gssapi.raw import inquire_sec_context_by_oid
HAS_KERBEROS = True
except ImportError as exc:
KERBEROS_IMP_ERR = exc
HAS_KERBEROS = False
def remove_artifacts(module, client):
try:
client.remove_service()
except (SMBException, PypsexecException) as exc:
module.warn("Failed to cleanup PAExec service and executable: %s"
% to_text(exc))
def main():
module_args = dict(
hostname=dict(type='str', required=True),
connection_username=dict(type='str'),
connection_password=dict(type='str', no_log=True),
port=dict(type='int', required=False, default=445),
encrypt=dict(type='bool', default=True),
connection_timeout=dict(type='int', default=60),
executable=dict(type='str', required=True),
arguments=dict(type='str'),
working_directory=dict(type='str', default=r'C:\Windows\System32'),
asynchronous=dict(type='bool', default=False),
load_profile=dict(type='bool', default=True),
process_username=dict(type='str'),
process_password=dict(type='str', no_log=True),
integrity_level=dict(type='str', default='default',
choices=['default', 'elevated', 'limited']),
interactive=dict(type='bool', default=False),
interactive_session=dict(type='int', default=0),
priority=dict(type='str', default='normal',
choices=['above_normal', 'below_normal', 'high',
'idle', 'normal', 'realtime']),
show_ui_on_logon_screen=dict(type='bool', default=False),
process_timeout=dict(type='int', default=0),
stdin=dict(type='str')
)
result = dict(
changed=False,
)
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=False,
)
process_username = module.params['process_username']
process_password = module.params['process_password']
use_system = False
if process_username is not None and process_username.lower() == "system":
use_system = True
process_username = None
process_password = None
if process_username is not None and process_password is None:
module.fail_json(msg='parameters are required together when not '
'running as System: process_username, '
'process_password')
if not HAS_PYPSEXEC:
module.fail_json(msg='The pypsexec python module is required',
exception=PYPSEXEC_IMP_ERR)
hostname = module.params['hostname']
connection_username = module.params['connection_username']
connection_password = module.params['connection_password']
port = module.params['port']
encrypt = module.params['encrypt']
connection_timeout = module.params['connection_timeout']
executable = module.params['executable']
arguments = module.params['arguments']
working_directory = module.params['working_directory']
asynchronous = module.params['asynchronous']
load_profile = module.params['load_profile']
elevated = module.params['integrity_level'] == "elevated"
limited = module.params['integrity_level'] == "limited"
interactive = module.params['interactive']
interactive_session = module.params['interactive_session']
priority = {
"above_normal": ProcessPriority.ABOVE_NORMAL_PRIORITY_CLASS,
"below_normal": ProcessPriority.BELOW_NORMAL_PRIORITY_CLASS,
"high": ProcessPriority.HIGH_PRIORITY_CLASS,
"idle": ProcessPriority.IDLE_PRIORITY_CLASS,
"normal": ProcessPriority.NORMAL_PRIORITY_CLASS,
"realtime": ProcessPriority.REALTIME_PRIORITY_CLASS
}[module.params['priority']]
show_ui_on_logon_screen = module.params['show_ui_on_logon_screen']
process_timeout = module.params['process_timeout']
stdin = module.params['stdin']
    if (connection_username is None or connection_password is None) and \
            not HAS_KERBEROS:
module.fail_json(msg='The gssapi python module with the GGF extension '
'is required for Kerberos authentication',
exception=KERBEROS_IMP_ERR)
win_client = client.Client(server=hostname, username=connection_username,
password=connection_password, port=port,
encrypt=encrypt)
try:
win_client.connect(timeout=connection_timeout)
except SMBAuthenticationError as exc:
module.fail_json(msg='Failed to authenticate over SMB: %s'
% to_text(exc))
except SMBResponseException as exc:
module.fail_json(msg='Received unexpected SMB response when opening '
'the connection: %s' % to_text(exc))
except PDUException as exc:
module.fail_json(msg='Received an exception with RPC PDU message: %s'
% to_text(exc))
except SCMRException as exc:
module.fail_json(msg='Received an exception when dealing with SCMR on '
'the Windows host: %s' % to_text(exc))
except (SMBException, PypsexecException) as exc:
module.fail_json(msg=to_text(exc))
# create PAExec service and run the process
result['changed'] = True
b_stdin = to_bytes(stdin, encoding='utf-8') if stdin else None
run_args = dict(
executable=executable, arguments=arguments, asynchronous=asynchronous,
load_profile=load_profile, interactive_session=interactive_session,
run_elevated=elevated, run_limited=limited,
username=process_username, password=process_password,
use_system_account=use_system, working_dir=working_directory,
priority=priority, show_ui_on_win_logon=show_ui_on_logon_screen,
timeout_seconds=process_timeout, stdin=b_stdin
)
try:
win_client.create_service()
except (SMBException, PypsexecException) as exc:
module.fail_json(msg='Failed to create PAExec service: %s'
% to_text(exc))
try:
proc_result = win_client.run_executable(**run_args)
except (SMBException, PypsexecException) as exc:
module.fail_json(msg='Received error when running remote process: %s'
% to_text(exc))
finally:
remove_artifacts(module, win_client)
if asynchronous:
result['pid'] = proc_result[2]
elif interactive:
result['rc'] = proc_result[2]
else:
result['stdout'] = proc_result[0]
result['stderr'] = proc_result[1]
result['rc'] = proc_result[2]
# close the SMB connection
try:
win_client.disconnect()
except (SMBException, PypsexecException) as exc:
module.warn("Failed to close the SMB connection: %s" % to_text(exc))
module.exit_json(**result)
if __name__ == '__main__':
main()
|
pam-bot/SMSQuery | refs/heads/master | lib/flask/testsuite/deprecations.py | 563 | # -*- coding: utf-8 -*-
"""
flask.testsuite.deprecations
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests deprecation support.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import flask
import unittest
from flask.testsuite import FlaskTestCase, catch_warnings
class DeprecationsTestCase(FlaskTestCase):
"""not used currently"""
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(DeprecationsTestCase))
return suite
|
pquentin/django | refs/heads/stable/1.8.x | tests/generic_views/test_forms.py | 453 | from __future__ import unicode_literals
from django import forms
from .models import Author
class AuthorForm(forms.ModelForm):
name = forms.CharField()
slug = forms.SlugField()
class Meta:
model = Author
fields = ['name', 'slug']
class ContactForm(forms.Form):
name = forms.CharField()
message = forms.CharField(widget=forms.Textarea)
|
brosander/lc-tools | refs/heads/master | lcNeat.py | 1 | #!/usr/bin/python
import argparse
import csv
import json
import logging
import math
import MultiNEAT as NEAT
import os
import pickle
import random
import shutil
import sys
import time
def outputFilename(outputDir, prefix, timestamp, extension):
return '/'.join([outputDir, '.'.join([prefix, str(timestamp), extension])])
def jsonDump(value, outputDir, prefix, timestamp):
with open(outputFilename(outputDir, prefix, timestamp, 'json'), 'w') as outFile:
json.dump(value, outFile, indent=4, sort_keys=True)
def percentToFraction(percent):
assert percent.endswith('%')
splitPct = percent.split('.')
decimal = splitPct[1][:-1]
rawNum = splitPct[0] + decimal
result = int(rawNum) / (100.0 * (math.pow(10, len(decimal))) )
assert result > 0 and result < 0.4
return result
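
# A worked example of the conversion above: percentToFraction('13.57%')
# splits into '13' and '57%', builds rawNum '1357', and returns
# 1357 / (100.0 * 10**2) == 0.1357. The asserts require a '%' suffix, a
# decimal point, and a result inside the (0, 0.4) interest-rate range.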
class HistoricalData(object):
def __init__(self, inputDir, logger, percentTraining):
self.training = []
self.test = []
result = []
valid_status = set(['Default', 'Fully Paid', 'Charged Off'])
seen_status = set([])
for potentialFile in os.listdir(inputDir):
if potentialFile.endswith('.csv'):
validRows = 0
invalidRows = 0
with open('/'.join([inputDir, potentialFile]), 'rb') as csvFile:
csvFile.readline()
csvReader = csv.DictReader(csvFile)
for row in csvReader:
int_rate_raw = row['int_rate']
loan_status = row['loan_status']
inq_last_6mths = row['inq_last_6mths']
seen_status.add(loan_status)
if int_rate_raw and inq_last_6mths and loan_status in valid_status:
int_rate = percentToFraction(int_rate_raw)
inq_last_6mths = int(inq_last_6mths)
row['int_rate'] = int_rate
row['inq_last_6mths'] = inq_last_6mths
result.append((row, [int_rate, inq_last_6mths]))
validRows += 1
else:
invalidRows += 1
logger.info(potentialFile + ' had ' + str(validRows) + ' valid rows, ' + str(invalidRows) + ' invalid rows.')
logger.info('Status codes encountered: ' + str(seen_status))
resultLen = len(result)
trainingSet = set(random.sample(xrange(resultLen), int(resultLen * percentTraining)))
for index, elem in enumerate(result):
if index in trainingSet:
self.training.append(elem)
else:
self.test.append(elem)
winners = 0
for inst in self.training:
if inst[0]['loan_status'] == 'Fully Paid':
winners += 1
logger.info('Winners: ' + str(winners) + ' out of ' + str(len(self.training)))
def evaluate(genome, inputs):
# this creates a neural network (phenotype) from the genome
net = NEAT.NeuralNetwork()
genome.BuildPhenotype(net)
fitness = 0
picks = ([], [])
for inst in inputs:
net.Input(inst[1])
net.Activate()
output = net.Output()
if output[0] >= 0.5:
if inst[0]['loan_status'] == 'Fully Paid':
fitness += inst[0]['int_rate']
picks[0].append(inst)
else:
fitness -= 1
picks[1].append(inst)
return (fitness, picks[0], picks[1])
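
# Fitness semantics of evaluate(): every loan the network picks
# (output >= 0.5) adds its interest rate to the fitness if it was
# 'Fully Paid' and subtracts 1 otherwise; picks[0] and picks[1] collect
# the winning and losing picks respectively.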
trueStrings = set(['true', 'True', 'y', 'Y'])
def setParam(params, key, value):
try:
setattr(params, key, value)
except:
try:
setattr(params, key, float(value))
except:
try:
setattr(params, key, int(value))
except:
setattr(params, key, value in trueStrings)
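
# setParam() coerces command-line parameter strings by attempting, in
# order: the raw string, float, int, and finally a boolean test against
# trueStrings. For example (property name illustrative), a float-valued
# NEAT parameter passed as -p MutationRate=0.25 falls through to the
# float branch and is set as 0.25.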
def runNeat(historicalData, outputDir, logger, timestamp, generations, parameters):
inputs = historicalData.training
max_fitness = -50000
max_fitness_genome = None
max_winners = None
max_losers = None
params = NEAT.Parameters()
for parameter in parameters:
key, value = parameter.split('=')
setParam(params, key, value)
genome = NEAT.Genome(0, len(inputs[0][1]), 0, 1, False, NEAT.ActivationFunction.UNSIGNED_SIGMOID, NEAT.ActivationFunction.UNSIGNED_SIGMOID, 0, params)
pop = NEAT.Population(genome, params, True, 1.0)
    for generation in range(generations):  # run for the configured number of generations
# retrieve a list of all genomes in the population
genome_list = NEAT.GetGenomeList(pop)
# apply the evaluation function to all genomes
for genome in genome_list:
fitness, winners, losers = evaluate(genome, inputs)
genome.SetFitness(fitness)
if fitness > max_fitness:
logger.debug('Found new champion with fitness ' + str(fitness) + ' that picked ' + str(len(winners)) + ' winners and ' + str(len(losers)) + ' losers.')
max_fitness = fitness
max_fitness_genome = pickle.dumps(genome)
max_winners = winners
max_losers = losers
# at this point we may output some information regarding the progress of evolution, best fitness, etc.
# it's also the place to put any code that tracks the progress and saves the best genome or the entire
# population. We skip all of this in the tutorial.
# advance to the next generation
pop.Epoch()
logger.info('Done with generation: ' + str(generation))
max_fitness_genome = pickle.loads(max_fitness_genome)
fitness, winners, losers = evaluate(max_fitness_genome, historicalData.test)
logger.debug('Champion performance on test data: ' + str(fitness) + ' fitness picked ' + str(len(winners)) + ' winners and ' + str(len(losers)) + ' losers.')
max_fitness_genome.Save(outputFilename(outputDir, 'maxFitnessGenome', timestamp, 'ge'))
copyScriptSource = os.path.abspath(__file__)
copyScriptDest = outputFilename(outputDir, 'lcNeat', timestamp, 'py')
shutil.copyfile(copyScriptSource, copyScriptDest)
jsonDump(max_winners, outputDir, 'winners', timestamp)
jsonDump(max_losers, outputDir, 'losers', timestamp)
jsonDump(inputs, outputDir, 'sample', timestamp)
def resolveDir(directory):
return os.path.abspath(os.path.expanduser(directory))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='''
This script is intended to run on the historical data from lending club
''', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-i", "--inputDirectory", default=None, help="The input directory with the historical csvs")
parser.add_argument("-o", "--outputDirectory", default=None, help="The output directory")
parser.add_argument("-g", "--generations", type=int, default=100, help="The number of generatios")
parser.add_argument("-t", "--training", type=int, default=70, help="The percent of data to be used for training (the rest will be used to test the winner)")
parser.add_argument("-p", "--parameter", action='append')
args = parser.parse_args()
if not args.inputDirectory:
raise Exception('Must specify input directory')
if not args.outputDirectory:
raise Exception('Must specify output directory')
if not args.parameter:
args.parameter = []
timestamp = time.time()
outputDir = resolveDir(args.outputDirectory)
logFormatter = logging.Formatter("%(asctime)s [%(levelname)-5.5s] %(message)s")
rootLogger = logging.getLogger()
fileHandler = logging.FileHandler(outputFilename(outputDir, 'output', timestamp, 'log'))
fileHandler.setFormatter(logFormatter)
rootLogger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
rootLogger.addHandler(consoleHandler)
rootLogger.setLevel(logging.DEBUG)
runNeat(HistoricalData(resolveDir(args.inputDirectory), rootLogger, args.training / 100.0), outputDir, rootLogger, timestamp, args.generations, args.parameter)
|
blitzmann/Pyfa | refs/heads/master | gui/builtinAdditionPanes/fighterView.py | 1 | # =============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of pyfa.
#
# pyfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyfa. If not, see <http://www.gnu.org/licenses/>.
# =============================================================================
# noinspection PyPackageRequirements
import wx
import gui.globalEvents as GE
from gui.builtinMarketBrowser.events import ItemSelected, ITEM_SELECTED
import gui.mainFrame
import gui.display as d
from gui.builtinViewColumns.state import State
from eos.const import FittingSlot
from gui.contextMenu import ContextMenu
from gui.utils.staticHelpers import DragDropHelper
from service.fit import Fit
from service.market import Market
import gui.fitCommands as cmd
class FighterViewDrop(wx.DropTarget):
def __init__(self, dropFn, *args, **kwargs):
super(FighterViewDrop, self).__init__(*args, **kwargs)
self.dropFn = dropFn
# this is really transferring an EVE itemID
self.dropData = wx.TextDataObject()
self.SetDataObject(self.dropData)
def OnData(self, x, y, t):
if self.GetData():
dragged_data = DragDropHelper.data
data = dragged_data.split(':')
self.dropFn(x, y, data)
return t
class FighterView(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition, style=wx.TAB_TRAVERSAL)
self.mainFrame = gui.mainFrame.MainFrame.getInstance()
self.labels = ["Light", "Heavy", "Support"]
mainSizer = wx.BoxSizer(wx.VERTICAL)
self.fighterDisplay = FighterDisplay(self)
mainSizer.Add(self.fighterDisplay, 1, wx.EXPAND, 0)
textSizer = wx.BoxSizer(wx.HORIZONTAL)
textSizer.AddStretchSpacer()
for x in self.labels:
lbl = wx.StaticText(self, wx.ID_ANY, x.capitalize())
textSizer.Add(lbl, 0, wx.ALIGN_CENTER | wx.LEFT, 5)
lbl = wx.StaticText(self, wx.ID_ANY, "0")
setattr(self, "label%sUsed" % (x.capitalize()), lbl)
textSizer.Add(lbl, 0, wx.ALIGN_CENTER | wx.LEFT, 5)
textSizer.Add(wx.StaticText(self, wx.ID_ANY, "/"), 0, wx.ALIGN_CENTER)
lbl = wx.StaticText(self, wx.ID_ANY, "0")
setattr(self, "label%sTotal" % (x.capitalize()), lbl)
textSizer.Add(lbl, 0, wx.ALIGN_CENTER)
textSizer.AddStretchSpacer()
mainSizer.Add(textSizer, 0, wx.EXPAND, 5)
self.SetSizer(mainSizer)
self.SetAutoLayout(True)
self.mainFrame.Bind(GE.FIT_CHANGED, self.fitChanged)
def fitChanged(self, event):
sFit = Fit.getInstance()
activeFitID = self.mainFrame.getActiveFit()
fit = sFit.getFit(activeFitID)
if fit:
for x in self.labels:
if fit.isStructure:
slot = getattr(FittingSlot, "FS_{}".format(x.upper()))
else:
slot = getattr(FittingSlot, "F_{}".format(x.upper()))
used = fit.getSlotsUsed(slot)
total = fit.getNumSlots(slot)
color = wx.Colour(204, 51, 51) if used > total else wx.SystemSettings.GetColour(
wx.SYS_COLOUR_WINDOWTEXT)
lbl = getattr(self, "label%sUsed" % x.capitalize())
lbl.SetLabel(str(int(used)))
lbl.SetForegroundColour(color)
lbl = getattr(self, "label%sTotal" % x.capitalize())
lbl.SetLabel(str(int(total)))
lbl.SetForegroundColour(color)
self.Refresh()
event.Skip()
class FighterDisplay(d.Display):
DEFAULT_COLS = ["State",
# "Base Icon",
"Base Name",
# "prop:droneDps,droneBandwidth",
# "Max Range",
# "Miscellanea",
"attr:maxVelocity",
"Fighter Abilities",
"Price",
]
def __init__(self, parent):
d.Display.__init__(self, parent, style=wx.LC_SINGLE_SEL | wx.BORDER_NONE)
self.lastFitId = None
self.hoveredRow = None
self.hoveredColumn = None
self.mainFrame.Bind(GE.FIT_CHANGED, self.fitChanged)
self.mainFrame.Bind(ITEM_SELECTED, self.addItem)
self.Bind(wx.EVT_LEFT_DCLICK, self.removeItem)
self.Bind(wx.EVT_LEFT_DOWN, self.click)
self.Bind(wx.EVT_KEY_UP, self.kbEvent)
self.Bind(wx.EVT_MOTION, self.OnMouseMove)
self.Bind(wx.EVT_LEAVE_WINDOW, self.OnLeaveWindow)
self.Bind(wx.EVT_CONTEXT_MENU, self.spawnMenu)
self.Bind(wx.EVT_LIST_BEGIN_DRAG, self.startDrag)
self.SetDropTarget(FighterViewDrop(self.handleDragDrop))
def OnLeaveWindow(self, event):
self.SetToolTip(None)
self.hoveredRow = None
self.hoveredColumn = None
event.Skip()
def OnMouseMove(self, event):
row, _, col = self.HitTestSubItem(event.Position)
if row != self.hoveredRow or col != self.hoveredColumn:
if self.ToolTip is not None:
self.SetToolTip(None)
else:
self.hoveredRow = row
self.hoveredColumn = col
if row != -1 and col != -1 and col < len(self.DEFAULT_COLS):
mod = self.fighters[self.GetItemData(row)]
if self.DEFAULT_COLS[col] == "Miscellanea":
tooltip = self.activeColumns[col].getToolTip(mod)
if tooltip is not None:
self.SetToolTip(tooltip)
else:
self.SetToolTip(None)
else:
self.SetToolTip(None)
else:
self.SetToolTip(None)
event.Skip()
def kbEvent(self, event):
keycode = event.GetKeyCode()
if keycode == wx.WXK_DELETE or keycode == wx.WXK_NUMPAD_DELETE:
row = self.GetFirstSelected()
if row != -1:
fighter = self.fighters[self.GetItemData(row)]
self.removeFighter(fighter)
event.Skip()
def startDrag(self, event):
row = event.GetIndex()
if row != -1:
data = wx.TextDataObject()
dataStr = "fighter:" + str(row)
data.SetText(dataStr)
dropSource = wx.DropSource(self)
dropSource.SetData(data)
DragDropHelper.data = dataStr
dropSource.DoDragDrop()
def handleDragDrop(self, x, y, data):
"""
Handles dragging of items from various pyfa displays which support it
data is list with two indices:
data[0] is hard-coded str of originating source
data[1] is typeID or index of data we want to manipulate
"""
if data[0] == "fighter": # we want to merge fighters
srcRow = int(data[1])
dstRow, _ = self.HitTest((x, y))
if srcRow != -1 and dstRow != -1:
self._merge(srcRow, dstRow)
elif data[0] == "market":
wx.PostEvent(self.mainFrame, ItemSelected(itemID=int(data[1])))
@staticmethod
def _merge(src, dst):
return
FIGHTER_ORDER = ('Heavy Fighter', 'Light Fighter', 'Support Fighter')
def fighterKey(self, fighter):
sMkt = Market.getInstance()
groupName = sMkt.getGroupByItem(fighter.item).name
return (self.FIGHTER_ORDER.index(groupName), fighter.item.name)
def fitChanged(self, event):
sFit = Fit.getInstance()
fit = sFit.getFit(event.fitID)
self.Parent.Parent.Parent.DisablePage(self.Parent, not fit)
# Clear list and get out if current fitId is None
if event.fitID is None and self.lastFitId is not None:
self.DeleteAllItems()
self.lastFitId = None
event.Skip()
return
self.original = fit.fighters if fit is not None else None
self.fighters = fit.fighters[:] if fit is not None else None
if self.fighters is not None:
self.fighters.sort(key=self.fighterKey)
if event.fitID != self.lastFitId:
self.lastFitId = event.fitID
item = self.GetNextItem(-1, wx.LIST_NEXT_ALL, wx.LIST_STATE_DONTCARE)
if item != -1:
self.EnsureVisible(item)
self.deselectItems()
self.update(self.fighters)
event.Skip()
def addItem(self, event):
fitID = self.mainFrame.getActiveFit()
if self.mainFrame.command.Submit(cmd.GuiAddFighterCommand(fitID, event.itemID)):
self.mainFrame.additionsPane.select("Fighters")
event.Skip()
def removeItem(self, event):
row, _ = self.HitTest(event.Position)
if row != -1:
col = self.getColumn(event.Position)
if col != self.getColIndex(State):
fighter = self.fighters[self.GetItemData(row)]
self.removeFighter(fighter)
def removeFighter(self, fighter):
fitID = self.mainFrame.getActiveFit()
self.mainFrame.command.Submit(cmd.GuiRemoveFighterCommand(fitID, self.original.index(fighter)))
def click(self, event):
event.Skip()
row, _ = self.HitTest(event.Position)
if row != -1:
col = self.getColumn(event.Position)
if col == self.getColIndex(State):
fitID = self.mainFrame.getActiveFit()
fighter = self.fighters[row]
self.mainFrame.command.Submit(cmd.GuiToggleFighterCommand(fitID, self.original.index(fighter)))
def spawnMenu(self, event):
sel = self.GetFirstSelected()
if sel != -1:
fighter = self.fighters[sel]
sMkt = Market.getInstance()
sourceContext = "fighterItem"
itemContext = sMkt.getCategoryByItem(fighter.item).name
menu = ContextMenu.getMenu((fighter,), (sourceContext, itemContext))
self.PopupMenu(menu)
|
behzadnouri/scipy | refs/heads/master | runtests.py | 15 | #!/usr/bin/env python
"""
runtests.py [OPTIONS] [-- ARGS]
Run tests, building the project first.
Examples::
$ python runtests.py
$ python runtests.py -s {SAMPLE_SUBMODULE}
$ python runtests.py -t {SAMPLE_TEST}
$ python runtests.py --ipython
$ python runtests.py --python somescript.py
$ python runtests.py --bench
Run a debugger:
$ gdb --args python runtests.py [...other args...]
Generate C code coverage listing under build/lcov/:
(requires http://ltp.sourceforge.net/coverage/lcov.php)
$ python runtests.py --gcov [...other args...]
$ python runtests.py --lcov-html
"""
#
# This is a generic test runner script for projects using Numpy's test
# framework. Change the following values to adapt to your project:
#
PROJECT_MODULE = "scipy"
PROJECT_ROOT_FILES = ['scipy', 'LICENSE.txt', 'setup.py']
SAMPLE_TEST = "scipy/special/tests/test_basic.py:test_xlogy"
SAMPLE_SUBMODULE = "optimize"
EXTRA_PATH = ['/usr/lib/ccache', '/usr/lib/f90cache',
'/usr/local/lib/ccache', '/usr/local/lib/f90cache']
# ---------------------------------------------------------------------
if __doc__ is None:
__doc__ = "Run without -OO if you want usage info"
else:
__doc__ = __doc__.format(**globals())
import sys
import os
# In case we are run from the source directory, we don't want to import the
# project from there:
sys.path.pop(0)
import shutil
import subprocess
import time
import imp
from argparse import ArgumentParser, REMAINDER
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__)))
def main(argv):
parser = ArgumentParser(usage=__doc__.lstrip())
parser.add_argument("--verbose", "-v", action="count", default=1,
help="more verbosity")
parser.add_argument("--no-build", "-n", action="store_true", default=False,
help="do not build the project (use system installed version)")
parser.add_argument("--build-only", "-b", action="store_true", default=False,
help="just build, do not run any tests")
parser.add_argument("--doctests", action="store_true", default=False,
help="Run doctests in module")
parser.add_argument("--refguide-check", action="store_true", default=False,
help="Run refguide check (do not run regular tests.)")
parser.add_argument("--coverage", action="store_true", default=False,
help=("report coverage of project code. HTML output goes "
"under build/coverage"))
parser.add_argument("--gcov", action="store_true", default=False,
help=("enable C code coverage via gcov (requires GCC). "
"gcov output goes to build/**/*.gc*"))
parser.add_argument("--lcov-html", action="store_true", default=False,
help=("produce HTML for C code coverage information "
"from a previous run with --gcov. "
"HTML output goes to build/lcov/"))
parser.add_argument("--mode", "-m", default="fast",
help="'fast', 'full', or something that could be "
"passed to nosetests -A [default: fast]")
parser.add_argument("--submodule", "-s", default=None,
help="Submodule whose tests to run (cluster, constants, ...)")
parser.add_argument("--pythonpath", "-p", default=None,
help="Paths to prepend to PYTHONPATH")
parser.add_argument("--tests", "-t", action='append',
help="Specify tests to run")
parser.add_argument("--python", action="store_true",
help="Start a Python shell with PYTHONPATH set")
parser.add_argument("--ipython", "-i", action="store_true",
help="Start IPython shell with PYTHONPATH set")
parser.add_argument("--shell", action="store_true",
help="Start Unix shell with PYTHONPATH set")
parser.add_argument("--debug", "-g", action="store_true",
help="Debug build")
parser.add_argument("--parallel", "-j", type=int, default=1,
help="Number of parallel jobs during build (requires "
"Numpy 1.10 or greater).")
parser.add_argument("--show-build-log", action="store_true",
help="Show build output rather than using a log file")
parser.add_argument("--bench", action="store_true",
help="Run benchmark suite instead of test suite")
parser.add_argument("--bench-compare", action="append", metavar="BEFORE",
help=("Compare benchmark results of current HEAD to BEFORE. "
"Use an additional --bench-compare=COMMIT to override HEAD with COMMIT. "
"Note that you need to commit your changes first!"
))
parser.add_argument("args", metavar="ARGS", default=[], nargs=REMAINDER,
help="Arguments to pass to Nose, Python or shell")
args = parser.parse_args(argv)
if args.bench_compare:
args.bench = True
args.no_build = True # ASV does the building
if args.lcov_html:
# generate C code coverage output
lcov_generate()
sys.exit(0)
if args.pythonpath:
for p in reversed(args.pythonpath.split(os.pathsep)):
sys.path.insert(0, p)
if args.gcov:
gcov_reset_counters()
if args.debug and args.bench:
print("*** Benchmarks should not be run against debug version; remove -g flag ***")
if not args.no_build:
site_dir = build_project(args)
sys.path.insert(0, site_dir)
os.environ['PYTHONPATH'] = site_dir
extra_argv = args.args[:]
if extra_argv and extra_argv[0] == '--':
extra_argv = extra_argv[1:]
if args.python:
if extra_argv:
# Don't use subprocess, since we don't want to include the
# current path in PYTHONPATH.
sys.argv = extra_argv
with open(extra_argv[0], 'r') as f:
script = f.read()
sys.modules['__main__'] = imp.new_module('__main__')
ns = dict(__name__='__main__',
__file__=extra_argv[0])
exec_(script, ns)
sys.exit(0)
else:
import code
code.interact()
sys.exit(0)
if args.ipython:
import IPython
IPython.embed(user_ns={})
sys.exit(0)
if args.shell:
shell = os.environ.get('SHELL', 'sh')
print("Spawning a Unix shell...")
os.execv(shell, [shell] + extra_argv)
sys.exit(1)
if args.coverage:
dst_dir = os.path.join(ROOT_DIR, 'build', 'coverage')
fn = os.path.join(dst_dir, 'coverage_html.js')
if os.path.isdir(dst_dir) and os.path.isfile(fn):
shutil.rmtree(dst_dir)
extra_argv += ['--cover-html',
'--cover-html-dir='+dst_dir]
if args.refguide_check:
cmd = [os.path.join(ROOT_DIR, 'tools', 'refguide_check.py'),
'--doctests']
if args.submodule:
cmd += [args.submodule]
os.execv(sys.executable, [sys.executable] + cmd)
sys.exit(0)
if args.bench:
# Run ASV
items = extra_argv
if args.tests:
items += args.tests
if args.submodule:
items += [args.submodule]
bench_args = []
for a in items:
bench_args.extend(['--bench', a])
if not args.bench_compare:
cmd = [os.path.join(ROOT_DIR, 'benchmarks', 'run.py'),
'run', '-n', '-e', '--python=same'] + bench_args
os.execv(sys.executable, [sys.executable] + cmd)
sys.exit(1)
else:
if len(args.bench_compare) == 1:
commit_a = args.bench_compare[0]
commit_b = 'HEAD'
elif len(args.bench_compare) == 2:
commit_a, commit_b = args.bench_compare
else:
p.error("Too many commits to compare benchmarks for")
# Check for uncommitted files
if commit_b == 'HEAD':
r1 = subprocess.call(['git', 'diff-index', '--quiet', '--cached', 'HEAD'])
r2 = subprocess.call(['git', 'diff-files', '--quiet'])
if r1 != 0 or r2 != 0:
print("*"*80)
print("WARNING: you have uncommitted changes --- these will NOT be benchmarked!")
print("*"*80)
# Fix commit ids (HEAD is local to current repo)
p = subprocess.Popen(['git', 'rev-parse', commit_b], stdout=subprocess.PIPE)
out, err = p.communicate()
commit_b = out.strip()
p = subprocess.Popen(['git', 'rev-parse', commit_a], stdout=subprocess.PIPE)
out, err = p.communicate()
commit_a = out.strip()
cmd = [os.path.join(ROOT_DIR, 'benchmarks', 'run.py'),
'--current-repo', 'continuous', '-e', '-f', '1.05',
commit_a, commit_b] + bench_args
os.execv(sys.executable, [sys.executable] + cmd)
sys.exit(1)
test_dir = os.path.join(ROOT_DIR, 'build', 'test')
if args.build_only:
sys.exit(0)
elif args.submodule:
modname = PROJECT_MODULE + '.' + args.submodule
try:
__import__(modname)
test = sys.modules[modname].test
except (ImportError, KeyError, AttributeError) as e:
print("Cannot run tests for %s (%s)" % (modname, e))
sys.exit(2)
elif args.tests:
def fix_test_path(x):
# fix up test path
p = x.split(':')
p[0] = os.path.relpath(os.path.abspath(p[0]),
test_dir)
return ':'.join(p)
tests = [fix_test_path(x) for x in args.tests]
def test(*a, **kw):
extra_argv = kw.pop('extra_argv', ())
extra_argv = extra_argv + tests[1:]
kw['extra_argv'] = extra_argv
from numpy.testing import Tester
return Tester(tests[0]).test(*a, **kw)
else:
__import__(PROJECT_MODULE)
test = sys.modules[PROJECT_MODULE].test
# Run the tests under build/test
try:
shutil.rmtree(test_dir)
except OSError:
pass
try:
os.makedirs(test_dir)
except OSError:
pass
shutil.copyfile(os.path.join(ROOT_DIR, '.coveragerc'),
os.path.join(test_dir, '.coveragerc'))
cwd = os.getcwd()
try:
os.chdir(test_dir)
result = test(args.mode,
verbose=args.verbose,
extra_argv=extra_argv,
doctests=args.doctests,
coverage=args.coverage)
finally:
os.chdir(cwd)
if isinstance(result, bool):
sys.exit(0 if result else 1)
elif result.wasSuccessful():
sys.exit(0)
else:
sys.exit(1)
def build_project(args):
"""
Build a dev version of the project.
Returns
-------
site_dir
site-packages directory where it was installed
"""
root_ok = [os.path.exists(os.path.join(ROOT_DIR, fn))
for fn in PROJECT_ROOT_FILES]
if not all(root_ok):
print("To build the project, run runtests.py in "
"git checkout or unpacked source")
sys.exit(1)
dst_dir = os.path.join(ROOT_DIR, 'build', 'testenv')
env = dict(os.environ)
cmd = [sys.executable, 'setup.py']
# Always use ccache, if installed
env['PATH'] = os.pathsep.join(EXTRA_PATH + env.get('PATH', '').split(os.pathsep))
if args.debug or args.gcov:
# assume everyone uses gcc/gfortran
env['OPT'] = '-O0 -ggdb'
env['FOPT'] = '-O0 -ggdb'
if args.gcov:
import distutils.sysconfig
cvars = distutils.sysconfig.get_config_vars()
env['OPT'] = '-O0 -ggdb'
env['FOPT'] = '-O0 -ggdb'
env['CC'] = cvars['CC'] + ' --coverage'
env['CXX'] = cvars['CXX'] + ' --coverage'
env['F77'] = 'gfortran --coverage '
env['F90'] = 'gfortran --coverage '
env['LDSHARED'] = cvars['LDSHARED'] + ' --coverage'
env['LDFLAGS'] = " ".join(cvars['LDSHARED'].split()[1:]) + ' --coverage'
cmd += ['build']
if args.parallel > 1:
cmd += ['-j', str(args.parallel)]
# Install; avoid producing eggs so scipy can be imported from dst_dir.
cmd += ['install', '--prefix=' + dst_dir,
'--single-version-externally-managed',
'--record=' + dst_dir + 'tmp_install_log.txt']
from distutils.sysconfig import get_python_lib
site_dir = get_python_lib(prefix=dst_dir, plat_specific=True)
# easy_install won't install to a path that Python by default cannot see
# and isn't on the PYTHONPATH. Plus, it has to exist.
if not os.path.exists(site_dir):
os.makedirs(site_dir)
env['PYTHONPATH'] = site_dir
log_filename = os.path.join(ROOT_DIR, 'build.log')
if args.show_build_log:
ret = subprocess.call(cmd, env=env, cwd=ROOT_DIR)
    else:
print("Building, see build.log...")
with open(log_filename, 'w') as log:
p = subprocess.Popen(cmd, env=env, stdout=log, stderr=log,
cwd=ROOT_DIR)
try:
# Wait for it to finish, and print something to indicate the
# process is alive, but only if the log file has grown (to
                # allow continuous integration environments to kill a hanging
# process accurately if it produces no output)
last_blip = time.time()
last_log_size = os.stat(log_filename).st_size
while p.poll() is None:
time.sleep(0.5)
if time.time() - last_blip > 60:
log_size = os.stat(log_filename).st_size
if log_size > last_log_size:
print(" ... build in progress")
last_blip = time.time()
last_log_size = log_size
ret = p.wait()
except:
p.terminate()
raise
if ret == 0:
print("Build OK")
else:
if not args.show_build_log:
with open(log_filename, 'r') as f:
print(f.read())
print("Build failed!")
sys.exit(1)
return site_dir
#
# GCOV support
#
def gcov_reset_counters():
print("Removing previous GCOV .gcda files...")
build_dir = os.path.join(ROOT_DIR, 'build')
for dirpath, dirnames, filenames in os.walk(build_dir):
for fn in filenames:
if fn.endswith('.gcda') or fn.endswith('.da'):
pth = os.path.join(dirpath, fn)
os.unlink(pth)
#
# LCOV support
#
LCOV_OUTPUT_FILE = os.path.join(ROOT_DIR, 'build', 'lcov.out')
LCOV_HTML_DIR = os.path.join(ROOT_DIR, 'build', 'lcov')
def lcov_generate():
try: os.unlink(LCOV_OUTPUT_FILE)
except OSError: pass
try: shutil.rmtree(LCOV_HTML_DIR)
except OSError: pass
print("Capturing lcov info...")
subprocess.call(['lcov', '-q', '-c',
'-d', os.path.join(ROOT_DIR, 'build'),
'-b', ROOT_DIR,
'--output-file', LCOV_OUTPUT_FILE])
print("Generating lcov HTML output...")
ret = subprocess.call(['genhtml', '-q', LCOV_OUTPUT_FILE,
'--output-directory', LCOV_HTML_DIR,
'--legend', '--highlight'])
if ret != 0:
print("genhtml failed!")
else:
print("HTML output generated under build/lcov/")
#
# Python 3 support
#
if sys.version_info[0] >= 3:
import builtins
exec_ = getattr(builtins, "exec")
else:
def exec_(code, globs=None, locs=None):
"""Execute code in a namespace."""
if globs is None:
frame = sys._getframe(1)
globs = frame.f_globals
if locs is None:
locs = frame.f_locals
del frame
elif locs is None:
locs = globs
exec("""exec code in globs, locs""")
if __name__ == "__main__":
main(argv=sys.argv[1:])
|
zachmullen/boto | refs/heads/develop | boto/iam/connection.py | 61 | # Copyright (c) 2010-2011 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010-2011, Eucalyptus Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import boto
import boto.jsonresponse
from boto.compat import json, six
from boto.resultset import ResultSet
from boto.iam.summarymap import SummaryMap
from boto.connection import AWSQueryConnection
DEFAULT_POLICY_DOCUMENTS = {
'default': {
'Statement': [
{
'Principal': {
'Service': ['ec2.amazonaws.com']
},
'Effect': 'Allow',
'Action': ['sts:AssumeRole']
}
]
},
'amazonaws.com.cn': {
'Statement': [
{
'Principal': {
'Service': ['ec2.amazonaws.com.cn']
},
'Effect': 'Allow',
'Action': ['sts:AssumeRole']
}
]
},
}
# For backward-compatibility, we'll preserve this here.
ASSUME_ROLE_POLICY_DOCUMENT = json.dumps(DEFAULT_POLICY_DOCUMENTS['default'])
class IAMConnection(AWSQueryConnection):
APIVersion = '2010-05-08'
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, host='iam.amazonaws.com',
debug=0, https_connection_factory=None, path='/',
security_token=None, validate_certs=True, profile_name=None):
super(IAMConnection, self).__init__(aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy,
proxy_port, proxy_user, proxy_pass,
host, debug, https_connection_factory,
path, security_token,
validate_certs=validate_certs,
profile_name=profile_name)
def _required_auth_capability(self):
return ['hmac-v4']
def get_response(self, action, params, path='/', parent=None,
verb='POST', list_marker='Set'):
"""
Utility method to handle calls to IAM and parsing of responses.
"""
if not parent:
parent = self
response = self.make_request(action, params, path, verb)
body = response.read()
boto.log.debug(body)
if response.status == 200:
if body:
e = boto.jsonresponse.Element(list_marker=list_marker,
pythonize_name=True)
h = boto.jsonresponse.XmlHandler(e, parent)
h.parse(body)
return e
else:
# Support empty responses, e.g. deleting a SAML provider
# according to the official documentation.
return {}
else:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
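    # Illustrative sketch: every API wrapper below reduces to a single
    # get_response() call, e.g.
    #
    #     conn = IAMConnection()
    #     groups = conn.get_response('ListGroups', {'PathPrefix': '/'},
    #                                list_marker='Groups')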
#
# Group methods
#
def get_all_groups(self, path_prefix='/', marker=None, max_items=None):
"""
List the groups that have the specified path prefix.
:type path_prefix: string
:param path_prefix: If provided, only groups whose paths match
the provided prefix will be returned.
:type marker: string
:param marker: Use this only when paginating results and only
in follow-up request after you've received a response
where the results are truncated. Set this to the value of
the Marker element in the response you just received.
:type max_items: int
:param max_items: Use this only when paginating results to indicate
the maximum number of groups you want in the response.
"""
params = {}
if path_prefix:
params['PathPrefix'] = path_prefix
if marker:
params['Marker'] = marker
if max_items:
params['MaxItems'] = max_items
return self.get_response('ListGroups', params,
list_marker='Groups')
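    # Pagination sketch (the response keys below are an assumption, following
    # the pythonized layout used elsewhere in this module): keep passing the
    # returned marker back in until the listing is no longer truncated.
    #
    #     marker = None
    #     while True:
    #         rs = conn.get_all_groups(marker=marker)
    #         result = rs['list_groups_response']['list_groups_result']
    #         for group in result['groups']:
    #             print(group['group_name'])
    #         if result.get('is_truncated') != 'true':
    #             break
    #         marker = result['marker']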
def get_group(self, group_name, marker=None, max_items=None):
"""
Return a list of users that are in the specified group.
:type group_name: string
:param group_name: The name of the group whose information should
be returned.
:type marker: string
:param marker: Use this only when paginating results and only
in follow-up request after you've received a response
where the results are truncated. Set this to the value of
the Marker element in the response you just received.
:type max_items: int
:param max_items: Use this only when paginating results to indicate
            the maximum number of users you want in the response.
"""
params = {'GroupName': group_name}
if marker:
params['Marker'] = marker
if max_items:
params['MaxItems'] = max_items
return self.get_response('GetGroup', params, list_marker='Users')
def create_group(self, group_name, path='/'):
"""
Create a group.
:type group_name: string
:param group_name: The name of the new group
:type path: string
:param path: The path to the group (Optional). Defaults to /.
"""
params = {'GroupName': group_name,
'Path': path}
return self.get_response('CreateGroup', params)
def delete_group(self, group_name):
"""
Delete a group. The group must not contain any Users or
have any attached policies
:type group_name: string
:param group_name: The name of the group to delete.
"""
params = {'GroupName': group_name}
return self.get_response('DeleteGroup', params)
def update_group(self, group_name, new_group_name=None, new_path=None):
"""
Updates name and/or path of the specified group.
:type group_name: string
:param group_name: The name of the new group
:type new_group_name: string
:param new_group_name: If provided, the name of the group will be
changed to this name.
:type new_path: string
:param new_path: If provided, the path of the group will be
changed to this path.
"""
params = {'GroupName': group_name}
if new_group_name:
params['NewGroupName'] = new_group_name
if new_path:
params['NewPath'] = new_path
return self.get_response('UpdateGroup', params)
def add_user_to_group(self, group_name, user_name):
"""
Add a user to a group
:type group_name: string
:param group_name: The name of the group
:type user_name: string
        :param user_name: The user to be added to the group.
"""
params = {'GroupName': group_name,
'UserName': user_name}
return self.get_response('AddUserToGroup', params)
def remove_user_from_group(self, group_name, user_name):
"""
Remove a user from a group.
:type group_name: string
:param group_name: The name of the group
:type user_name: string
:param user_name: The user to remove from the group.
"""
params = {'GroupName': group_name,
'UserName': user_name}
return self.get_response('RemoveUserFromGroup', params)
def put_group_policy(self, group_name, policy_name, policy_json):
"""
Adds or updates the specified policy document for the specified group.
:type group_name: string
:param group_name: The name of the group the policy is associated with.
:type policy_name: string
        :param policy_name: The name of the policy to add or update.
:type policy_json: string
:param policy_json: The policy document.
"""
params = {'GroupName': group_name,
'PolicyName': policy_name,
'PolicyDocument': policy_json}
return self.get_response('PutGroupPolicy', params, verb='POST')
def get_all_group_policies(self, group_name, marker=None, max_items=None):
"""
List the names of the policies associated with the specified group.
:type group_name: string
:param group_name: The name of the group the policy is associated with.
:type marker: string
:param marker: Use this only when paginating results and only
in follow-up request after you've received a response
where the results are truncated. Set this to the value of
the Marker element in the response you just received.
:type max_items: int
:param max_items: Use this only when paginating results to indicate
            the maximum number of policy names you want in the response.
"""
params = {'GroupName': group_name}
if marker:
params['Marker'] = marker
if max_items:
params['MaxItems'] = max_items
return self.get_response('ListGroupPolicies', params,
list_marker='PolicyNames')
def get_group_policy(self, group_name, policy_name):
"""
Retrieves the specified policy document for the specified group.
:type group_name: string
:param group_name: The name of the group the policy is associated with.
:type policy_name: string
:param policy_name: The policy document to get.
"""
params = {'GroupName': group_name,
'PolicyName': policy_name}
return self.get_response('GetGroupPolicy', params, verb='POST')
def delete_group_policy(self, group_name, policy_name):
"""
Deletes the specified policy document for the specified group.
:type group_name: string
:param group_name: The name of the group the policy is associated with.
:type policy_name: string
:param policy_name: The policy document to delete.
"""
params = {'GroupName': group_name,
'PolicyName': policy_name}
return self.get_response('DeleteGroupPolicy', params, verb='POST')
def get_all_users(self, path_prefix='/', marker=None, max_items=None):
"""
List the users that have the specified path prefix.
:type path_prefix: string
:param path_prefix: If provided, only users whose paths match
the provided prefix will be returned.
:type marker: string
:param marker: Use this only when paginating results and only
in follow-up request after you've received a response
where the results are truncated. Set this to the value of
the Marker element in the response you just received.
:type max_items: int
:param max_items: Use this only when paginating results to indicate
            the maximum number of users you want in the response.
"""
params = {'PathPrefix': path_prefix}
if marker:
params['Marker'] = marker
if max_items:
params['MaxItems'] = max_items
return self.get_response('ListUsers', params, list_marker='Users')
#
# User methods
#
def create_user(self, user_name, path='/'):
"""
Create a user.
:type user_name: string
:param user_name: The name of the new user
:type path: string
:param path: The path in which the user will be created.
Defaults to /.
"""
params = {'UserName': user_name,
'Path': path}
return self.get_response('CreateUser', params)
def delete_user(self, user_name):
"""
        Delete a user, including the user's path, GUID and ARN.
:type user_name: string
:param user_name: The name of the user to delete.
"""
params = {'UserName': user_name}
return self.get_response('DeleteUser', params)
def get_user(self, user_name=None):
"""
Retrieve information about the specified user.
If the user_name is not specified, the user_name is determined
implicitly based on the AWS Access Key ID used to sign the request.
:type user_name: string
:param user_name: The name of the user to retrieve.
If not specified, defaults to user making request.
"""
params = {}
if user_name:
params['UserName'] = user_name
return self.get_response('GetUser', params)
def update_user(self, user_name, new_user_name=None, new_path=None):
"""
Updates name and/or path of the specified user.
:type user_name: string
:param user_name: The name of the user
:type new_user_name: string
:param new_user_name: If provided, the username of the user will be
changed to this username.
:type new_path: string
:param new_path: If provided, the path of the user will be
changed to this path.
"""
params = {'UserName': user_name}
if new_user_name:
params['NewUserName'] = new_user_name
if new_path:
params['NewPath'] = new_path
return self.get_response('UpdateUser', params)
def get_all_user_policies(self, user_name, marker=None, max_items=None):
"""
List the names of the policies associated with the specified user.
:type user_name: string
:param user_name: The name of the user the policy is associated with.
:type marker: string
:param marker: Use this only when paginating results and only
in follow-up request after you've received a response
where the results are truncated. Set this to the value of
the Marker element in the response you just received.
:type max_items: int
:param max_items: Use this only when paginating results to indicate
            the maximum number of policy names you want in the response.
"""
params = {'UserName': user_name}
if marker:
params['Marker'] = marker
if max_items:
params['MaxItems'] = max_items
return self.get_response('ListUserPolicies', params,
list_marker='PolicyNames')
def put_user_policy(self, user_name, policy_name, policy_json):
"""
Adds or updates the specified policy document for the specified user.
:type user_name: string
:param user_name: The name of the user the policy is associated with.
:type policy_name: string
        :param policy_name: The name of the policy to add or update.
:type policy_json: string
:param policy_json: The policy document.
"""
params = {'UserName': user_name,
'PolicyName': policy_name,
'PolicyDocument': policy_json}
return self.get_response('PutUserPolicy', params, verb='POST')
def get_user_policy(self, user_name, policy_name):
"""
Retrieves the specified policy document for the specified user.
:type user_name: string
:param user_name: The name of the user the policy is associated with.
:type policy_name: string
:param policy_name: The policy document to get.
"""
params = {'UserName': user_name,
'PolicyName': policy_name}
return self.get_response('GetUserPolicy', params, verb='POST')
def delete_user_policy(self, user_name, policy_name):
"""
Deletes the specified policy document for the specified user.
:type user_name: string
:param user_name: The name of the user the policy is associated with.
:type policy_name: string
:param policy_name: The policy document to delete.
"""
params = {'UserName': user_name,
'PolicyName': policy_name}
return self.get_response('DeleteUserPolicy', params, verb='POST')
def get_groups_for_user(self, user_name, marker=None, max_items=None):
"""
List the groups that a specified user belongs to.
:type user_name: string
:param user_name: The name of the user to list groups for.
:type marker: string
:param marker: Use this only when paginating results and only
in follow-up request after you've received a response
where the results are truncated. Set this to the value of
the Marker element in the response you just received.
:type max_items: int
:param max_items: Use this only when paginating results to indicate
the maximum number of groups you want in the response.
"""
params = {'UserName': user_name}
if marker:
params['Marker'] = marker
if max_items:
params['MaxItems'] = max_items
return self.get_response('ListGroupsForUser', params,
list_marker='Groups')
#
# Access Keys
#
def get_all_access_keys(self, user_name, marker=None, max_items=None):
"""
        Get all access keys associated with the specified user.
:type user_name: string
:param user_name: The username of the user
:type marker: string
:param marker: Use this only when paginating results and only
in follow-up request after you've received a response
where the results are truncated. Set this to the value of
the Marker element in the response you just received.
:type max_items: int
:param max_items: Use this only when paginating results to indicate
            the maximum number of access keys you want in the response.
"""
params = {'UserName': user_name}
if marker:
params['Marker'] = marker
if max_items:
params['MaxItems'] = max_items
return self.get_response('ListAccessKeys', params,
list_marker='AccessKeyMetadata')
def create_access_key(self, user_name=None):
"""
Create a new AWS Secret Access Key and corresponding AWS Access Key ID
        for the specified user. The default status for new keys is Active.
If the user_name is not specified, the user_name is determined
implicitly based on the AWS Access Key ID used to sign the request.
:type user_name: string
:param user_name: The username of the user
"""
        params = {}
        if user_name:
            params['UserName'] = user_name
return self.get_response('CreateAccessKey', params)
def update_access_key(self, access_key_id, status, user_name=None):
"""
Changes the status of the specified access key from Active to Inactive
or vice versa. This action can be used to disable a user's key as
part of a key rotation workflow.
If the user_name is not specified, the user_name is determined
implicitly based on the AWS Access Key ID used to sign the request.
:type access_key_id: string
:param access_key_id: The ID of the access key.
:type status: string
:param status: Either Active or Inactive.
:type user_name: string
:param user_name: The username of user (optional).
"""
params = {'AccessKeyId': access_key_id,
'Status': status}
if user_name:
params['UserName'] = user_name
return self.get_response('UpdateAccessKey', params)
def delete_access_key(self, access_key_id, user_name=None):
"""
Delete an access key associated with a user.
If the user_name is not specified, it is determined implicitly based
on the AWS Access Key ID used to sign the request.
:type access_key_id: string
:param access_key_id: The ID of the access key to be deleted.
:type user_name: string
:param user_name: The username of the user
"""
params = {'AccessKeyId': access_key_id}
if user_name:
params['UserName'] = user_name
return self.get_response('DeleteAccessKey', params)
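    # Key-rotation sketch (the user name and key id are hypothetical):
    # create a replacement key, deactivate the old one, then delete it once
    # the new credentials are in use everywhere.
    #
    #     new_key = conn.create_access_key('alice')
    #     conn.update_access_key('AKIAOLDKEYID', 'Inactive', user_name='alice')
    #     conn.delete_access_key('AKIAOLDKEYID', user_name='alice')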
#
# Signing Certificates
#
def get_all_signing_certs(self, marker=None, max_items=None,
user_name=None):
"""
        Get all signing certificates associated with a user.
If the user_name is not specified, it is determined implicitly based
on the AWS Access Key ID used to sign the request.
:type marker: string
:param marker: Use this only when paginating results and only
in follow-up request after you've received a response
where the results are truncated. Set this to the value of
the Marker element in the response you just received.
:type max_items: int
:param max_items: Use this only when paginating results to indicate
            the maximum number of certificates you want in the response.
:type user_name: string
:param user_name: The username of the user
"""
params = {}
if marker:
params['Marker'] = marker
if max_items:
params['MaxItems'] = max_items
if user_name:
params['UserName'] = user_name
return self.get_response('ListSigningCertificates',
params, list_marker='Certificates')
def update_signing_cert(self, cert_id, status, user_name=None):
"""
Change the status of the specified signing certificate from
Active to Inactive or vice versa.
If the user_name is not specified, it is determined implicitly based
on the AWS Access Key ID used to sign the request.
:type cert_id: string
:param cert_id: The ID of the signing certificate
:type status: string
:param status: Either Active or Inactive.
:type user_name: string
:param user_name: The username of the user
"""
params = {'CertificateId': cert_id,
'Status': status}
if user_name:
params['UserName'] = user_name
return self.get_response('UpdateSigningCertificate', params)
def upload_signing_cert(self, cert_body, user_name=None):
"""
Uploads an X.509 signing certificate and associates it with
the specified user.
If the user_name is not specified, it is determined implicitly based
on the AWS Access Key ID used to sign the request.
:type cert_body: string
:param cert_body: The body of the signing certificate.
:type user_name: string
:param user_name: The username of the user
"""
params = {'CertificateBody': cert_body}
if user_name:
params['UserName'] = user_name
return self.get_response('UploadSigningCertificate', params,
verb='POST')
def delete_signing_cert(self, cert_id, user_name=None):
"""
Delete a signing certificate associated with a user.
If the user_name is not specified, it is determined implicitly based
on the AWS Access Key ID used to sign the request.
:type user_name: string
:param user_name: The username of the user
:type cert_id: string
:param cert_id: The ID of the certificate.
"""
params = {'CertificateId': cert_id}
if user_name:
params['UserName'] = user_name
return self.get_response('DeleteSigningCertificate', params)
#
# Server Certificates
#
def list_server_certs(self, path_prefix='/',
marker=None, max_items=None):
"""
Lists the server certificates that have the specified path prefix.
If none exist, the action returns an empty list.
:type path_prefix: string
:param path_prefix: If provided, only certificates whose paths match
the provided prefix will be returned.
:type marker: string
:param marker: Use this only when paginating results and only
in follow-up request after you've received a response
where the results are truncated. Set this to the value of
the Marker element in the response you just received.
:type max_items: int
:param max_items: Use this only when paginating results to indicate
            the maximum number of certificates you want in the response.
"""
params = {}
if path_prefix:
params['PathPrefix'] = path_prefix
if marker:
params['Marker'] = marker
if max_items:
params['MaxItems'] = max_items
return self.get_response('ListServerCertificates',
params,
list_marker='ServerCertificateMetadataList')
# Preserves backwards compatibility.
# TODO: Look into deprecating this eventually?
get_all_server_certs = list_server_certs
def update_server_cert(self, cert_name, new_cert_name=None,
new_path=None):
"""
Updates the name and/or the path of the specified server certificate.
:type cert_name: string
:param cert_name: The name of the server certificate that you want
to update.
:type new_cert_name: string
:param new_cert_name: The new name for the server certificate.
Include this only if you are updating the
server certificate's name.
:type new_path: string
:param new_path: If provided, the path of the certificate will be
changed to this path.
"""
params = {'ServerCertificateName': cert_name}
if new_cert_name:
params['NewServerCertificateName'] = new_cert_name
if new_path:
params['NewPath'] = new_path
return self.get_response('UpdateServerCertificate', params)
def upload_server_cert(self, cert_name, cert_body, private_key,
cert_chain=None, path=None):
"""
Uploads a server certificate entity for the AWS Account.
The server certificate entity includes a public key certificate,
a private key, and an optional certificate chain, which should
all be PEM-encoded.
:type cert_name: string
:param cert_name: The name for the server certificate. Do not
include the path in this value.
:type cert_body: string
:param cert_body: The contents of the public key certificate
in PEM-encoded format.
:type private_key: string
:param private_key: The contents of the private key in
PEM-encoded format.
:type cert_chain: string
:param cert_chain: The contents of the certificate chain. This
is typically a concatenation of the PEM-encoded
public key certificates of the chain.
:type path: string
:param path: The path for the server certificate.
"""
params = {'ServerCertificateName': cert_name,
'CertificateBody': cert_body,
'PrivateKey': private_key}
if cert_chain:
params['CertificateChain'] = cert_chain
if path:
params['Path'] = path
return self.get_response('UploadServerCertificate', params,
verb='POST')
def get_server_certificate(self, cert_name):
"""
Retrieves information about the specified server certificate.
:type cert_name: string
:param cert_name: The name of the server certificate you want
to retrieve information about.
"""
params = {'ServerCertificateName': cert_name}
return self.get_response('GetServerCertificate', params)
def delete_server_cert(self, cert_name):
"""
Delete the specified server certificate.
:type cert_name: string
:param cert_name: The name of the server certificate you want
to delete.
"""
params = {'ServerCertificateName': cert_name}
return self.get_response('DeleteServerCertificate', params)
#
# MFA Devices
#
def get_all_mfa_devices(self, user_name, marker=None, max_items=None):
"""
        Get all MFA devices associated with the specified user.
:type user_name: string
:param user_name: The username of the user
:type marker: string
:param marker: Use this only when paginating results and only
in follow-up request after you've received a response
where the results are truncated. Set this to the value of
the Marker element in the response you just received.
:type max_items: int
:param max_items: Use this only when paginating results to indicate
            the maximum number of MFA devices you want in the response.
"""
params = {'UserName': user_name}
if marker:
params['Marker'] = marker
if max_items:
params['MaxItems'] = max_items
return self.get_response('ListMFADevices',
params, list_marker='MFADevices')
def enable_mfa_device(self, user_name, serial_number,
auth_code_1, auth_code_2):
"""
Enables the specified MFA device and associates it with the
specified user.
:type user_name: string
:param user_name: The username of the user
:type serial_number: string
:param serial_number: The serial number which uniquely identifies
the MFA device.
:type auth_code_1: string
:param auth_code_1: An authentication code emitted by the device.
:type auth_code_2: string
:param auth_code_2: A subsequent authentication code emitted
by the device.
"""
params = {'UserName': user_name,
'SerialNumber': serial_number,
'AuthenticationCode1': auth_code_1,
'AuthenticationCode2': auth_code_2}
return self.get_response('EnableMFADevice', params)
def deactivate_mfa_device(self, user_name, serial_number):
"""
Deactivates the specified MFA device and removes it from
association with the user.
:type user_name: string
:param user_name: The username of the user
:type serial_number: string
:param serial_number: The serial number which uniquely identifies
the MFA device.
"""
params = {'UserName': user_name,
'SerialNumber': serial_number}
return self.get_response('DeactivateMFADevice', params)
def resync_mfa_device(self, user_name, serial_number,
auth_code_1, auth_code_2):
"""
        Synchronizes the specified MFA device with the AWS servers.
:type user_name: string
:param user_name: The username of the user
:type serial_number: string
:param serial_number: The serial number which uniquely identifies
the MFA device.
:type auth_code_1: string
:param auth_code_1: An authentication code emitted by the device.
:type auth_code_2: string
:param auth_code_2: A subsequent authentication code emitted
by the device.
"""
params = {'UserName': user_name,
'SerialNumber': serial_number,
'AuthenticationCode1': auth_code_1,
'AuthenticationCode2': auth_code_2}
return self.get_response('ResyncMFADevice', params)
#
# Login Profiles
#
def get_login_profiles(self, user_name):
"""
Retrieves the login profile for the specified user.
:type user_name: string
:param user_name: The username of the user
"""
params = {'UserName': user_name}
return self.get_response('GetLoginProfile', params)
def create_login_profile(self, user_name, password):
"""
        Creates a login profile for the specified user, giving the user the
        ability to access AWS services and the AWS Management Console.
:type user_name: string
:param user_name: The name of the user
:type password: string
:param password: The new password for the user
"""
params = {'UserName': user_name,
'Password': password}
return self.get_response('CreateLoginProfile', params)
def delete_login_profile(self, user_name):
"""
Deletes the login profile associated with the specified user.
:type user_name: string
:param user_name: The name of the user to delete.
"""
params = {'UserName': user_name}
return self.get_response('DeleteLoginProfile', params)
def update_login_profile(self, user_name, password):
"""
Resets the password associated with the user's login profile.
:type user_name: string
:param user_name: The name of the user
:type password: string
:param password: The new password for the user
"""
params = {'UserName': user_name,
'Password': password}
return self.get_response('UpdateLoginProfile', params)
def create_account_alias(self, alias):
"""
Creates a new alias for the AWS account.
For more information on account id aliases, please see
http://goo.gl/ToB7G
:type alias: string
:param alias: The alias to attach to the account.
"""
params = {'AccountAlias': alias}
return self.get_response('CreateAccountAlias', params)
def delete_account_alias(self, alias):
"""
Deletes an alias for the AWS account.
For more information on account id aliases, please see
http://goo.gl/ToB7G
:type alias: string
:param alias: The alias to remove from the account.
"""
params = {'AccountAlias': alias}
return self.get_response('DeleteAccountAlias', params)
def get_account_alias(self):
"""
Get the alias for the current account.
This is referred to in the docs as list_account_aliases,
but it seems you can only have one account alias currently.
For more information on account id aliases, please see
http://goo.gl/ToB7G
"""
return self.get_response('ListAccountAliases', {},
list_marker='AccountAliases')
def get_signin_url(self, service='ec2'):
"""
Get the URL where IAM users can use their login profile to sign in
to this account's console.
:type service: string
:param service: Default service to go to in the console.
"""
alias = self.get_account_alias()
if not alias:
raise Exception('No alias associated with this account. Please use iam.create_account_alias() first.')
resp = alias.get('list_account_aliases_response', {})
result = resp.get('list_account_aliases_result', {})
aliases = result.get('account_aliases', [])
if not len(aliases):
raise Exception('No alias associated with this account. Please use iam.create_account_alias() first.')
# We'll just use the first one we find.
alias = aliases[0]
if self.host == 'iam.us-gov.amazonaws.com':
return "https://%s.signin.amazonaws-us-gov.com/console/%s" % (
alias,
service
)
elif self.host.endswith('amazonaws.com.cn'):
return "https://%s.signin.amazonaws.cn/console/%s" % (
alias,
service
)
else:
return "https://%s.signin.aws.amazon.com/console/%s" % (
alias,
service
)
def get_account_summary(self):
"""
        Get a summary of IAM entity usage and quotas for the current
        account, returned as a :py:class:`SummaryMap`.
"""
return self.get_object('GetAccountSummary', {}, SummaryMap)
#
# IAM Roles
#
def add_role_to_instance_profile(self, instance_profile_name, role_name):
"""
Adds the specified role to the specified instance profile.
:type instance_profile_name: string
:param instance_profile_name: Name of the instance profile to update.
:type role_name: string
:param role_name: Name of the role to add.
"""
return self.get_response('AddRoleToInstanceProfile',
{'InstanceProfileName': instance_profile_name,
'RoleName': role_name})
def create_instance_profile(self, instance_profile_name, path=None):
"""
Creates a new instance profile.
:type instance_profile_name: string
:param instance_profile_name: Name of the instance profile to create.
:type path: string
:param path: The path to the instance profile.
"""
params = {'InstanceProfileName': instance_profile_name}
if path is not None:
params['Path'] = path
return self.get_response('CreateInstanceProfile', params)
def _build_policy(self, assume_role_policy_document=None):
if assume_role_policy_document is not None:
if isinstance(assume_role_policy_document, six.string_types):
# Historically, they had to pass a string. If it's a string,
# assume the user has already handled it.
return assume_role_policy_document
else:
for tld, policy in DEFAULT_POLICY_DOCUMENTS.items():
                if tld == 'default':
# Skip the default. We'll fall back to it if we don't find
# anything.
continue
if self.host and self.host.endswith(tld):
assume_role_policy_document = policy
break
if not assume_role_policy_document:
assume_role_policy_document = DEFAULT_POLICY_DOCUMENTS['default']
# Dump the policy (either user-supplied ``dict`` or one of the defaults)
return json.dumps(assume_role_policy_document)
def create_role(self, role_name, assume_role_policy_document=None, path=None):
"""
Creates a new role for your AWS account.
The policy grants permission to an EC2 instance to assume the role.
The policy is URL-encoded according to RFC 3986. Currently, only EC2
instances can assume roles.
:type role_name: string
:param role_name: Name of the role to create.
:type assume_role_policy_document: ``string`` or ``dict``
:param assume_role_policy_document: The policy that grants an entity
permission to assume the role.
:type path: string
:param path: The path to the role.
"""
params = {
'RoleName': role_name,
'AssumeRolePolicyDocument': self._build_policy(
assume_role_policy_document
),
}
if path is not None:
params['Path'] = path
return self.get_response('CreateRole', params)
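    # Usage sketch (hypothetical role name): create_role accepts either a
    # pre-serialized JSON string or a plain dict; _build_policy picks a
    # region-appropriate default when nothing is supplied.
    #
    #     conn.create_role('web-role')  # default EC2 assume-role policy
    #     conn.create_role('web-role', {'Statement': [
    #         {'Principal': {'Service': ['ec2.amazonaws.com']},
    #          'Effect': 'Allow', 'Action': ['sts:AssumeRole']}]})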
def delete_instance_profile(self, instance_profile_name):
"""
Deletes the specified instance profile. The instance profile must not
have an associated role.
:type instance_profile_name: string
:param instance_profile_name: Name of the instance profile to delete.
"""
return self.get_response(
'DeleteInstanceProfile',
{'InstanceProfileName': instance_profile_name})
def delete_role(self, role_name):
"""
Deletes the specified role. The role must not have any policies
attached.
:type role_name: string
:param role_name: Name of the role to delete.
"""
return self.get_response('DeleteRole', {'RoleName': role_name})
def delete_role_policy(self, role_name, policy_name):
"""
Deletes the specified policy associated with the specified role.
:type role_name: string
:param role_name: Name of the role associated with the policy.
:type policy_name: string
:param policy_name: Name of the policy to delete.
"""
return self.get_response(
'DeleteRolePolicy',
{'RoleName': role_name, 'PolicyName': policy_name})
def get_instance_profile(self, instance_profile_name):
"""
Retrieves information about the specified instance profile, including
the instance profile's path, GUID, ARN, and role.
:type instance_profile_name: string
:param instance_profile_name: Name of the instance profile to get
information about.
"""
return self.get_response('GetInstanceProfile',
{'InstanceProfileName': instance_profile_name})
def get_role(self, role_name):
"""
Retrieves information about the specified role, including the role's
path, GUID, ARN, and the policy granting permission to EC2 to assume
the role.
:type role_name: string
:param role_name: Name of the role associated with the policy.
"""
return self.get_response('GetRole', {'RoleName': role_name})
def get_role_policy(self, role_name, policy_name):
"""
Retrieves the specified policy document for the specified role.
:type role_name: string
:param role_name: Name of the role associated with the policy.
:type policy_name: string
:param policy_name: Name of the policy to get.
"""
return self.get_response('GetRolePolicy',
{'RoleName': role_name,
'PolicyName': policy_name})
def list_instance_profiles(self, path_prefix=None, marker=None,
max_items=None):
"""
Lists the instance profiles that have the specified path prefix. If
there are none, the action returns an empty list.
:type path_prefix: string
:param path_prefix: The path prefix for filtering the results. For
example: /application_abc/component_xyz/, which would get all
instance profiles whose path starts with
/application_abc/component_xyz/.
:type marker: string
:param marker: Use this parameter only when paginating results, and
only in a subsequent request after you've received a response
where the results are truncated. Set it to the value of the
Marker element in the response you just received.
:type max_items: int
:param max_items: Use this parameter only when paginating results to
indicate the maximum number of user names you want in the response.
"""
params = {}
if path_prefix is not None:
params['PathPrefix'] = path_prefix
if marker is not None:
params['Marker'] = marker
if max_items is not None:
params['MaxItems'] = max_items
return self.get_response('ListInstanceProfiles', params,
list_marker='InstanceProfiles')
def list_instance_profiles_for_role(self, role_name, marker=None,
max_items=None):
"""
Lists the instance profiles that have the specified associated role. If
there are none, the action returns an empty list.
:type role_name: string
:param role_name: The name of the role to list instance profiles for.
:type marker: string
:param marker: Use this parameter only when paginating results, and
only in a subsequent request after you've received a response
where the results are truncated. Set it to the value of the
Marker element in the response you just received.
:type max_items: int
:param max_items: Use this parameter only when paginating results to
indicate the maximum number of user names you want in the response.
"""
params = {'RoleName': role_name}
if marker is not None:
params['Marker'] = marker
if max_items is not None:
params['MaxItems'] = max_items
return self.get_response('ListInstanceProfilesForRole', params,
list_marker='InstanceProfiles')
def list_role_policies(self, role_name, marker=None, max_items=None):
"""
Lists the names of the policies associated with the specified role. If
there are none, the action returns an empty list.
:type role_name: string
:param role_name: The name of the role to list policies for.
:type marker: string
:param marker: Use this parameter only when paginating results, and
only in a subsequent request after you've received a response
where the results are truncated. Set it to the value of the
marker element in the response you just received.
:type max_items: int
:param max_items: Use this parameter only when paginating results to
indicate the maximum number of user names you want in the response.
"""
params = {'RoleName': role_name}
if marker is not None:
params['Marker'] = marker
if max_items is not None:
params['MaxItems'] = max_items
return self.get_response('ListRolePolicies', params,
list_marker='PolicyNames')
def list_roles(self, path_prefix=None, marker=None, max_items=None):
"""
Lists the roles that have the specified path prefix. If there are none,
the action returns an empty list.
:type path_prefix: string
:param path_prefix: The path prefix for filtering the results.
:type marker: string
:param marker: Use this parameter only when paginating results, and
only in a subsequent request after you've received a response
where the results are truncated. Set it to the value of the
marker element in the response you just received.
:type max_items: int
:param max_items: Use this parameter only when paginating results to
indicate the maximum number of user names you want in the response.
"""
params = {}
if path_prefix is not None:
params['PathPrefix'] = path_prefix
if marker is not None:
params['Marker'] = marker
if max_items is not None:
params['MaxItems'] = max_items
return self.get_response('ListRoles', params, list_marker='Roles')
def put_role_policy(self, role_name, policy_name, policy_document):
"""
Adds (or updates) a policy document associated with the specified role.
:type role_name: string
:param role_name: Name of the role to associate the policy with.
:type policy_name: string
:param policy_name: Name of the policy document.
:type policy_document: string
:param policy_document: The policy document.
"""
return self.get_response('PutRolePolicy',
{'RoleName': role_name,
'PolicyName': policy_name,
'PolicyDocument': policy_document})
def remove_role_from_instance_profile(self, instance_profile_name,
role_name):
"""
Removes the specified role from the specified instance profile.
:type instance_profile_name: string
:param instance_profile_name: Name of the instance profile to update.
:type role_name: string
:param role_name: Name of the role to remove.
"""
return self.get_response('RemoveRoleFromInstanceProfile',
{'InstanceProfileName': instance_profile_name,
'RoleName': role_name})
def update_assume_role_policy(self, role_name, policy_document):
"""
Updates the policy that grants an entity permission to assume a role.
Currently, only an Amazon EC2 instance can assume a role.
:type role_name: string
:param role_name: Name of the role to update.
:type policy_document: string
:param policy_document: The policy that grants an entity permission to
assume the role.
"""
return self.get_response('UpdateAssumeRolePolicy',
{'RoleName': role_name,
'PolicyDocument': policy_document})
def create_saml_provider(self, saml_metadata_document, name):
"""
Creates an IAM entity to describe an identity provider (IdP)
that supports SAML 2.0.
The SAML provider that you create with this operation can be
used as a principal in a role's trust policy to establish a
trust relationship between AWS and a SAML identity provider.
You can create an IAM role that supports Web-based single
sign-on (SSO) to the AWS Management Console or one that
supports API access to AWS.
        When you create the SAML provider, you upload a SAML
metadata document that you get from your IdP and that includes
the issuer's name, expiration information, and keys that can
be used to validate the SAML authentication response
(assertions) that are received from the IdP. You must generate
the metadata document using the identity management software
that is used as your organization's IdP.
This operation requires `Signature Version 4`_.
For more information, see `Giving Console Access Using SAML`_
and `Creating Temporary Security Credentials for SAML
Federation`_ in the Using Temporary Credentials guide.
:type saml_metadata_document: string
:param saml_metadata_document: An XML document generated by an identity
provider (IdP) that supports SAML 2.0. The document includes the
issuer's name, expiration information, and keys that can be used to
validate the SAML authentication response (assertions) that are
received from the IdP. You must generate the metadata document
using the identity management software that is used as your
organization's IdP.
For more information, see `Creating Temporary Security Credentials for
SAML Federation`_ in the Using Temporary Security Credentials
guide.
:type name: string
:param name: The name of the provider to create.
"""
params = {
'SAMLMetadataDocument': saml_metadata_document,
'Name': name,
}
return self.get_response('CreateSAMLProvider', params)
def list_saml_providers(self):
"""
Lists the SAML providers in the account.
This operation requires `Signature Version 4`_.
"""
return self.get_response('ListSAMLProviders', {}, list_marker='SAMLProviderList')
def get_saml_provider(self, saml_provider_arn):
"""
Returns the SAML provider metadocument that was uploaded when
the provider was created or updated.
This operation requires `Signature Version 4`_.
:type saml_provider_arn: string
:param saml_provider_arn: The Amazon Resource Name (ARN) of the SAML
provider to get information about.
"""
params = {'SAMLProviderArn': saml_provider_arn}
return self.get_response('GetSAMLProvider', params)
def update_saml_provider(self, saml_provider_arn, saml_metadata_document):
"""
Updates the metadata document for an existing SAML provider.
This operation requires `Signature Version 4`_.
:type saml_provider_arn: string
:param saml_provider_arn: The Amazon Resource Name (ARN) of the SAML
provider to update.
:type saml_metadata_document: string
:param saml_metadata_document: An XML document generated by an identity
provider (IdP) that supports SAML 2.0. The document includes the
issuer's name, expiration information, and keys that can be used to
validate the SAML authentication response (assertions) that are
received from the IdP. You must generate the metadata document
using the identity management software that is used as your
organization's IdP.
"""
params = {
'SAMLMetadataDocument': saml_metadata_document,
'SAMLProviderArn': saml_provider_arn,
}
return self.get_response('UpdateSAMLProvider', params)
def delete_saml_provider(self, saml_provider_arn):
"""
Deletes a SAML provider.
Deleting the provider does not update any roles that reference
the SAML provider as a principal in their trust policies. Any
attempt to assume a role that references a SAML provider that
has been deleted will fail.
This operation requires `Signature Version 4`_.
:type saml_provider_arn: string
:param saml_provider_arn: The Amazon Resource Name (ARN) of the SAML
provider to delete.
"""
params = {'SAMLProviderArn': saml_provider_arn}
return self.get_response('DeleteSAMLProvider', params)
#
# IAM Reports
#
def generate_credential_report(self):
"""
        Generates a credential report for an account.
        A new credential report can only be generated every 4 hours. If one
        hasn't been generated in the last 4 hours, get_credential_report
        will raise an error when called.
"""
params = {}
return self.get_response('GenerateCredentialReport', params)
def get_credential_report(self):
"""
        Retrieves a credential report for an account.
        A report must have been generated in the last 4 hours to succeed.
        The report is returned as a base64-encoded blob within the response.
"""
params = {}
return self.get_response('GetCredentialReport', params)
def create_virtual_mfa_device(self, path, device_name):
"""
Creates a new virtual MFA device for the AWS account.
After creating the virtual MFA, use enable-mfa-device to
attach the MFA device to an IAM user.
:type path: string
:param path: The path for the virtual MFA device.
:type device_name: string
:param device_name: The name of the virtual MFA device.
Used with path to uniquely identify a virtual MFA device.
"""
params = {
'Path': path,
'VirtualMFADeviceName': device_name
}
return self.get_response('CreateVirtualMFADevice', params)
#
# IAM password policy
#
def get_account_password_policy(self):
"""
Returns the password policy for the AWS account.
"""
params = {}
return self.get_response('GetAccountPasswordPolicy', params)
def delete_account_password_policy(self):
"""
Delete the password policy currently set for the AWS account.
"""
params = {}
return self.get_response('DeleteAccountPasswordPolicy', params)
    def update_account_password_policy(self, allow_users_to_change_password=None,
                                       hard_expiry=None, max_password_age=None,
                                       minimum_password_length=None,
                                       password_reuse_prevention=None,
                                       require_lowercase_characters=None,
                                       require_numbers=None, require_symbols=None,
                                       require_uppercase_characters=None):
"""
Update the password policy for the AWS account.
        Note: unset parameters will be reset to the Amazon default settings.
Most of the password policy settings are enforced the next time your users
change their passwords. When you set minimum length and character type
requirements, they are enforced the next time your users change their
passwords - users are not forced to change their existing passwords, even
if the pre-existing passwords do not adhere to the updated password
policy. When you set a password expiration period, the expiration period
is enforced immediately.
:type allow_users_to_change_password: bool
:param allow_users_to_change_password: Allows all IAM users in your account
to use the AWS Management Console to change their own passwords.
:type hard_expiry: bool
:param hard_expiry: Prevents IAM users from setting a new password after
their password has expired.
:type max_password_age: int
:param max_password_age: The number of days that an IAM user password is valid.
:type minimum_password_length: int
:param minimum_password_length: The minimum number of characters allowed in
an IAM user password.
:type password_reuse_prevention: int
:param password_reuse_prevention: Specifies the number of previous passwords
that IAM users are prevented from reusing.
:type require_lowercase_characters: bool
:param require_lowercase_characters: Specifies whether IAM user passwords
must contain at least one lowercase character from the ISO basic Latin
alphabet (``a`` to ``z``).
:type require_numbers: bool
:param require_numbers: Specifies whether IAM user passwords must contain at
least one numeric character (``0`` to ``9``).
:type require_symbols: bool
:param require_symbols: Specifies whether IAM user passwords must contain at
least one of the following non-alphanumeric characters:
``! @ # $ % ^ & * ( ) _ + - = [ ] { } | '``
:type require_uppercase_characters: bool
:param require_uppercase_characters: Specifies whether IAM user passwords
must contain at least one uppercase character from the ISO basic Latin
alphabet (``A`` to ``Z``).
"""
params = {}
        # Boolean flags are only sent when explicitly provided as bools;
        # each flag is checked against its own value.
        if isinstance(allow_users_to_change_password, bool):
            params['AllowUsersToChangePassword'] = str(allow_users_to_change_password).lower()
        if isinstance(hard_expiry, bool):
            params['HardExpiry'] = str(hard_expiry).lower()
        if max_password_age is not None:
            params['MaxPasswordAge'] = max_password_age
        if minimum_password_length is not None:
            params['MinimumPasswordLength'] = minimum_password_length
        if password_reuse_prevention is not None:
            params['PasswordReusePrevention'] = password_reuse_prevention
        if isinstance(require_lowercase_characters, bool):
            params['RequireLowercaseCharacters'] = str(require_lowercase_characters).lower()
        if isinstance(require_numbers, bool):
            params['RequireNumbers'] = str(require_numbers).lower()
        if isinstance(require_symbols, bool):
            params['RequireSymbols'] = str(require_symbols).lower()
        if isinstance(require_uppercase_characters, bool):
            params['RequireUppercaseCharacters'] = str(require_uppercase_characters).lower()
return self.get_response('UpdateAccountPasswordPolicy', params)
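    # Example sketch: enforce a 14-character minimum with mixed case. Any
    # parameter left unset here reverts to the Amazon default.
    #
    #     conn.update_account_password_policy(
    #         minimum_password_length=14,
    #         require_uppercase_characters=True,
    #         require_lowercase_characters=True)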
def create_policy(self, policy_name, policy_document, path='/',
description=None):
"""
Create a policy.
:type policy_name: string
:param policy_name: The name of the new policy
        :type policy_document: string
:param policy_document: The document of the new policy
:type path: string
:param path: The path in which the policy will be created.
Defaults to /.
:type description: string
        :param description: A description of the new policy.
"""
params = {'PolicyName': policy_name,
'PolicyDocument': policy_document,
'Path': path}
if description is not None:
params['Description'] = str(description)
return self.get_response('CreatePolicy', params)
def create_policy_version(
self,
policy_arn,
policy_document,
set_as_default=None):
"""
Create a policy version.
:type policy_arn: string
:param policy_arn: The ARN of the policy
        :type policy_document: string
:param policy_document: The document of the new policy version
:type set_as_default: bool
        :param set_as_default: If True, sets this version as the policy's
            default version. Defaults to None.
"""
params = {'PolicyArn': policy_arn,
'PolicyDocument': policy_document}
        if isinstance(set_as_default, bool):
params['SetAsDefault'] = str(set_as_default).lower()
return self.get_response('CreatePolicyVersion', params)
def delete_policy(self, policy_arn):
"""
Delete a policy.
:type policy_arn: string
:param policy_arn: The ARN of the policy to delete
"""
params = {'PolicyArn': policy_arn}
return self.get_response('DeletePolicy', params)
def delete_policy_version(self, policy_arn, version_id):
"""
Delete a policy version.
:type policy_arn: string
:param policy_arn: The ARN of the policy to delete a version from
:type version_id: string
:param version_id: The id of the version to delete
"""
params = {'PolicyArn': policy_arn,
'VersionId': version_id}
return self.get_response('DeletePolicyVersion', params)
def get_policy(self, policy_arn):
"""
Get policy information.
:type policy_arn: string
:param policy_arn: The ARN of the policy to get information for
"""
params = {'PolicyArn': policy_arn}
return self.get_response('GetPolicy', params)
def get_policy_version(self, policy_arn, version_id):
"""
Get policy information.
:type policy_arn: string
:param policy_arn: The ARN of the policy to get information for a
specific version
:type version_id: string
:param version_id: The id of the version to get information for
"""
params = {'PolicyArn': policy_arn,
'VersionId': version_id}
return self.get_response('GetPolicyVersion', params)
def list_policies(self, marker=None, max_items=None, only_attached=None,
path_prefix=None, scope=None):
"""
List policies of account.
:type marker: string
:param marker: A marker used for pagination (received from previous
accesses)
:type max_items: int
        :param max_items: Send at most max_items results; allows pagination
:type only_attached: bool
:param only_attached: Send only policies attached to other resources
:type path_prefix: string
:param path_prefix: Send only items prefixed by this path
:type scope: string
        :param scope: Either 'AWS' or 'Local'. Choose between AWS-managed
            policies or your own.
"""
params = {}
if path_prefix is not None:
params['PathPrefix'] = path_prefix
if marker is not None:
params['Marker'] = marker
if max_items is not None:
params['MaxItems'] = max_items
        if isinstance(only_attached, bool):
params['OnlyAttached'] = str(only_attached).lower()
if scope is not None:
params['Scope'] = scope
return self.get_response(
'ListPolicies',
params,
list_marker='Policies')
def list_policy_versions(self, policy_arn, marker=None, max_items=None):
"""
List policy versions.
:type policy_arn: string
:param policy_arn: The ARN of the policy to get versions of
:type marker: string
:param marker: A marker used for pagination (received from previous
accesses)
:type max_items: int
        :param max_items: Send at most max_items results; allows pagination
"""
params = {'PolicyArn': policy_arn}
if marker is not None:
params['Marker'] = marker
if max_items is not None:
params['MaxItems'] = max_items
return self.get_response(
'ListPolicyVersions',
params,
list_marker='Versions')
def set_default_policy_version(self, policy_arn, version_id):
"""
Set default policy version.
:type policy_arn: string
:param policy_arn: The ARN of the policy to set the default version
for
:type version_id: string
:param version_id: The id of the version to set as default
"""
params = {'PolicyArn': policy_arn,
'VersionId': version_id}
return self.get_response('SetDefaultPolicyVersion', params)
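    # Versioning sketch (the ARN is hypothetical): publish a new version as
    # the default, then prune a superseded one (IAM keeps at most five
    # versions per managed policy).
    #
    #     arn = 'arn:aws:iam::123456789012:policy/MyPolicy'
    #     conn.create_policy_version(arn, policy_json, set_as_default=True)
    #     conn.delete_policy_version(arn, 'v1')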
def list_entities_for_policy(self, policy_arn, path_prefix=None,
marker=None, max_items=None,
entity_filter=None):
"""
:type policy_arn: string
:param policy_arn: The ARN of the policy to get entities for
:type marker: string
:param marker: A marker used for pagination (received from previous
accesses)
:type max_items: int
        :param max_items: Send at most max_items results; allows pagination
:type path_prefix: string
:param path_prefix: Send only items prefixed by this path
:type entity_filter: string
        :param entity_filter: The entity type to return: one of User, Role,
            Group, LocalManagedPolicy or AWSManagedPolicy
"""
params = {'PolicyArn': policy_arn}
if marker is not None:
params['Marker'] = marker
if max_items is not None:
params['MaxItems'] = max_items
if path_prefix is not None:
params['PathPrefix'] = path_prefix
if entity_filter is not None:
params['EntityFilter'] = entity_filter
return self.get_response('ListEntitiesForPolicy', params,
list_marker=('PolicyGroups',
'PolicyUsers',
'PolicyRoles'))
def attach_group_policy(self, policy_arn, group_name):
"""
:type policy_arn: string
:param policy_arn: The ARN of the policy to attach
:type group_name: string
:param group_name: Group to attach the policy to
"""
params = {'PolicyArn': policy_arn, 'GroupName': group_name}
return self.get_response('AttachGroupPolicy', params)
def attach_role_policy(self, policy_arn, role_name):
"""
:type policy_arn: string
:param policy_arn: The ARN of the policy to attach
:type role_name: string
:param role_name: Role to attach the policy to
"""
params = {'PolicyArn': policy_arn, 'RoleName': role_name}
return self.get_response('AttachRolePolicy', params)
def attach_user_policy(self, policy_arn, user_name):
"""
:type policy_arn: string
:param policy_arn: The ARN of the policy to attach
:type user_name: string
:param user_name: User to attach the policy to
"""
params = {'PolicyArn': policy_arn, 'UserName': user_name}
return self.get_response('AttachUserPolicy', params)
def detach_group_policy(self, policy_arn, group_name):
"""
:type policy_arn: string
:param policy_arn: The ARN of the policy to detach
:type group_name: string
:param group_name: Group to detach the policy from
"""
params = {'PolicyArn': policy_arn, 'GroupName': group_name}
return self.get_response('DetachGroupPolicy', params)
def detach_role_policy(self, policy_arn, role_name):
"""
:type policy_arn: string
:param policy_arn: The ARN of the policy to detach
:type role_name: string
:param role_name: Role to detach the policy from
"""
params = {'PolicyArn': policy_arn, 'RoleName': role_name}
return self.get_response('DetachRolePolicy', params)
def detach_user_policy(self, policy_arn, user_name):
"""
:type policy_arn: string
:param policy_arn: The ARN of the policy to detach
:type user_name: string
:param user_name: User to detach the policy from
"""
params = {'PolicyArn': policy_arn, 'UserName': user_name}
return self.get_response('DetachUserPolicy', params)
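    # Managed-policy attachment sketch (ReadOnlyAccess is a real AWS-managed
    # policy; the user name is hypothetical):
    #
    #     arn = 'arn:aws:iam::aws:policy/ReadOnlyAccess'
    #     conn.attach_user_policy(arn, 'alice')
    #     conn.list_entities_for_policy(arn, entity_filter='User')
    #     conn.detach_user_policy(arn, 'alice')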
|
apache/airflow | refs/heads/main | airflow/providers/google/cloud/sensors/dataproc.py | 2 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Dataproc Job sensor."""
from google.cloud.dataproc_v1beta2.types import JobStatus
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.hooks.dataproc import DataprocHook
from airflow.sensors.base import BaseSensorOperator
class DataprocJobSensor(BaseSensorOperator):
"""
Check for the state of a previously submitted Dataproc job.
    :param project_id: The ID of the Google Cloud project that the Dataproc
        job belongs to. (templated)
:type project_id: str
:param dataproc_job_id: The Dataproc job ID to poll. (templated)
:type dataproc_job_id: str
:param location: Required. The Cloud Dataproc region in which to handle the request. (templated)
:type location: str
    :param gcp_conn_id: The connection ID to use when connecting to Google Cloud Platform.
:type gcp_conn_id: str
"""
template_fields = ('project_id', 'location', 'dataproc_job_id')
ui_color = '#f0eee4'
def __init__(
self,
*,
project_id: str,
dataproc_job_id: str,
location: str,
gcp_conn_id: str = 'google_cloud_default',
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self.dataproc_job_id = dataproc_job_id
self.location = location
def poke(self, context: dict) -> bool:
hook = DataprocHook(gcp_conn_id=self.gcp_conn_id)
job = hook.get_job(job_id=self.dataproc_job_id, location=self.location, project_id=self.project_id)
state = job.status.state
if state == JobStatus.State.ERROR:
raise AirflowException(f'Job failed:\n{job}')
elif state in {
JobStatus.State.CANCELLED,
JobStatus.State.CANCEL_PENDING,
JobStatus.State.CANCEL_STARTED,
}:
raise AirflowException(f'Job was cancelled:\n{job}')
elif JobStatus.State.DONE == state:
self.log.debug("Job %s completed successfully.", self.dataproc_job_id)
return True
elif JobStatus.State.ATTEMPT_FAILURE == state:
self.log.debug("Job %s attempt has failed.", self.dataproc_job_id)
self.log.info("Waiting for job %s to complete.", self.dataproc_job_id)
return False
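def _example_dag():  # pragma: no cover - illustrative sketch, not provider API
    """Hedged usage sketch: wiring the sensor above into a DAG.
    The project id, region and job id are made-up placeholders.
    """
    from datetime import datetime
    from airflow import DAG
    with DAG('example_dataproc_sensor', start_date=datetime(2021, 1, 1),
             schedule_interval=None) as dag:
        DataprocJobSensor(
            task_id='wait_for_dataproc_job',
            project_id='my-gcp-project',      # assumed project id
            location='europe-west1',          # assumed Dataproc region
            dataproc_job_id='sample-job-id',  # assumed job id
            poke_interval=60,
        )
        return dag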
|
piffey/ansible | refs/heads/devel | test/units/modules/network/nxos/test_nxos_vxlan_vtep.py | 57 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests.mock import patch
from ansible.modules.network.nxos import nxos_vxlan_vtep
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosVxlanVtepVniModule(TestNxosModule):
module = nxos_vxlan_vtep
def setUp(self):
super(TestNxosVxlanVtepVniModule, self).setUp()
self.mock_load_config = patch('ansible.modules.network.nxos.nxos_vxlan_vtep.load_config')
self.load_config = self.mock_load_config.start()
self.mock_get_config = patch('ansible.modules.network.nxos.nxos_vxlan_vtep.get_config')
self.get_config = self.mock_get_config.start()
def tearDown(self):
super(TestNxosVxlanVtepVniModule, self).tearDown()
self.mock_get_config.stop()
self.mock_load_config.stop()
def load_fixtures(self, commands=None, device=''):
self.get_config.return_value = load_fixture('nxos_vxlan_vtep', 'config.cfg')
self.load_config.return_value = None
def test_nxos_vxlan_vtep(self):
set_module_args(dict(interface='nve1', description='simple description'))
self.execute_module(changed=True, commands=['interface nve1', 'description simple description'])
def test_nxos_vxlan_vtep_present_no_change(self):
set_module_args(dict(interface='nve1'))
self.execute_module(changed=False, commands=[])
def test_nxos_vxlan_vtep_absent(self):
set_module_args(dict(interface='nve1', state='absent'))
self.execute_module(changed=True, commands=['no interface nve1'])
def test_nxos_vxlan_vtep_absent_no_change(self):
set_module_args(dict(interface='nve2', state='absent'))
self.execute_module(changed=False, commands=[])
|
lightopa/Aiopa-Battles | refs/heads/master | lib/raven/transport/eventlet.py | 16 | """
raven.transport.eventlet
~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import sys
from raven.transport.http import HTTPTransport
try:
import eventlet
try:
from eventlet.green import urllib2 as eventlet_urllib2
except ImportError:
from eventlet.green.urllib import request as eventlet_urllib2
has_eventlet = True
except:
has_eventlet = False
class EventletHTTPTransport(HTTPTransport):
scheme = ['eventlet+http', 'eventlet+https']
def __init__(self, parsed_url, pool_size=100, **kwargs):
if not has_eventlet:
raise ImportError('EventletHTTPTransport requires eventlet.')
super(EventletHTTPTransport, self).__init__(parsed_url, **kwargs)
# remove the eventlet+ from the protocol, as it is not a real protocol
self._url = self._url.split('+', 1)[-1]
def _send_payload(self, payload):
req = eventlet_urllib2.Request(self._url, headers=payload[1])
try:
if sys.version_info < (2, 6):
response = eventlet_urllib2.urlopen(req, payload[0]).read()
else:
response = eventlet_urllib2.urlopen(req, payload[0],
self.timeout).read()
return response
except Exception as err:
return err
def send(self, data, headers):
"""
Spawn an async request to a remote webserver.
"""
eventlet.spawn(self._send_payload, (data, headers))
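def _example_usage():  # pragma: no cover - illustrative sketch, not raven API
    """Hedged usage sketch: an ``eventlet+`` scheme in the DSN selects this
    transport through raven's transport registry. The DSN is a made-up
    placeholder.
    """
    from raven import Client
    client = Client('eventlet+https://public:secret@sentry.example.com/1')
    client.captureMessage('hello from the eventlet transport')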
|
geggo/pyface | refs/heads/master | pyface/ui/qt4/code_editor/pygments_highlighter.py | 3 | #------------------------------------------------------------------------------
# Copyright (c) 2010, Enthought Inc
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD license.
#
# Author: Enthought Inc
# Description: <Enthought pyface code editor>
#------------------------------------------------------------------------------
from pyface.qt import QtGui
from pygments.lexer import RegexLexer, _TokenType, Text, Error
from pygments.lexers import CLexer, CppLexer, PythonLexer, get_lexer_by_name
from pygments.styles.default import DefaultStyle
from pygments.token import Comment
def get_tokens_unprocessed(self, text, stack=('root',)):
""" Split ``text`` into (tokentype, text) pairs.
Monkeypatched to store the final stack on the object itself.
The `text` parameter that gets passed is only the current line, so to
highlight things like multiline strings correctly, we need to retrieve
the state from the previous line (this is done in PygmentsHighlighter,
below), and use it to continue processing the current line.
"""
pos = 0
tokendefs = self._tokens
if hasattr(self, '_saved_state_stack'):
statestack = list(self._saved_state_stack)
else:
statestack = list(stack)
statetokens = tokendefs[statestack[-1]]
while 1:
for rexmatch, action, new_state in statetokens:
m = rexmatch(text, pos)
if m:
if action is not None:
if type(action) is _TokenType:
yield pos, action, m.group()
else:
for item in action(self, m):
yield item
pos = m.end()
if new_state is not None:
# state transition
if isinstance(new_state, tuple):
for state in new_state:
if state == '#pop':
statestack.pop()
elif state == '#push':
statestack.append(statestack[-1])
else:
statestack.append(state)
elif isinstance(new_state, int):
# pop
del statestack[new_state:]
elif new_state == '#push':
statestack.append(statestack[-1])
else:
assert False, "wrong state def: %r" % new_state
statetokens = tokendefs[statestack[-1]]
break
else:
try:
if text[pos] == '\n':
# at EOL, reset state to "root"
pos += 1
statestack = ['root']
statetokens = tokendefs['root']
yield pos, Text, u'\n'
continue
yield pos, Error, text[pos]
pos += 1
except IndexError:
break
self._saved_state_stack = list(statestack)
# Monkeypatch!
RegexLexer.get_tokens_unprocessed = get_tokens_unprocessed
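# Hedged sketch (illustrative only): with the patch applied, lexing a line that
# opens an unterminated triple-quoted string leaves the saved stack parked in
# the string state, so the next call can resume mid-string:
#
#   lexer = PythonLexer()
#   list(lexer.get_tokens_unprocessed('s = """start of a string'))
#   lexer._saved_state_stack  # e.g. ['root', 'tdqs'] - still inside the string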
# Even with the above monkey patch to store state, multiline comments do not
# work since they are stateless (Pygments uses a single multiline regex for
# these comments, but Qt lexes by line). So we need to add a state for comments
# to the C and C++ lexers. This means that nested multiline comments will appear
# to be valid C/C++, but this is better than the alternative for now.
def replace_pattern(tokens, new_pattern):
""" Given a RegexLexer token dictionary 'tokens', replace all patterns that
match the token specified in 'new_pattern' with 'new_pattern'.
"""
for state in tokens.values():
for index, pattern in enumerate(state):
if isinstance(pattern, tuple) and pattern[1] == new_pattern[1]:
state[index] = new_pattern
# More monkeypatching!
comment_start = (r'/\*', Comment.Multiline, 'comment')
comment_state = [ (r'[^*/]', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline) ]
replace_pattern(CLexer.tokens, comment_start)
replace_pattern(CppLexer.tokens, comment_start)
CLexer.tokens['comment'] = comment_state
CppLexer.tokens['comment'] = comment_state
class BlockUserData(QtGui.QTextBlockUserData):
""" Storage for the user data associated with each line.
"""
syntax_stack = ('root',)
def __init__(self, **kwds):
QtGui.QTextBlockUserData.__init__(self)
for key, value in kwds.iteritems():
setattr(self, key, value)
def __repr__(self):
attrs = ['syntax_stack']
kwds = ', '.join([ '%s=%r' % (attr, getattr(self, attr))
for attr in attrs ])
return 'BlockUserData(%s)' % kwds
class PygmentsHighlighter(QtGui.QSyntaxHighlighter):
""" Syntax highlighter that uses Pygments for parsing. """
def __init__(self, parent, lexer=None):
super(PygmentsHighlighter, self).__init__(parent)
try:
self._lexer = get_lexer_by_name(lexer)
except:
self._lexer = PythonLexer()
self._style = DefaultStyle
# Caches for formats and brushes.
self._brushes = {}
self._formats = {}
def highlightBlock(self, qstring):
""" Highlight a block of text.
"""
qstring = unicode(qstring)
prev_data = self.previous_block_data()
if prev_data is not None:
self._lexer._saved_state_stack = prev_data.syntax_stack
elif hasattr(self._lexer, '_saved_state_stack'):
del self._lexer._saved_state_stack
index = 0
# Lex the text using Pygments
for token, text in self._lexer.get_tokens(qstring):
l = len(text)
format = self._get_format(token)
if format is not None:
self.setFormat(index, l, format)
index += l
if hasattr(self._lexer, '_saved_state_stack'):
data = BlockUserData(syntax_stack=self._lexer._saved_state_stack)
self.currentBlock().setUserData(data)
# there is a bug in pyside and it will crash unless we
# hold on to the reference a little longer
data = self.currentBlock().userData()
# Clean up for the next go-round.
del self._lexer._saved_state_stack
def previous_block_data(self):
""" Convenience method for returning the previous block's user data.
"""
return self.currentBlock().previous().userData()
def _get_format(self, token):
""" Returns a QTextCharFormat for token or None.
"""
if token in self._formats:
return self._formats[token]
result = None
        for key, value in self._style.style_for_token(token).items():
if value:
if result is None:
result = QtGui.QTextCharFormat()
if key == 'color':
result.setForeground(self._get_brush(value))
elif key == 'bgcolor':
result.setBackground(self._get_brush(value))
elif key == 'bold':
result.setFontWeight(QtGui.QFont.Bold)
elif key == 'italic':
result.setFontItalic(True)
elif key == 'underline':
result.setUnderlineStyle(
QtGui.QTextCharFormat.SingleUnderline)
elif key == 'sans':
result.setFontStyleHint(QtGui.QFont.SansSerif)
elif key == 'roman':
result.setFontStyleHint(QtGui.QFont.Times)
elif key == 'mono':
result.setFontStyleHint(QtGui.QFont.TypeWriter)
elif key == 'border':
# Borders are normally used for errors. We can't do a border
# so instead we do a wavy underline
result.setUnderlineStyle(
QtGui.QTextCharFormat.WaveUnderline)
result.setUnderlineColor(self._get_color(value))
self._formats[token] = result
return result
def _get_brush(self, color):
""" Returns a brush for the color.
"""
result = self._brushes.get(color)
if result is None:
qcolor = self._get_color(color)
result = QtGui.QBrush(qcolor)
self._brushes[color] = result
return result
def _get_color(self, color):
qcolor = QtGui.QColor()
        qcolor.setRgb(int(color[:2], base=16),
                      int(color[2:4], base=16),
                      int(color[4:6], base=16))
return qcolor
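def _example_usage():  # pragma: no cover - illustrative sketch only
    """Hedged usage sketch: attach the highlighter to a Qt text widget.
    Assumes a QApplication can be created; 'python' is handed to pygments'
    get_lexer_by_name via the constructor above.
    """
    app = QtGui.QApplication([])
    editor = QtGui.QPlainTextEdit()
    PygmentsHighlighter(editor.document(), lexer='python')
    editor.setPlainText('def greet(name):\n    return "hello %s" % name')
    editor.show()
    app.exec_()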
|
zhaodelong/django | refs/heads/master | tests/many_to_one/tests.py | 88 | import datetime
from copy import deepcopy
from django.core.exceptions import FieldError, MultipleObjectsReturned
from django.db import models, transaction
from django.test import TestCase
from django.utils import six
from django.utils.translation import ugettext_lazy
from .models import (
Article, Category, Child, First, Parent, Record, Relation, Reporter,
School, Student, Third, ToFieldChild,
)
class ManyToOneTests(TestCase):
def setUp(self):
# Create a few Reporters.
self.r = Reporter(first_name='John', last_name='Smith', email='john@example.com')
self.r.save()
self.r2 = Reporter(first_name='Paul', last_name='Jones', email='paul@example.com')
self.r2.save()
# Create an Article.
self.a = Article(id=None, headline="This is a test",
pub_date=datetime.date(2005, 7, 27), reporter=self.r)
self.a.save()
def test_get(self):
# Article objects have access to their related Reporter objects.
r = self.a.reporter
self.assertEqual(r.id, self.r.id)
# These are strings instead of unicode strings because that's what was used in
# the creation of this reporter (and we haven't refreshed the data from the
# database, which always returns unicode strings).
self.assertEqual((r.first_name, self.r.last_name), ('John', 'Smith'))
def test_create(self):
# You can also instantiate an Article by passing the Reporter's ID
# instead of a Reporter object.
a3 = Article(id=None, headline="Third article",
pub_date=datetime.date(2005, 7, 27), reporter_id=self.r.id)
a3.save()
self.assertEqual(a3.reporter.id, self.r.id)
# Similarly, the reporter ID can be a string.
a4 = Article(id=None, headline="Fourth article",
pub_date=datetime.date(2005, 7, 27), reporter_id=str(self.r.id))
a4.save()
self.assertEqual(repr(a4.reporter), "<Reporter: John Smith>")
def test_add(self):
# Create an Article via the Reporter object.
new_article = self.r.article_set.create(headline="John's second story",
pub_date=datetime.date(2005, 7, 29))
self.assertEqual(repr(new_article), "<Article: John's second story>")
self.assertEqual(new_article.reporter.id, self.r.id)
# Create a new article, and add it to the article set.
new_article2 = Article(headline="Paul's story", pub_date=datetime.date(2006, 1, 17))
msg = "<Article: Paul's story> instance isn't saved. Use bulk=False or save the object first."
with self.assertRaisesMessage(ValueError, msg):
self.r.article_set.add(new_article2)
self.r.article_set.add(new_article2, bulk=False)
self.assertEqual(new_article2.reporter.id, self.r.id)
self.assertQuerysetEqual(self.r.article_set.all(),
[
"<Article: John's second story>",
"<Article: Paul's story>",
"<Article: This is a test>",
])
# Add the same article to a different article set - check that it moves.
self.r2.article_set.add(new_article2)
self.assertEqual(new_article2.reporter.id, self.r2.id)
self.assertQuerysetEqual(self.r2.article_set.all(), ["<Article: Paul's story>"])
# Adding an object of the wrong type raises TypeError.
with transaction.atomic():
with six.assertRaisesRegex(self, TypeError,
"'Article' instance expected, got <Reporter.*"):
self.r.article_set.add(self.r2)
self.assertQuerysetEqual(self.r.article_set.all(),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
def test_set(self):
new_article = self.r.article_set.create(headline="John's second story",
pub_date=datetime.date(2005, 7, 29))
new_article2 = self.r2.article_set.create(headline="Paul's story",
pub_date=datetime.date(2006, 1, 17))
# Assign the article to the reporter.
new_article2.reporter = self.r
new_article2.save()
self.assertEqual(repr(new_article2.reporter), "<Reporter: John Smith>")
self.assertEqual(new_article2.reporter.id, self.r.id)
self.assertQuerysetEqual(self.r.article_set.all(), [
"<Article: John's second story>",
"<Article: Paul's story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(self.r2.article_set.all(), [])
# Set the article back again.
self.r2.article_set.set([new_article, new_article2])
self.assertQuerysetEqual(self.r.article_set.all(), ["<Article: This is a test>"])
self.assertQuerysetEqual(self.r2.article_set.all(),
[
"<Article: John's second story>",
"<Article: Paul's story>",
])
# Funny case - because the ForeignKey cannot be null,
# existing members of the set must remain.
self.r.article_set.set([new_article])
self.assertQuerysetEqual(self.r.article_set.all(),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(self.r2.article_set.all(), ["<Article: Paul's story>"])
def test_assign(self):
new_article = self.r.article_set.create(headline="John's second story",
pub_date=datetime.date(2005, 7, 29))
new_article2 = self.r2.article_set.create(headline="Paul's story",
pub_date=datetime.date(2006, 1, 17))
# Assign the article to the reporter directly using the descriptor.
new_article2.reporter = self.r
new_article2.save()
self.assertEqual(repr(new_article2.reporter), "<Reporter: John Smith>")
self.assertEqual(new_article2.reporter.id, self.r.id)
self.assertQuerysetEqual(self.r.article_set.all(), [
"<Article: John's second story>",
"<Article: Paul's story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(self.r2.article_set.all(), [])
# Set the article back again using set descriptor.
self.r2.article_set = [new_article, new_article2]
self.assertQuerysetEqual(self.r.article_set.all(), ["<Article: This is a test>"])
self.assertQuerysetEqual(self.r2.article_set.all(),
[
"<Article: John's second story>",
"<Article: Paul's story>",
])
# Funny case - assignment notation can only go so far; because the
# ForeignKey cannot be null, existing members of the set must remain.
self.r.article_set = [new_article]
self.assertQuerysetEqual(self.r.article_set.all(),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(self.r2.article_set.all(), ["<Article: Paul's story>"])
# Reporter cannot be null - there should not be a clear or remove method
self.assertFalse(hasattr(self.r2.article_set, 'remove'))
self.assertFalse(hasattr(self.r2.article_set, 'clear'))
def test_selects(self):
self.r.article_set.create(headline="John's second story",
pub_date=datetime.date(2005, 7, 29))
self.r2.article_set.create(headline="Paul's story",
pub_date=datetime.date(2006, 1, 17))
# Reporter objects have access to their related Article objects.
self.assertQuerysetEqual(self.r.article_set.all(), [
"<Article: John's second story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(self.r.article_set.filter(headline__startswith='This'),
["<Article: This is a test>"])
self.assertEqual(self.r.article_set.count(), 2)
self.assertEqual(self.r2.article_set.count(), 1)
# Get articles by id
self.assertQuerysetEqual(Article.objects.filter(id__exact=self.a.id),
["<Article: This is a test>"])
self.assertQuerysetEqual(Article.objects.filter(pk=self.a.id),
["<Article: This is a test>"])
# Query on an article property
self.assertQuerysetEqual(Article.objects.filter(headline__startswith='This'),
["<Article: This is a test>"])
# The API automatically follows relationships as far as you need.
# Use double underscores to separate relationships.
# This works as many levels deep as you want. There's no limit.
# Find all Articles for any Reporter whose first name is "John".
self.assertQuerysetEqual(Article.objects.filter(reporter__first_name__exact='John'),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
# Check that implied __exact also works
self.assertQuerysetEqual(Article.objects.filter(reporter__first_name='John'),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
# Query twice over the related field.
self.assertQuerysetEqual(
Article.objects.filter(reporter__first_name__exact='John',
reporter__last_name__exact='Smith'),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
# The underlying query only makes one join when a related table is referenced twice.
queryset = Article.objects.filter(reporter__first_name__exact='John',
reporter__last_name__exact='Smith')
self.assertNumQueries(1, list, queryset)
self.assertEqual(queryset.query.get_compiler(queryset.db).as_sql()[0].count('INNER JOIN'), 1)
# The automatically joined table has a predictable name.
self.assertQuerysetEqual(
Article.objects.filter(reporter__first_name__exact='John').extra(
where=["many_to_one_reporter.last_name='Smith'"]),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
# ... and should work fine with the unicode that comes out of forms.Form.cleaned_data
self.assertQuerysetEqual(
(Article.objects
.filter(reporter__first_name__exact='John')
.extra(where=["many_to_one_reporter.last_name='%s'" % 'Smith'])),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
# Find all Articles for a Reporter.
# Use direct ID check, pk check, and object comparison
self.assertQuerysetEqual(
Article.objects.filter(reporter__id__exact=self.r.id),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(
Article.objects.filter(reporter__pk=self.r.id),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(
Article.objects.filter(reporter=self.r.id),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(
Article.objects.filter(reporter=self.r),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(
Article.objects.filter(reporter__in=[self.r.id, self.r2.id]).distinct(),
[
"<Article: John's second story>",
"<Article: Paul's story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(
Article.objects.filter(reporter__in=[self.r, self.r2]).distinct(),
[
"<Article: John's second story>",
"<Article: Paul's story>",
"<Article: This is a test>",
])
# You can also use a queryset instead of a literal list of instances.
# The queryset must be reduced to a list of values using values(),
# then converted into a query
self.assertQuerysetEqual(
Article.objects.filter(
reporter__in=Reporter.objects.filter(first_name='John').values('pk').query
).distinct(),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
def test_reverse_selects(self):
a3 = Article.objects.create(id=None, headline="Third article",
pub_date=datetime.date(2005, 7, 27), reporter_id=self.r.id)
Article.objects.create(id=None, headline="Fourth article",
pub_date=datetime.date(2005, 7, 27), reporter_id=str(self.r.id))
# Reporters can be queried
self.assertQuerysetEqual(Reporter.objects.filter(id__exact=self.r.id),
["<Reporter: John Smith>"])
self.assertQuerysetEqual(Reporter.objects.filter(pk=self.r.id),
["<Reporter: John Smith>"])
self.assertQuerysetEqual(Reporter.objects.filter(first_name__startswith='John'),
["<Reporter: John Smith>"])
# Reporters can query in opposite direction of ForeignKey definition
self.assertQuerysetEqual(Reporter.objects.filter(article__id__exact=self.a.id),
["<Reporter: John Smith>"])
self.assertQuerysetEqual(Reporter.objects.filter(article__pk=self.a.id),
["<Reporter: John Smith>"])
self.assertQuerysetEqual(Reporter.objects.filter(article=self.a.id),
["<Reporter: John Smith>"])
self.assertQuerysetEqual(Reporter.objects.filter(article=self.a),
["<Reporter: John Smith>"])
self.assertQuerysetEqual(
Reporter.objects.filter(article__in=[self.a.id, a3.id]).distinct(),
["<Reporter: John Smith>"])
self.assertQuerysetEqual(
Reporter.objects.filter(article__in=[self.a.id, a3]).distinct(),
["<Reporter: John Smith>"])
self.assertQuerysetEqual(
Reporter.objects.filter(article__in=[self.a, a3]).distinct(),
["<Reporter: John Smith>"])
self.assertQuerysetEqual(
Reporter.objects.filter(article__headline__startswith='T'),
["<Reporter: John Smith>", "<Reporter: John Smith>"],
ordered=False
)
self.assertQuerysetEqual(
Reporter.objects.filter(article__headline__startswith='T').distinct(),
["<Reporter: John Smith>"])
# Counting in the opposite direction works in conjunction with distinct()
self.assertEqual(
Reporter.objects.filter(article__headline__startswith='T').count(), 2)
self.assertEqual(
Reporter.objects.filter(article__headline__startswith='T').distinct().count(), 1)
# Queries can go round in circles.
self.assertQuerysetEqual(
Reporter.objects.filter(article__reporter__first_name__startswith='John'),
[
"<Reporter: John Smith>",
"<Reporter: John Smith>",
"<Reporter: John Smith>",
],
ordered=False
)
self.assertQuerysetEqual(
Reporter.objects.filter(article__reporter__first_name__startswith='John').distinct(),
["<Reporter: John Smith>"])
self.assertQuerysetEqual(
Reporter.objects.filter(article__reporter__exact=self.r).distinct(),
["<Reporter: John Smith>"])
# Check that implied __exact also works.
self.assertQuerysetEqual(
Reporter.objects.filter(article__reporter=self.r).distinct(),
["<Reporter: John Smith>"])
# It's possible to use values() calls across many-to-one relations.
# (Note, too, that we clear the ordering here so as not to drag the
# 'headline' field into the columns being used to determine uniqueness)
d = {'reporter__first_name': 'John', 'reporter__last_name': 'Smith'}
self.assertEqual([d],
list(Article.objects.filter(reporter=self.r).distinct().order_by()
.values('reporter__first_name', 'reporter__last_name')))
def test_select_related(self):
# Check that Article.objects.select_related().dates() works properly when
# there are multiple Articles with the same date but different foreign-key
# objects (Reporters).
r1 = Reporter.objects.create(first_name='Mike', last_name='Royko', email='royko@suntimes.com')
r2 = Reporter.objects.create(first_name='John', last_name='Kass', email='jkass@tribune.com')
Article.objects.create(headline='First', pub_date=datetime.date(1980, 4, 23), reporter=r1)
Article.objects.create(headline='Second', pub_date=datetime.date(1980, 4, 23), reporter=r2)
self.assertEqual(list(Article.objects.select_related().dates('pub_date', 'day')),
[
datetime.date(1980, 4, 23),
datetime.date(2005, 7, 27),
])
self.assertEqual(list(Article.objects.select_related().dates('pub_date', 'month')),
[
datetime.date(1980, 4, 1),
datetime.date(2005, 7, 1),
])
self.assertEqual(list(Article.objects.select_related().dates('pub_date', 'year')),
[
datetime.date(1980, 1, 1),
datetime.date(2005, 1, 1),
])
def test_delete(self):
self.r.article_set.create(headline="John's second story",
pub_date=datetime.date(2005, 7, 29))
self.r2.article_set.create(headline="Paul's story",
pub_date=datetime.date(2006, 1, 17))
Article.objects.create(id=None, headline="Third article",
pub_date=datetime.date(2005, 7, 27), reporter_id=self.r.id)
Article.objects.create(id=None, headline="Fourth article",
pub_date=datetime.date(2005, 7, 27), reporter_id=str(self.r.id))
# If you delete a reporter, his articles will be deleted.
self.assertQuerysetEqual(Article.objects.all(),
[
"<Article: Fourth article>",
"<Article: John's second story>",
"<Article: Paul's story>",
"<Article: Third article>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(Reporter.objects.order_by('first_name'),
[
"<Reporter: John Smith>",
"<Reporter: Paul Jones>",
])
self.r2.delete()
self.assertQuerysetEqual(Article.objects.all(),
[
"<Article: Fourth article>",
"<Article: John's second story>",
"<Article: Third article>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(Reporter.objects.order_by('first_name'),
["<Reporter: John Smith>"])
# You can delete using a JOIN in the query.
Reporter.objects.filter(article__headline__startswith='This').delete()
self.assertQuerysetEqual(Reporter.objects.all(), [])
self.assertQuerysetEqual(Article.objects.all(), [])
def test_explicit_fk(self):
# Create a new Article with get_or_create using an explicit value
# for a ForeignKey.
a2, created = Article.objects.get_or_create(id=None,
headline="John's second test",
pub_date=datetime.date(2011, 5, 7),
reporter_id=self.r.id)
self.assertTrue(created)
self.assertEqual(a2.reporter.id, self.r.id)
# You can specify filters containing the explicit FK value.
self.assertQuerysetEqual(
Article.objects.filter(reporter_id__exact=self.r.id),
[
"<Article: John's second test>",
"<Article: This is a test>",
])
# Create an Article by Paul for the same date.
a3 = Article.objects.create(id=None, headline="Paul's commentary",
pub_date=datetime.date(2011, 5, 7),
reporter_id=self.r2.id)
self.assertEqual(a3.reporter.id, self.r2.id)
# Get should respect explicit foreign keys as well.
self.assertRaises(MultipleObjectsReturned,
Article.objects.get, reporter_id=self.r.id)
self.assertEqual(repr(a3),
repr(Article.objects.get(reporter_id=self.r2.id,
pub_date=datetime.date(2011, 5, 7))))
def test_deepcopy_and_circular_references(self):
        # Regression for #12876 -- Model methods that include queries that
        # recurse don't cause recursion depth problems under deepcopy.
self.r.cached_query = Article.objects.filter(reporter=self.r)
self.assertEqual(repr(deepcopy(self.r)), "<Reporter: John Smith>")
def test_manager_class_caching(self):
r1 = Reporter.objects.create(first_name='Mike')
r2 = Reporter.objects.create(first_name='John')
# Same twice
self.assertIs(r1.article_set.__class__, r1.article_set.__class__)
# Same as each other
self.assertIs(r1.article_set.__class__, r2.article_set.__class__)
def test_create_relation_with_ugettext_lazy(self):
reporter = Reporter.objects.create(first_name='John',
last_name='Smith',
email='john.smith@example.com')
lazy = ugettext_lazy('test')
reporter.article_set.create(headline=lazy,
pub_date=datetime.date(2011, 6, 10))
notlazy = six.text_type(lazy)
article = reporter.article_set.get()
self.assertEqual(article.headline, notlazy)
def test_values_list_exception(self):
expected_message = "Cannot resolve keyword 'notafield' into field. Choices are: %s"
self.assertRaisesMessage(FieldError,
expected_message % ', '.join(sorted(f.name for f in Reporter._meta.get_fields())),
Article.objects.values_list,
'reporter__notafield')
self.assertRaisesMessage(
FieldError,
expected_message % ', '.join(['EXTRA'] + sorted(f.name for f in Article._meta.get_fields())),
Article.objects.extra(select={'EXTRA': 'EXTRA_SELECT'}).values_list,
'notafield'
)
def test_fk_assignment_and_related_object_cache(self):
# Tests of ForeignKey assignment and the related-object cache (see #6886).
p = Parent.objects.create(name="Parent")
c = Child.objects.create(name="Child", parent=p)
# Look up the object again so that we get a "fresh" object.
c = Child.objects.get(name="Child")
p = c.parent
# Accessing the related object again returns the exactly same object.
self.assertIs(c.parent, p)
# But if we kill the cache, we get a new object.
del c._parent_cache
self.assertIsNot(c.parent, p)
# Assigning a new object results in that object getting cached immediately.
p2 = Parent.objects.create(name="Parent 2")
c.parent = p2
self.assertIs(c.parent, p2)
# Assigning None succeeds if field is null=True.
p.bestchild = None
self.assertIsNone(p.bestchild)
# bestchild should still be None after saving.
p.save()
self.assertIsNone(p.bestchild)
# bestchild should still be None after fetching the object again.
p = Parent.objects.get(name="Parent")
self.assertIsNone(p.bestchild)
# Assigning None fails: Child.parent is null=False.
self.assertRaises(ValueError, setattr, c, "parent", None)
# You also can't assign an object of the wrong type here
self.assertRaises(ValueError, setattr, c, "parent", First(id=1, second=1))
# Nor can you explicitly assign None to Child.parent during object
# creation (regression for #9649).
self.assertRaises(ValueError, Child, name='xyzzy', parent=None)
self.assertRaises(ValueError, Child.objects.create, name='xyzzy', parent=None)
# Creation using keyword argument should cache the related object.
p = Parent.objects.get(name="Parent")
c = Child(parent=p)
self.assertIs(c.parent, p)
# Creation using keyword argument and unsaved related instance (#8070).
p = Parent()
msg = "save() prohibited to prevent data loss due to unsaved related object 'parent'."
with self.assertRaisesMessage(ValueError, msg):
Child.objects.create(parent=p)
msg = "save() prohibited to prevent data loss due to unsaved related object 'parent'."
with self.assertRaisesMessage(ValueError, msg):
ToFieldChild.objects.create(parent=p)
# Creation using attname keyword argument and an id will cause the
# related object to be fetched.
p = Parent.objects.get(name="Parent")
c = Child(parent_id=p.id)
self.assertIsNot(c.parent, p)
self.assertEqual(c.parent, p)
def test_multiple_foreignkeys(self):
# Test of multiple ForeignKeys to the same model (bug #7125).
c1 = Category.objects.create(name='First')
c2 = Category.objects.create(name='Second')
c3 = Category.objects.create(name='Third')
r1 = Record.objects.create(category=c1)
r2 = Record.objects.create(category=c1)
r3 = Record.objects.create(category=c2)
r4 = Record.objects.create(category=c2)
r5 = Record.objects.create(category=c3)
Relation.objects.create(left=r1, right=r2)
Relation.objects.create(left=r3, right=r4)
Relation.objects.create(left=r1, right=r3)
Relation.objects.create(left=r5, right=r2)
Relation.objects.create(left=r3, right=r2)
q1 = Relation.objects.filter(left__category__name__in=['First'], right__category__name__in=['Second'])
self.assertQuerysetEqual(q1, ["<Relation: First - Second>"])
q2 = Category.objects.filter(record__left_set__right__category__name='Second').order_by('name')
self.assertQuerysetEqual(q2, ["<Category: First>", "<Category: Second>"])
p = Parent.objects.create(name="Parent")
c = Child.objects.create(name="Child", parent=p)
self.assertRaises(ValueError, Child.objects.create, name="Grandchild", parent=c)
def test_fk_instantiation_outside_model(self):
# Regression for #12190 -- Should be able to instantiate a FK outside
# of a model, and interrogate its related field.
cat = models.ForeignKey(Category, models.CASCADE)
self.assertEqual('id', cat.remote_field.get_related_field().name)
def test_relation_unsaved(self):
# Test that the <field>_set manager does not join on Null value fields (#17541)
Third.objects.create(name='Third 1')
Third.objects.create(name='Third 2')
th = Third(name="testing")
        # The object isn't saved and thus the relation field is null - we won't even
# execute a query in this case.
with self.assertNumQueries(0):
self.assertEqual(th.child_set.count(), 0)
th.save()
        # Now the model is saved, so we will need to execute a query.
with self.assertNumQueries(1):
self.assertEqual(th.child_set.count(), 0)
def test_related_object(self):
public_school = School.objects.create(is_public=True)
public_student = Student.objects.create(school=public_school)
private_school = School.objects.create(is_public=False)
private_student = Student.objects.create(school=private_school)
# Only one school is available via all() due to the custom default manager.
self.assertQuerysetEqual(
School.objects.all(),
["<School: School object>"]
)
self.assertEqual(public_student.school, public_school)
        # Make sure the base manager is used so that a student can still access
# its related school even if the default manager doesn't normally
# allow it.
self.assertEqual(private_student.school, private_school)
# If the manager is marked "use_for_related_fields", it'll get used instead
# of the "bare" queryset. Usually you'd define this as a property on the class,
# but this approximates that in a way that's easier in tests.
School.objects.use_for_related_fields = True
try:
private_student = Student.objects.get(pk=private_student.pk)
self.assertRaises(School.DoesNotExist, lambda: private_student.school)
finally:
School.objects.use_for_related_fields = False
def test_hasattr_related_object(self):
# The exception raised on attribute access when a related object
# doesn't exist should be an instance of a subclass of `AttributeError`
# refs #21563
self.assertFalse(hasattr(Article(), 'reporter'))
|
ihsanudin/odoo | refs/heads/8.0 | addons/l10n_be_hr_payroll_account/__init__.py | 430 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
jokajak/itweb | refs/heads/master | data/env/lib/python2.6/site-packages/simplejson-2.0.9-py2.6-linux-x86_64.egg/simplejson/tests/test_pass1.py | 259 | from unittest import TestCase
import simplejson as json
# from http://json.org/JSON_checker/test/pass1.json
JSON = r'''
[
"JSON Test Pattern pass1",
{"object with 1 member":["array with 1 element"]},
{},
[],
-42,
true,
false,
null,
{
"integer": 1234567890,
"real": -9876.543210,
"e": 0.123456789e-12,
"E": 1.234567890E+34,
"": 23456789012E666,
"zero": 0,
"one": 1,
"space": " ",
"quote": "\"",
"backslash": "\\",
"controls": "\b\f\n\r\t",
"slash": "/ & \/",
"alpha": "abcdefghijklmnopqrstuvwyz",
"ALPHA": "ABCDEFGHIJKLMNOPQRSTUVWYZ",
"digit": "0123456789",
"special": "`1~!@#$%^&*()_+-={':[,]}|;.</>?",
"hex": "\u0123\u4567\u89AB\uCDEF\uabcd\uef4A",
"true": true,
"false": false,
"null": null,
"array":[ ],
"object":{ },
"address": "50 St. James Street",
"url": "http://www.JSON.org/",
"comment": "// /* <!-- --",
"# -- --> */": " ",
" s p a c e d " :[1,2 , 3
,
4 , 5 , 6 ,7 ],
"compact": [1,2,3,4,5,6,7],
"jsontext": "{\"object with 1 member\":[\"array with 1 element\"]}",
"quotes": "" \u0022 %22 0x22 034 "",
"\/\\\"\uCAFE\uBABE\uAB98\uFCDE\ubcda\uef4A\b\f\n\r\t`1~!@#$%^&*()_+-=[]{}|;:',./<>?"
: "A key can be any string"
},
0.5 ,98.6
,
99.44
,
1066
,"rosebud"]
'''
class TestPass1(TestCase):
def test_parse(self):
# test in/out equivalence and parsing
res = json.loads(JSON)
out = json.dumps(res)
self.assertEquals(res, json.loads(out))
try:
json.dumps(res, allow_nan=False)
except ValueError:
pass
else:
self.fail("23456789012E666 should be out of range")
|
ilastikdev/ilastik | refs/heads/master | ilastik/applets/base/__init__.py | 115 | ###############################################################################
# ilastik: interactive learning and segmentation toolkit
#
# Copyright (C) 2011-2014, the ilastik developers
# <team@ilastik.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# In addition, as a special exception, the copyright holders of
# ilastik give you permission to combine ilastik with applets,
# workflows and plugins which are not covered under the GNU
# General Public License.
#
# See the LICENSE file for details. License information is also available
# on the ilastik web site at:
# http://ilastik.org/license.html
###############################################################################
|
gptech/ansible | refs/heads/devel | lib/ansible/modules/notification/flowdock.py | 35 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2013 Matt Coddington <coddington@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: flowdock
version_added: "1.2"
author: "Matt Coddington (@mcodd)"
short_description: Send a message to a flowdock
description:
- Send a message to a flowdock team inbox or chat using the push API (see https://www.flowdock.com/api/team-inbox and https://www.flowdock.com/api/chat)
options:
token:
description:
- API token.
required: true
type:
description:
- Whether to post to 'inbox' or 'chat'
required: true
choices: [ "inbox", "chat" ]
msg:
description:
- Content of the message
required: true
tags:
description:
- tags of the message, separated by commas
required: false
external_user_name:
description:
- (chat only - required) Name of the "user" sending the message
required: false
from_address:
description:
- (inbox only - required) Email address of the message sender
required: false
source:
description:
- (inbox only - required) Human readable identifier of the application that uses the Flowdock API
required: false
subject:
description:
- (inbox only - required) Subject line of the message
required: false
from_name:
description:
- (inbox only) Name of the message sender
required: false
reply_to:
description:
- (inbox only) Email address for replies
required: false
project:
description:
- (inbox only) Human readable identifier for more detailed message categorization
required: false
link:
description:
- (inbox only) Link associated with the message. This will be used to link the message subject in Team Inbox.
required: false
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: 1.5.1
requirements: [ ]
'''
EXAMPLES = '''
- flowdock:
type: inbox
token: AAAAAA
from_address: user@example.com
source: my cool app
msg: test from ansible
subject: test subject
- flowdock:
type: chat
token: AAAAAA
external_user_name: testuser
msg: test from ansible
tags: tag1,tag2,tag3
'''
import urllib
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
token=dict(required=True, no_log=True),
msg=dict(required=True),
type=dict(required=True, choices=["inbox","chat"]),
external_user_name=dict(required=False),
from_address=dict(required=False),
source=dict(required=False),
subject=dict(required=False),
from_name=dict(required=False),
reply_to=dict(required=False),
project=dict(required=False),
tags=dict(required=False),
link=dict(required=False),
validate_certs = dict(default='yes', type='bool'),
),
supports_check_mode=True
)
type = module.params["type"]
token = module.params["token"]
if type == 'inbox':
url = "https://api.flowdock.com/v1/messages/team_inbox/%s" % (token)
else:
url = "https://api.flowdock.com/v1/messages/chat/%s" % (token)
params = {}
# required params
params['content'] = module.params["msg"]
# required params for the 'chat' type
if module.params['external_user_name']:
if type == 'inbox':
module.fail_json(msg="external_user_name is not valid for the 'inbox' type")
else:
params['external_user_name'] = module.params["external_user_name"]
elif type == 'chat':
module.fail_json(msg="%s is required for the 'inbox' type" % item)
# required params for the 'inbox' type
for item in [ 'from_address', 'source', 'subject' ]:
if module.params[item]:
if type == 'chat':
module.fail_json(msg="%s is not valid for the 'chat' type" % item)
else:
params[item] = module.params[item]
elif type == 'inbox':
module.fail_json(msg="%s is required for the 'inbox' type" % item)
# optional params
if module.params["tags"]:
params['tags'] = module.params["tags"]
# optional params for the 'inbox' type
for item in [ 'from_name', 'reply_to', 'project', 'link' ]:
if module.params[item]:
if type == 'chat':
module.fail_json(msg="%s is not valid for the 'chat' type" % item)
else:
params[item] = module.params[item]
# If we're in check mode, just exit pretending like we succeeded
if module.check_mode:
module.exit_json(changed=False)
# Send the data to Flowdock
data = urllib.urlencode(params)
response, info = fetch_url(module, url, data=data)
if info['status'] != 200:
module.fail_json(msg="unable to send msg: %s" % info['msg'])
module.exit_json(changed=True, msg=module.params["msg"])
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main()
|
savoirfairelinux/odoo | refs/heads/master | addons/board/controllers.py | 44 | # -*- coding: utf-8 -*-
from xml.etree import ElementTree
from openerp.addons.web.controllers.main import load_actions_from_ir_values
from openerp.addons.web.http import Controller, route, request
class Board(Controller):
@route('/board/add_to_dashboard', type='json', auth='user')
def add_to_dashboard(self, menu_id, action_id, context_to_save, domain, view_mode, name=''):
# FIXME move this method to board.board model
dashboard_action = load_actions_from_ir_values('action', 'tree_but_open',
[('ir.ui.menu', menu_id)], False)
if dashboard_action:
action = dashboard_action[0][2]
if action['res_model'] == 'board.board' and action['views'][0][1] == 'form':
                # Maybe we should check the content instead of the model being board.board?
view_id = action['views'][0][0]
board = request.session.model(action['res_model']).fields_view_get(view_id, 'form')
if board and 'arch' in board:
xml = ElementTree.fromstring(board['arch'])
column = xml.find('./board/column')
if column is not None:
new_action = ElementTree.Element('action', {
'name': str(action_id),
'string': name,
'view_mode': view_mode,
'context': str(context_to_save),
'domain': str(domain)
})
column.insert(0, new_action)
arch = ElementTree.tostring(xml, 'utf-8')
return request.session.model('ir.ui.view.custom').create({
'user_id': request.session.uid,
'ref_id': view_id,
'arch': arch
}, request.context)
return False
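def _example_usage():  # pragma: no cover - illustrative sketch only
    """Hedged usage sketch: calling the JSON route above over HTTP.
    All values are made-up placeholders, and an authenticated web session
    cookie is assumed (the route is declared with auth='user').
    """
    import requests
    payload = {
        'jsonrpc': '2.0',
        'method': 'call',
        'params': {'menu_id': 17, 'action_id': 42, 'context_to_save': {},
                   'domain': [], 'view_mode': 'list', 'name': 'My report'},
    }
    requests.post('http://localhost:8069/board/add_to_dashboard',
                  json=payload, cookies={'session_id': '<session token>'})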
|
rleigh-dundee/openmicroscopy | refs/heads/develop | components/tools/OmeroPy/src/omero/plugins/download.py | 4 | #!/usr/bin/env python
"""
download plugin
Plugin read by omero.cli.Cli during initialization. The method(s)
defined here will be added to the Cli class for later use.
Copyright 2007 Glencoe Software, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
"""
import sys
from omero.cli import BaseControl, CLI
HELP = """Download the given file id to the given filename"""
class DownloadControl(BaseControl):
def _configure(self, parser):
parser.add_argument("id", help="OriginalFile id")
parser.add_argument("filename", help="Local filename to be saved to. '-' for stdout")
parser.set_defaults(func=self.__call__)
def __call__(self, args):
from omero_model_OriginalFileI import OriginalFileI as OFile
orig_file = OFile(long(args.id))
target_file = str(args.filename)
client = self.ctx.conn(args)
if target_file == "-":
client.download(orig_file, filehandle = sys.stdout)
sys.stdout.flush()
else:
client.download(orig_file, target_file)
try:
register("download", DownloadControl, HELP)
except NameError:
if __name__ == "__main__":
cli = CLI()
cli.register("download", DownloadControl, HELP)
cli.invoke(sys.argv[1:])
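# Hedged usage sketch (illustrative only; the id and filename are made up):
#
#   $ omero download 123 image.tif   # save OriginalFile 123 to image.tif
#   $ omero download 123 -           # stream the file contents to stdout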
|
SCPR/accountability-tracker | refs/heads/master | maplight_finance/migrations/0001_initial.py | 1 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Initiative',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('initiative_identifier', models.CharField(max_length=500, null=True, verbose_name=b'Initiative', blank=True)),
('initiative_slug', models.SlugField(max_length=140, null=True, verbose_name=b'Initiative Slug', blank=True)),
('description', models.CharField(max_length=500, null=True, verbose_name=b'Initiative description', blank=True)),
('document_url', models.URLField(max_length=1024, null=True, verbose_name=b'URL to analysis', blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='InitiativeContributor',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('stance', models.CharField(max_length=500, null=True, verbose_name=b'Stance on Initiative', blank=True)),
('transaction_name', models.CharField(max_length=500, null=True, verbose_name=b'Transaction Name', blank=True)),
('committee_id', models.CharField(max_length=500, null=True, blank=True)),
('name', models.CharField(max_length=500, null=True, verbose_name=b"Contributor's Name", blank=True)),
('employer', models.CharField(max_length=500, null=True, verbose_name=b"Contributor's Employer", blank=True)),
('occupation', models.CharField(max_length=500, null=True, verbose_name=b"Contributor's Occupation", blank=True)),
('city', models.CharField(max_length=500, null=True, verbose_name=b"Contributor's City", blank=True)),
('state', models.CharField(max_length=500, null=True, verbose_name=b"Contributor's State", blank=True)),
('zip_code', models.CharField(max_length=500, null=True, verbose_name=b"Contributor's Zip code", blank=True)),
('id_number', models.IntegerField(max_length=500, null=True, blank=True)),
('payment_type', models.CharField(max_length=500, null=True, blank=True)),
('amount', models.DecimalField(null=True, verbose_name=b"Contributor's Amount", max_digits=11, decimal_places=2, blank=True)),
('transaction_date', models.DateField(null=True, verbose_name=b'Transaction Date', blank=True)),
('filed_date', models.DateField(null=True, verbose_name=b'Filing Date', blank=True)),
('transaction_number', models.CharField(max_length=500, null=True, blank=True)),
('is_individual', models.CharField(max_length=500, null=True, blank=True)),
('donor_type', models.CharField(max_length=500, null=True, blank=True)),
('industry', models.CharField(max_length=500, null=True, blank=True)),
('created_date', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'date created')),
('initiative_identifier', models.ForeignKey(related_name=b'initiative_initiative_identifier', blank=True, to='maplight_finance.Initiative', null=True)),
],
options={
},
bases=(models.Model,),
),
]
|
rev2004/android2cloud.app-engine | refs/heads/master | google_appengine/lib/django/django/db/models/query.py | 32 | from django.db import backend, connection, transaction
from django.db.models.fields import DateField, FieldDoesNotExist
from django.db.models.fields.generic import GenericRelation
from django.db.models import signals
from django.dispatch import dispatcher
from django.utils.datastructures import SortedDict
import operator
import re
# For Python 2.3
if not hasattr(__builtins__, 'set'):
from sets import Set as set
# The string constant used to separate query parts
LOOKUP_SEPARATOR = '__'
# The list of valid query types
QUERY_TERMS = (
'exact', 'iexact', 'contains', 'icontains',
'gt', 'gte', 'lt', 'lte', 'in',
'startswith', 'istartswith', 'endswith', 'iendswith',
'range', 'year', 'month', 'day', 'isnull', 'search',
)
# Size of each "chunk" for get_iterator calls.
# Larger values are slightly faster at the expense of more storage space.
GET_ITERATOR_CHUNK_SIZE = 100
class EmptyResultSet(Exception):
pass
####################
# HELPER FUNCTIONS #
####################
# Django currently supports two forms of ordering.
# Form 1 (deprecated) example:
# order_by=(('pub_date', 'DESC'), ('headline', 'ASC'), (None, 'RANDOM'))
# Form 2 (new-style) example:
# order_by=('-pub_date', 'headline', '?')
# Form 1 is deprecated and will no longer be supported for Django's first
# official release. The following code converts from Form 1 to Form 2.
LEGACY_ORDERING_MAPPING = {'ASC': '_', 'DESC': '-_', 'RANDOM': '?'}
def handle_legacy_orderlist(order_list):
if not order_list or isinstance(order_list[0], basestring):
return order_list
else:
import warnings
new_order_list = [LEGACY_ORDERING_MAPPING[j.upper()].replace('_', str(i)) for i, j in order_list]
warnings.warn("%r ordering syntax is deprecated. Use %r instead." % (order_list, new_order_list), DeprecationWarning)
return new_order_list
def orderfield2column(f, opts):
try:
return opts.get_field(f, False).column
except FieldDoesNotExist:
return f
def orderlist2sql(order_list, opts, prefix=''):
if prefix.endswith('.'):
prefix = backend.quote_name(prefix[:-1]) + '.'
output = []
for f in handle_legacy_orderlist(order_list):
if f.startswith('-'):
output.append('%s%s DESC' % (prefix, backend.quote_name(orderfield2column(f[1:], opts))))
elif f == '?':
output.append(backend.get_random_function_sql())
else:
output.append('%s%s ASC' % (prefix, backend.quote_name(orderfield2column(f, opts))))
return ', '.join(output)
def quote_only_if_word(word):
if re.search('\W', word): # Don't quote if there are spaces or non-word chars.
return word
else:
return backend.quote_name(word)
class QuerySet(object):
"Represents a lazy database lookup for a set of objects"
def __init__(self, model=None):
self.model = model
self._filters = Q()
self._order_by = None # Ordering, e.g. ('date', '-name'). If None, use model's ordering.
self._select_related = False # Whether to fill cache for related objects.
self._max_related_depth = 0 # Maximum "depth" for select_related
self._distinct = False # Whether the query should use SELECT DISTINCT.
self._select = {} # Dictionary of attname -> SQL.
self._where = [] # List of extra WHERE clauses to use.
self._params = [] # List of params to use for extra WHERE clauses.
self._tables = [] # List of extra tables to use.
self._offset = None # OFFSET clause.
self._limit = None # LIMIT clause.
self._result_cache = None
########################
# PYTHON MAGIC METHODS #
########################
def __repr__(self):
return repr(self._get_data())
def __len__(self):
return len(self._get_data())
def __iter__(self):
return iter(self._get_data())
def __getitem__(self, k):
"Retrieve an item or slice from the set of results."
if not isinstance(k, (slice, int)):
raise TypeError
assert (not isinstance(k, slice) and (k >= 0)) \
or (isinstance(k, slice) and (k.start is None or k.start >= 0) and (k.stop is None or k.stop >= 0)), \
"Negative indexing is not supported."
if self._result_cache is None:
if isinstance(k, slice):
# Offset:
if self._offset is None:
offset = k.start
elif k.start is None:
offset = self._offset
else:
offset = self._offset + k.start
# Now adjust offset to the bounds of any existing limit:
if self._limit is not None and k.start is not None:
limit = self._limit - k.start
else:
limit = self._limit
# Limit:
if k.stop is not None and k.start is not None:
if limit is None:
limit = k.stop - k.start
else:
limit = min((k.stop - k.start), limit)
else:
if limit is None:
limit = k.stop
else:
if k.stop is not None:
limit = min(k.stop, limit)
if k.step is None:
return self._clone(_offset=offset, _limit=limit)
else:
return list(self._clone(_offset=offset, _limit=limit))[::k.step]
else:
try:
return list(self._clone(_offset=k, _limit=1))[0]
except self.model.DoesNotExist, e:
raise IndexError, e.args
else:
return self._result_cache[k]
def __and__(self, other):
combined = self._combine(other)
combined._filters = self._filters & other._filters
return combined
def __or__(self, other):
combined = self._combine(other)
combined._filters = self._filters | other._filters
return combined
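    # Hedged worked example (illustrative only): slicing composes with any
    # offset/limit already set on the queryset. With _offset=10 and _limit=20,
    # qs[5:8] computes offset 15 (10 + 5) and limit 3 (min(8 - 5, 20 - 5)),
    # i.e. rows 15..17 of the underlying query.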
####################################
# METHODS THAT DO DATABASE QUERIES #
####################################
def iterator(self):
"Performs the SELECT database lookup of this QuerySet."
try:
select, sql, params = self._get_sql_clause()
except EmptyResultSet:
raise StopIteration
# self._select is a dictionary, and dictionaries' key order is
# undefined, so we convert it to a list of tuples.
extra_select = self._select.items()
cursor = connection.cursor()
cursor.execute("SELECT " + (self._distinct and "DISTINCT " or "") + ",".join(select) + sql, params)
fill_cache = self._select_related
index_end = len(self.model._meta.fields)
while 1:
rows = cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)
if not rows:
raise StopIteration
for row in rows:
if fill_cache:
obj, index_end = get_cached_row(klass=self.model, row=row,
index_start=0, max_depth=self._max_related_depth)
else:
obj = self.model(*row[:index_end])
for i, k in enumerate(extra_select):
setattr(obj, k[0], row[index_end+i])
yield obj
def count(self):
"""
Performs a SELECT COUNT() and returns the number of records as an
integer.
If the queryset is already cached (i.e. self._result_cache is set), this
simply returns the length of the cached result set, avoiding multiple
SELECT COUNT(*) calls.
"""
if self._result_cache is not None:
return len(self._result_cache)
counter = self._clone()
counter._order_by = ()
counter._select_related = False
offset = counter._offset
limit = counter._limit
counter._offset = None
counter._limit = None
try:
select, sql, params = counter._get_sql_clause()
except EmptyResultSet:
return 0
cursor = connection.cursor()
if self._distinct:
id_col = "%s.%s" % (backend.quote_name(self.model._meta.db_table),
backend.quote_name(self.model._meta.pk.column))
cursor.execute("SELECT COUNT(DISTINCT(%s))" % id_col + sql, params)
else:
cursor.execute("SELECT COUNT(*)" + sql, params)
count = cursor.fetchone()[0]
# Apply any offset and limit constraints manually, since using LIMIT or
# OFFSET in SQL doesn't change the output of COUNT.
if offset:
count = max(0, count - offset)
if limit:
count = min(limit, count)
return count
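# Worked example of the manual clamping above (illustrative only): with 100
# matching rows, _offset=95 and _limit=10, the SELECT COUNT(*) still reports
# 100, so count() returns min(10, max(0, 100 - 95)) = 5.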
def get(self, *args, **kwargs):
"Performs the SELECT and returns a single object matching the given keyword arguments."
clone = self.filter(*args, **kwargs)
# clean up SQL by removing unneeded ORDER BY
if not clone._order_by:
clone._order_by = ()
obj_list = list(clone)
if len(obj_list) < 1:
raise self.model.DoesNotExist, "%s matching query does not exist." % self.model._meta.object_name
assert len(obj_list) == 1, "get() returned more than one %s -- it returned %s! Lookup parameters were %s" % (self.model._meta.object_name, len(obj_list), kwargs)
return obj_list[0]
def create(self, **kwargs):
"""
Create a new object with the given kwargs, saving it to the database
and returning the created object.
"""
obj = self.model(**kwargs)
obj.save()
return obj
def get_or_create(self, **kwargs):
"""
Looks up an object with the given kwargs, creating one if necessary.
Returns a tuple of (object, created), where created is a boolean
specifying whether an object was created.
"""
assert len(kwargs), 'get_or_create() must be passed at least one keyword argument'
defaults = kwargs.pop('defaults', {})
try:
return self.get(**kwargs), False
except self.model.DoesNotExist:
params = dict([(k, v) for k, v in kwargs.items() if '__' not in k])
params.update(defaults)
obj = self.model(**params)
obj.save()
return obj, True
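# Hypothetical usage sketch (model and field names are illustrative, not
# from this file):
# obj, created = Person.objects.get_or_create(
# first_name='John', last_name='Lennon',
# defaults={'birthday': date(1940, 10, 9)})
# Lookup kwargs containing '__' are used only for the get(); create()
# receives the plain field kwargs merged with 'defaults'.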
def latest(self, field_name=None):
"""
Returns the latest object, according to the model's 'get_latest_by'
option or optional given field_name.
"""
latest_by = field_name or self.model._meta.get_latest_by
assert bool(latest_by), "latest() requires either a field_name parameter or 'get_latest_by' in the model"
assert self._limit is None and self._offset is None, \
"Cannot change a query once a slice has been taken."
return self._clone(_limit=1, _order_by=('-'+latest_by,)).get()
def in_bulk(self, id_list):
"""
Returns a dictionary mapping each of the given IDs to the object with
that ID.
"""
assert self._limit is None and self._offset is None, \
"Cannot use 'limit' or 'offset' with in_bulk"
assert isinstance(id_list, (tuple, list)), "in_bulk() must be provided with a list of IDs."
id_list = list(id_list)
if id_list == []:
return {}
qs = self._clone()
qs._where.append("%s.%s IN (%s)" % (backend.quote_name(self.model._meta.db_table), backend.quote_name(self.model._meta.pk.column), ",".join(['%s'] * len(id_list))))
qs._params.extend(id_list)
return dict([(obj._get_pk_val(), obj) for obj in qs.iterator()])
def delete(self):
"""
Deletes the records in the current QuerySet.
"""
assert self._limit is None and self._offset is None, \
"Cannot use 'limit' or 'offset' with delete."
del_query = self._clone()
# disable non-supported fields
del_query._select_related = False
del_query._order_by = []
# Delete objects in chunks to prevent the list of
# related objects from becoming too long
more_objects = True
while more_objects:
# Collect all the objects to be deleted in this chunk, and all the objects
# that are related to the objects that are to be deleted
seen_objs = SortedDict()
more_objects = False
for object in del_query[0:GET_ITERATOR_CHUNK_SIZE]:
more_objects = True
object._collect_sub_objects(seen_objs)
# If one or more objects were found, delete them.
# Otherwise, stop looping.
if more_objects:
delete_objects(seen_objs)
# Clear the result cache, in case this QuerySet gets reused.
self._result_cache = None
delete.alters_data = True
##################################################
# PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS #
##################################################
def values(self, *fields):
return self._clone(klass=ValuesQuerySet, _fields=fields)
def dates(self, field_name, kind, order='ASC'):
"""
Returns a list of datetime objects representing all available dates
for the given field_name, scoped to 'kind'.
"""
assert kind in ("month", "year", "day"), "'kind' must be one of 'year', 'month' or 'day'."
assert order in ('ASC', 'DESC'), "'order' must be either 'ASC' or 'DESC'."
# Let the FieldDoesNotExist exception propagate.
field = self.model._meta.get_field(field_name, many_to_many=False)
assert isinstance(field, DateField), "%r isn't a DateField." % field_name
return self._clone(klass=DateQuerySet, _field=field, _kind=kind, _order=order)
##################################################################
# PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
##################################################################
def filter(self, *args, **kwargs):
"Returns a new QuerySet instance with the args ANDed to the existing set."
return self._filter_or_exclude(None, *args, **kwargs)
def exclude(self, *args, **kwargs):
"Returns a new QuerySet instance with NOT (args) ANDed to the existing set."
return self._filter_or_exclude(QNot, *args, **kwargs)
def _filter_or_exclude(self, mapper, *args, **kwargs):
# mapper is a callable used to transform Q objects,
# or None for identity transform
if mapper is None:
mapper = lambda x: x
if len(args) > 0 or len(kwargs) > 0:
assert self._limit is None and self._offset is None, \
"Cannot filter a query once a slice has been taken."
clone = self._clone()
if len(kwargs) > 0:
clone._filters = clone._filters & mapper(Q(**kwargs))
if len(args) > 0:
clone._filters = clone._filters & reduce(operator.and_, map(mapper, args))
return clone
def complex_filter(self, filter_obj):
"""Returns a new QuerySet instance with filter_obj added to the filters.
filter_obj can be a Q object (has 'get_sql' method) or a dictionary of
keyword lookup arguments."""
# This exists to support framework features such as 'limit_choices_to',
# and usually it will be more natural to use other methods.
if hasattr(filter_obj, 'get_sql'):
return self._filter_or_exclude(None, filter_obj)
else:
return self._filter_or_exclude(None, **filter_obj)
def select_related(self, true_or_false=True, depth=0):
"Returns a new QuerySet instance with '_select_related' modified."
return self._clone(_select_related=true_or_false, _max_related_depth=depth)
def order_by(self, *field_names):
"Returns a new QuerySet instance with the ordering changed."
assert self._limit is None and self._offset is None, \
"Cannot reorder a query once a slice has been taken."
return self._clone(_order_by=field_names)
def distinct(self, true_or_false=True):
"Returns a new QuerySet instance with '_distinct' modified."
return self._clone(_distinct=true_or_false)
def extra(self, select=None, where=None, params=None, tables=None):
assert self._limit is None and self._offset is None, \
"Cannot change a query once a slice has been taken"
clone = self._clone()
if select: clone._select.update(select)
if where: clone._where.extend(where)
if params: clone._params.extend(params)
if tables: clone._tables.extend(tables)
return clone
###################
# PRIVATE METHODS #
###################
def _clone(self, klass=None, **kwargs):
if klass is None:
klass = self.__class__
c = klass()
c.model = self.model
c._filters = self._filters
c._order_by = self._order_by
c._select_related = self._select_related
c._max_related_depth = self._max_related_depth
c._distinct = self._distinct
c._select = self._select.copy()
c._where = self._where[:]
c._params = self._params[:]
c._tables = self._tables[:]
c._offset = self._offset
c._limit = self._limit
c.__dict__.update(kwargs)
return c
def _combine(self, other):
assert self._limit is None and self._offset is None \
and other._limit is None and other._offset is None, \
"Cannot combine queries once a slice has been taken."
assert self._distinct == other._distinct, \
"Cannot combine a unique query with a non-unique query"
# use 'other's order by
# (so that A.filter(args1) & A.filter(args2) does the same as
# A.filter(args1).filter(args2))
combined = other._clone()
if self._select: combined._select.update(self._select)
if self._where: combined._where.extend(self._where)
if self._params: combined._params.extend(self._params)
if self._tables: combined._tables.extend(self._tables)
# If 'self' is ordered and 'other' isn't, propagate 'self's ordering
if (self._order_by is not None and len(self._order_by) > 0) and \
(combined._order_by is None or len(combined._order_by) == 0):
combined._order_by = self._order_by
return combined
def _get_data(self):
if self._result_cache is None:
self._result_cache = list(self.iterator())
return self._result_cache
def _get_sql_clause(self):
opts = self.model._meta
# Construct the fundamental parts of the query: SELECT X FROM Y WHERE Z.
select = ["%s.%s" % (backend.quote_name(opts.db_table), backend.quote_name(f.column)) for f in opts.fields]
tables = [quote_only_if_word(t) for t in self._tables]
joins = SortedDict()
where = self._where[:]
params = self._params[:]
# Convert self._filters into SQL.
joins2, where2, params2 = self._filters.get_sql(opts)
joins.update(joins2)
where.extend(where2)
params.extend(params2)
# Add additional tables and WHERE clauses based on select_related.
if self._select_related:
fill_table_cache(opts, select, tables, where,
old_prefix=opts.db_table,
cache_tables_seen=[opts.db_table],
max_depth=self._max_related_depth)
# Add any additional SELECTs.
if self._select:
select.extend(['(%s) AS %s' % (quote_only_if_word(s[1]), backend.quote_name(s[0])) for s in self._select.items()])
# Start composing the body of the SQL statement.
sql = [" FROM", backend.quote_name(opts.db_table)]
# Compose the join dictionary into SQL describing the joins.
if joins:
sql.append(" ".join(["%s %s AS %s ON %s" % (join_type, table, alias, condition)
for (alias, (table, join_type, condition)) in joins.items()]))
# Compose the tables clause into SQL.
if tables:
sql.append(", " + ", ".join(tables))
# Compose the where clause into SQL.
if where:
sql.append("WHERE " + " AND ".join(where))
# ORDER BY clause
order_by = []
if self._order_by is not None:
ordering_to_use = self._order_by
else:
ordering_to_use = opts.ordering
for f in handle_legacy_orderlist(ordering_to_use):
if f == '?': # Special case.
order_by.append(backend.get_random_function_sql())
else:
if f.startswith('-'):
col_name = f[1:]
order = "DESC"
else:
col_name = f
order = "ASC"
if "." in col_name:
table_prefix, col_name = col_name.split('.', 1)
table_prefix = backend.quote_name(table_prefix) + '.'
else:
# Use the database table as a column prefix if it wasn't given,
# and if the requested column isn't a custom SELECT.
if "." not in col_name and col_name not in (self._select or ()):
table_prefix = backend.quote_name(opts.db_table) + '.'
else:
table_prefix = ''
order_by.append('%s%s %s' % (table_prefix, backend.quote_name(orderfield2column(col_name, opts)), order))
if order_by:
sql.append("ORDER BY " + ", ".join(order_by))
# LIMIT and OFFSET clauses
if self._limit is not None:
sql.append("%s " % backend.get_limit_offset_sql(self._limit, self._offset))
else:
assert self._offset is None, "'offset' is not allowed without 'limit'"
return select, " ".join(sql), params
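# Illustrative shape of the return value (quoting and LIMIT syntax are
# backend-specific): for a model with table 'app_book' ordered by name and
# limited to 10 rows, select is ['"app_book"."id"', '"app_book"."name"'] and
# the joined sql is roughly ' FROM "app_book" ORDER BY "app_book"."name" ASC LIMIT 10 '.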
class ValuesQuerySet(QuerySet):
def __init__(self, *args, **kwargs):
super(ValuesQuerySet, self).__init__(*args, **kwargs)
# select_related and select aren't supported in values().
self._select_related = False
self._select = {}
def iterator(self):
try:
select, sql, params = self._get_sql_clause()
except EmptyResultSet:
raise StopIteration
# self._fields is a list of field names to fetch.
if self._fields:
columns = [self.model._meta.get_field(f, many_to_many=False).column for f in self._fields]
field_names = self._fields
else: # Default to all fields.
columns = [f.column for f in self.model._meta.fields]
field_names = [f.attname for f in self.model._meta.fields]
select = ['%s.%s' % (backend.quote_name(self.model._meta.db_table), backend.quote_name(c)) for c in columns]
cursor = connection.cursor()
cursor.execute("SELECT " + (self._distinct and "DISTINCT " or "") + ",".join(select) + sql, params)
while 1:
rows = cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)
if not rows:
raise StopIteration
for row in rows:
yield dict(zip(field_names, row))
def _clone(self, klass=None, **kwargs):
c = super(ValuesQuerySet, self)._clone(klass, **kwargs)
c._fields = self._fields[:]
return c
class DateQuerySet(QuerySet):
def iterator(self):
from django.db.backends.util import typecast_timestamp
self._order_by = () # Clear this because it'll mess things up otherwise.
if self._field.null:
self._where.append('%s.%s IS NOT NULL' % \
(backend.quote_name(self.model._meta.db_table), backend.quote_name(self._field.column)))
try:
select, sql, params = self._get_sql_clause()
except EmptyResultSet:
raise StopIteration
sql = 'SELECT %s %s GROUP BY 1 ORDER BY 1 %s' % \
(backend.get_date_trunc_sql(self._kind, '%s.%s' % (backend.quote_name(self.model._meta.db_table),
backend.quote_name(self._field.column))), sql, self._order)
cursor = connection.cursor()
cursor.execute(sql, params)
# We have to manually run typecast_timestamp(str()) on the results, because
# MySQL doesn't automatically cast the result of date functions as datetime
# objects -- MySQL returns the values as strings, instead.
return [typecast_timestamp(str(row[0])) for row in cursor.fetchall()]
def _clone(self, klass=None, **kwargs):
c = super(DateQuerySet, self)._clone(klass, **kwargs)
c._field = self._field
c._kind = self._kind
c._order = self._order
return c
class EmptyQuerySet(QuerySet):
def __init__(self, model=None):
super(EmptyQuerySet, self).__init__(model)
self._result_cache = []
def count(self):
return 0
def delete(self):
pass
def _clone(self, klass=None, **kwargs):
c = super(EmptyQuerySet, self)._clone(klass, **kwargs)
c._result_cache = []
return c
def _get_sql_clause(self):
raise EmptyResultSet
class QOperator(object):
"Base class for QAnd and QOr"
def __init__(self, *args):
self.args = args
def get_sql(self, opts):
joins, where, params = SortedDict(), [], []
for val in self.args:
try:
joins2, where2, params2 = val.get_sql(opts)
joins.update(joins2)
where.extend(where2)
params.extend(params2)
except EmptyResultSet:
if not isinstance(self, QOr):
raise EmptyResultSet
if where:
return joins, ['(%s)' % self.operator.join(where)], params
return joins, [], params
class QAnd(QOperator):
"Encapsulates a combined query that uses 'AND'."
operator = ' AND '
def __or__(self, other):
return QOr(self, other)
def __and__(self, other):
if isinstance(other, QAnd):
return QAnd(*(self.args+other.args))
elif isinstance(other, (Q, QOr)):
return QAnd(*(self.args+(other,)))
else:
raise TypeError, other
class QOr(QOperator):
"Encapsulates a combined query that uses 'OR'."
operator = ' OR '
def __and__(self, other):
return QAnd(self, other)
def __or__(self, other):
if isinstance(other, QOr):
return QOr(*(self.args+other.args))
elif isinstance(other, (Q, QAnd)):
return QOr(*(self.args+(other,)))
else:
raise TypeError, other
class Q(object):
"Encapsulates queries as objects that can be combined logically."
def __init__(self, **kwargs):
self.kwargs = kwargs
def __and__(self, other):
return QAnd(self, other)
def __or__(self, other):
return QOr(self, other)
def get_sql(self, opts):
return parse_lookup(self.kwargs.items(), opts)
class QNot(Q):
"Encapsulates NOT (...) queries as objects"
def __init__(self, q):
"Creates a negation of the q object passed in."
self.q = q
def get_sql(self, opts):
try:
joins, where, params = self.q.get_sql(opts)
where2 = ['(NOT (%s))' % " AND ".join(where)]
except EmptyResultSet:
return SortedDict(), [], []
return joins, where2, params
def get_where_clause(lookup_type, table_prefix, field_name, value):
if table_prefix.endswith('.'):
table_prefix = backend.quote_name(table_prefix[:-1])+'.'
field_name = backend.quote_name(field_name)
try:
return '%s%s %s' % (table_prefix, field_name, (backend.OPERATOR_MAPPING[lookup_type] % '%s'))
except KeyError:
pass
if lookup_type == 'in':
in_string = ','.join(['%s' for id in value])
if in_string:
return '%s%s IN (%s)' % (table_prefix, field_name, in_string)
else:
raise EmptyResultSet
elif lookup_type in ('range', 'year'):
return '%s%s BETWEEN %%s AND %%s' % (table_prefix, field_name)
elif lookup_type in ('month', 'day'):
return "%s = %%s" % backend.get_date_extract_sql(lookup_type, table_prefix + field_name)
elif lookup_type == 'isnull':
return "%s%s IS %sNULL" % (table_prefix, field_name, (not value and 'NOT ' or ''))
elif lookup_type == 'search':
return backend.get_fulltext_search_sql(table_prefix + field_name)
raise TypeError, "Got invalid lookup_type: %s" % repr(lookup_type)
def get_cached_row(klass, row, index_start, max_depth=0, cur_depth=0):
"""Helper function that recursively returns an object with cache filled"""
# If we've got a max_depth set and we've exceeded that depth, bail now.
if max_depth and cur_depth > max_depth:
return None
index_end = index_start + len(klass._meta.fields)
obj = klass(*row[index_start:index_end])
for f in klass._meta.fields:
if f.rel and not f.null:
cached_row = get_cached_row(f.rel.to, row, index_end, max_depth, cur_depth+1)
if cached_row:
rel_obj, index_end = cached_row
setattr(obj, f.get_cache_name(), rel_obj)
return obj, index_end
def fill_table_cache(opts, select, tables, where, old_prefix, cache_tables_seen, max_depth=0, cur_depth=0):
"""
Helper function that recursively populates the select, tables and where (in
place) for select_related queries.
"""
# If we've got a max_depth set and we've exceeded that depth, bail now.
if max_depth and cur_depth > max_depth:
return None
qn = backend.quote_name
for f in opts.fields:
if f.rel and not f.null:
db_table = f.rel.to._meta.db_table
if db_table not in cache_tables_seen:
tables.append(qn(db_table))
else: # The table was already seen, so give it a table alias.
new_prefix = '%s%s' % (db_table, len(cache_tables_seen))
tables.append('%s %s' % (qn(db_table), qn(new_prefix)))
db_table = new_prefix
cache_tables_seen.append(db_table)
where.append('%s.%s = %s.%s' % \
(qn(old_prefix), qn(f.column), qn(db_table), qn(f.rel.get_related_field().column)))
select.extend(['%s.%s' % (qn(db_table), qn(f2.column)) for f2 in f.rel.to._meta.fields])
fill_table_cache(f.rel.to._meta, select, tables, where, db_table, cache_tables_seen, max_depth, cur_depth+1)
def parse_lookup(kwarg_items, opts):
# Helper function that handles converting API kwargs
# (e.g. "name__exact": "tom") to SQL.
# Returns a tuple of (joins, where, params).
# 'joins' is a sorted dictionary describing the tables that must be joined
# to complete the query. The dictionary is sorted because creation order
# is significant; it is a dictionary to ensure uniqueness of alias names.
#
# Each key-value pair follows the form
# alias: (table, join_type, condition)
# where
# alias is the AS alias for the joined table
# table is the actual table name to be joined
# join_type is the type of join (INNER JOIN, LEFT OUTER JOIN, etc)
# condition is the where-like statement that narrows the join.
# alias will be derived from the lookup list name.
#
# At present, this method only ever returns INNER JOINs; the option is
# there for others to implement custom Q()s, etc that return other join
# types.
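# For example (illustrative), the kwarg 'choice__poll__id__exact' splits into
# the path ['choice', 'poll', 'id'] with lookup_type 'exact'; each hop that
# needs another table contributes one joins entry of the form
# alias: (table, 'INNER JOIN', '<current>.<join_column> = <alias>.<pk>').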
joins, where, params = SortedDict(), [], []
for kwarg, value in kwarg_items:
path = kwarg.split(LOOKUP_SEPARATOR)
# Extract the last elements of the kwarg.
# The very-last is the lookup_type (equals, like, etc).
# The second-last is the table column on which the lookup_type is
# to be performed. If this name is 'pk', it will be substituted with
# the name of the primary key.
# If there is only one part, or the last part is not a query
# term, assume that the query is an __exact
lookup_type = path.pop()
if lookup_type == 'pk':
lookup_type = 'exact'
path.append(None)
elif len(path) == 0 or lookup_type not in QUERY_TERMS:
path.append(lookup_type)
lookup_type = 'exact'
if len(path) < 1:
raise TypeError, "Cannot parse keyword query %r" % kwarg
if value is None:
# Interpret '__exact=None' as the sql 'IS NULL'; otherwise, reject
# all uses of None as a query value.
if lookup_type != 'exact':
raise ValueError, "Cannot use None as a query value"
joins2, where2, params2 = lookup_inner(path, lookup_type, value, opts, opts.db_table, None)
joins.update(joins2)
where.extend(where2)
params.extend(params2)
return joins, where, params
class FieldFound(Exception):
"Exception used to short circuit field-finding operations."
pass
def find_field(name, field_list, related_query):
"""
Finds a field with a specific name in a list of field instances.
Returns None if there are no matches or more than one match.
"""
if related_query:
matches = [f for f in field_list if f.field.related_query_name() == name]
else:
matches = [f for f in field_list if f.name == name]
if len(matches) != 1:
return None
return matches[0]
def lookup_inner(path, lookup_type, value, opts, table, column):
qn = backend.quote_name
joins, where, params = SortedDict(), [], []
current_opts = opts
current_table = table
current_column = column
intermediate_table = None
join_required = False
name = path.pop(0)
# Has the primary key been requested? If so, expand it out
# to be the name of the current class' primary key
if name is None or name == 'pk':
name = current_opts.pk.name
# Try to find the name in the fields associated with the current class
try:
# Does the name belong to a defined many-to-many field?
field = find_field(name, current_opts.many_to_many, False)
if field:
new_table = current_table + '__' + name
new_opts = field.rel.to._meta
new_column = new_opts.pk.column
# Need to create an intermediate table join over the m2m table
# This process hijacks current_table/column to point to the
# intermediate table.
current_table = "m2m_" + new_table
intermediate_table = field.m2m_db_table()
join_column = field.m2m_reverse_name()
intermediate_column = field.m2m_column_name()
raise FieldFound
# Does the name belong to a reverse defined many-to-many field?
field = find_field(name, current_opts.get_all_related_many_to_many_objects(), True)
if field:
new_table = current_table + '__' + name
new_opts = field.opts
new_column = new_opts.pk.column
# Need to create an intermediate table join over the m2m table.
# This process hijacks current_table/column to point to the
# intermediate table.
current_table = "m2m_" + new_table
intermediate_table = field.field.m2m_db_table()
join_column = field.field.m2m_column_name()
intermediate_column = field.field.m2m_reverse_name()
raise FieldFound
# Does the name belong to a one-to-many field?
field = find_field(name, current_opts.get_all_related_objects(), True)
if field:
new_table = table + '__' + name
new_opts = field.opts
new_column = field.field.column
join_column = opts.pk.column
# 1-N fields MUST be joined, regardless of any other conditions.
join_required = True
raise FieldFound
# Does the name belong to a one-to-one, many-to-one, or regular field?
field = find_field(name, current_opts.fields, False)
if field:
if field.rel: # One-to-One/Many-to-one field
new_table = current_table + '__' + name
new_opts = field.rel.to._meta
new_column = new_opts.pk.column
join_column = field.column
raise FieldFound
elif path:
# For regular fields, if there are still items on the path,
# an error has been made. We munge "name" so that the error
# properly identifies the cause of the problem.
name += LOOKUP_SEPARATOR + path[0]
else:
raise FieldFound
except FieldFound: # Match found, loop has been shortcut.
pass
else: # No match found.
raise TypeError, "Cannot resolve keyword '%s' into field" % name
# Check whether an intermediate join is required between current_table
# and new_table.
if intermediate_table:
joins[qn(current_table)] = (
qn(intermediate_table), "LEFT OUTER JOIN",
"%s.%s = %s.%s" % (qn(table), qn(current_opts.pk.column), qn(current_table), qn(intermediate_column))
)
if path:
# There are elements left in the path. More joins are required.
if len(path) == 1 and path[0] in (new_opts.pk.name, None) \
and lookup_type in ('exact', 'isnull') and not join_required:
# If the next and final name query is for a primary key,
# and the search is for isnull/exact, then the current
# (for N-1) or intermediate (for N-N) table can be used
# for the search. No need to join an extra table just
# to check the primary key.
new_table = current_table
else:
# There are 1 or more name queries pending, and we have ruled out
# any shortcuts; therefore, a join is required.
joins[qn(new_table)] = (
qn(new_opts.db_table), "INNER JOIN",
"%s.%s = %s.%s" % (qn(current_table), qn(join_column), qn(new_table), qn(new_column))
)
# If we have made the join, we don't need to tell subsequent
# recursive calls about the column name we joined on.
join_column = None
# There are name queries remaining. Recurse deeper.
joins2, where2, params2 = lookup_inner(path, lookup_type, value, new_opts, new_table, join_column)
joins.update(joins2)
where.extend(where2)
params.extend(params2)
else:
# No elements left in path. Current element is the element on which
# the search is being performed.
if join_required:
# Last query term is a RelatedObject
if field.field.rel.multiple:
# RelatedObject is from a 1-N relation.
# Join is required; query operates on joined table.
column = new_opts.pk.name
joins[qn(new_table)] = (
qn(new_opts.db_table), "INNER JOIN",
"%s.%s = %s.%s" % (qn(current_table), qn(join_column), qn(new_table), qn(new_column))
)
current_table = new_table
else:
# RelatedObject is from a 1-1 relation,
# No need to join; get the pk value from the related object,
# and compare using that.
column = current_opts.pk.name
elif intermediate_table:
# Last query term is a related object from an N-N relation.
# Join from intermediate table is sufficient.
column = join_column
elif name == current_opts.pk.name and lookup_type in ('exact', 'isnull') and current_column:
# Last query term is for a primary key. If previous iterations
# introduced a current/intermediate table that can be used to
# optimize the query, then use that table and column name.
column = current_column
else:
# Last query term was a normal field.
column = field.column
where.append(get_where_clause(lookup_type, current_table + '.', column, value))
params.extend(field.get_db_prep_lookup(lookup_type, value))
return joins, where, params
def delete_objects(seen_objs):
"Iterate through a list of seen classes, and remove any instances that are referred to"
qn = backend.quote_name
ordered_classes = seen_objs.keys()
ordered_classes.reverse()
cursor = connection.cursor()
for cls in ordered_classes:
seen_objs[cls] = seen_objs[cls].items()
seen_objs[cls].sort()
# Pre-notify all instances to be deleted
for pk_val, instance in seen_objs[cls]:
dispatcher.send(signal=signals.pre_delete, sender=cls, instance=instance)
pk_list = [pk for pk,instance in seen_objs[cls]]
for related in cls._meta.get_all_related_many_to_many_objects():
if not isinstance(related.field, GenericRelation):
for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
cursor.execute("DELETE FROM %s WHERE %s IN (%s)" % \
(qn(related.field.m2m_db_table()),
qn(related.field.m2m_reverse_name()),
','.join(['%s' for pk in pk_list[offset:offset+GET_ITERATOR_CHUNK_SIZE]])),
pk_list[offset:offset+GET_ITERATOR_CHUNK_SIZE])
for f in cls._meta.many_to_many:
if isinstance(f, GenericRelation):
from django.contrib.contenttypes.models import ContentType
query_extra = 'AND %s=%%s' % f.rel.to._meta.get_field(f.content_type_field_name).column
args_extra = [ContentType.objects.get_for_model(cls).id]
else:
query_extra = ''
args_extra = []
for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
cursor.execute(("DELETE FROM %s WHERE %s IN (%s)" % \
(qn(f.m2m_db_table()), qn(f.m2m_column_name()),
','.join(['%s' for pk in pk_list[offset:offset+GET_ITERATOR_CHUNK_SIZE]]))) + query_extra,
pk_list[offset:offset+GET_ITERATOR_CHUNK_SIZE] + args_extra)
for field in cls._meta.fields:
if field.rel and field.null and field.rel.to in seen_objs:
for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
cursor.execute("UPDATE %s SET %s=NULL WHERE %s IN (%s)" % \
(qn(cls._meta.db_table), qn(field.column), qn(cls._meta.pk.column),
','.join(['%s' for pk in pk_list[offset:offset+GET_ITERATOR_CHUNK_SIZE]])),
pk_list[offset:offset+GET_ITERATOR_CHUNK_SIZE])
# Now delete the actual data
for cls in ordered_classes:
seen_objs[cls].reverse()
pk_list = [pk for pk,instance in seen_objs[cls]]
for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
cursor.execute("DELETE FROM %s WHERE %s IN (%s)" % \
(qn(cls._meta.db_table), qn(cls._meta.pk.column),
','.join(['%s' for pk in pk_list[offset:offset+GET_ITERATOR_CHUNK_SIZE]])),
pk_list[offset:offset+GET_ITERATOR_CHUNK_SIZE])
# Last cleanup; set NULLs where there once was a reference to the object,
# NULL the primary key of the found objects, and perform post-notification.
for pk_val, instance in seen_objs[cls]:
for field in cls._meta.fields:
if field.rel and field.null and field.rel.to in seen_objs:
setattr(instance, field.attname, None)
setattr(instance, cls._meta.pk.attname, None)
dispatcher.send(signal=signals.post_delete, sender=cls, instance=instance)
transaction.commit_unless_managed()
|
eepp/delicolour | refs/heads/master | delicolour/colour.py | 1 | import re
from colormath.color_objects import sRGBColor
from colormath.color_objects import HSVColor
from colormath.color_objects import HSLColor
from colormath.color_conversions import convert_color
class Colour:
def __init__(self, r=0, g=0, b=0):
self.set_rgb(r, g, b)
def __repr__(self):
r, g, b = self.rgb
return 'Colour({}, {}, {})'.format(r, g, b)
def set_rgb(self, r, g, b):
self._set_rgb_color(sRGBColor(r, g, b, True))
def set_r(self, r):
self._rgb_color.rgb_r = r
self._update_hsv_from_rgb()
def set_g(self, g):
self._rgb_color.rgb_g = g
self._update_hsv_from_rgb()
def set_b(self, b):
self._rgb_color.rgb_b = b
self._update_hsv_from_rgb()
def set_hsv(self, h, s, v):
self._set_hsv_color(HSVColor(h, s, v))
def _update_rgb_from_hsv(self):
self._rgb_color = convert_color(self._hsv_color, sRGBColor)
def _update_hsv_from_rgb(self):
self._hsv_color = convert_color(self._rgb_color, HSVColor)
@property
def rgb(self):
return self._rgb_color.get_upscaled_value_tuple()
@property
def hsv(self):
c = self._hsv_color
return c.hsv_h, c.hsv_s, c.hsv_v
@property
def hex(self):
r, g, b = self.rgb
return '{:02x}{:02x}{:02x}'.format(r, g, b)
def set_from_hex(self, hex_str):
hex_str = hex_str.strip()
if hex_str.startswith('#'):
hex_str = hex_str[1:]
if len(hex_str) == 3:
hex_str = '{r}{r}{g}{g}{b}{b}'.format(r=hex_str[0],
g=hex_str[1],
b=hex_str[2])
if len(hex_str) != 6:
return
color = sRGBColor.new_from_rgb_hex(hex_str)
self._set_rgb_color(color)
def _set_hsv_color(self, hsv):
self._hsv_color = hsv
self._update_rgb_from_hsv()
def _set_rgb_color(self, rgb):
self._rgb_color = rgb
self._update_hsv_from_rgb()
@property
def _hsl_color(self):
return convert_color(self._hsv_color, HSLColor)
def inc_light(self, val):
hsl = self._hsl_color
light = hsl.hsl_l
light += val
# clip
if light > 1:
light = 1
hsl.hsl_l = light
self._set_hsv_color(convert_color(hsl, HSVColor))
def dec_light(self, val):
hsl = self._hsl_color
light = hsl.hsl_l
light -= val
# clip
if light < 0:
light = 0
hsl.hsl_l = light
self._set_hsv_color(convert_color(hsl, HSVColor))
def inc_sat(self, val):
sat = self._hsv_color.hsv_s
sat += val
# clip
if sat > 1:
sat = 1
self._hsv_color.hsv_s = sat
self._update_rgb_from_hsv()
def dec_sat(self, val):
sat = self._hsv_color.hsv_s
sat -= val
# clip
if sat < 0:
sat = 0
self._hsv_color.hsv_s = sat
self._update_rgb_from_hsv()
@staticmethod
def _rgb_in_range(r, g, b):
return all(0 <= val <= 255 for val in (r, g, b))
def set_from_css_rgb(self, rgb_str):
rgb_str = rgb_str.strip()
m = re.match(r'^rgb\s*\(\s*(\d+)\s*\,\s*(\d+)\s*\,\s*(\d+)\s*\)$', rgb_str)
if not m:
return
r = int(m.group(1))
g = int(m.group(2))
b = int(m.group(3))
if not Colour._rgb_in_range(r, g, b):
return
self.set_rgb(r, g, b)
def get_css_rgb(self):
r, g, b = self.rgb
return 'rgb({}, {}, {})'.format(r, g, b)
@staticmethod
def from_rgb(r, g, b):
c = Colour()
c.set_rgb(r, g, b)
return c
@staticmethod
def from_hsv(h, s, v):
c = Colour()
c.set_hsv(h, s, v)
return c
@staticmethod
def from_hex(hex_str):
c = Colour()
c.set_from_hex(hex_str)
return c
@staticmethod
def from_css_rgb(rgb_str):
c = Colour()
c.set_from_css_rgb(rgb_str)
return c
def copy(self):
r, g, b = self.rgb
return Colour.from_rgb(r, g, b)
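# Quick usage sketch (illustrative, not part of the module):
# c = Colour.from_hex('#1e90ff')
# c.inc_sat(0.1) # bump HSV saturation by 0.1, clipped to [0, 1]
# print(c.get_css_rgb()) # CSS-style 'rgb(r, g, b)' string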
|
dsajkl/123 | refs/heads/master | lms/djangoapps/shoppingcart/migrations/0003_auto__del_field_orderitem_line_cost.py | 182 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'OrderItem.line_cost'
db.delete_column('shoppingcart_orderitem', 'line_cost')
def backwards(self, orm):
# Adding field 'OrderItem.line_cost'
db.add_column('shoppingcart_orderitem', 'line_cost',
self.gf('django.db.models.fields.DecimalField')(default=0.0, max_digits=30, decimal_places=2),
keep_default=False)
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'shoppingcart.certificateitem': {
'Meta': {'object_name': 'CertificateItem', '_ormbases': ['shoppingcart.OrderItem']},
'course_enrollment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.CourseEnrollment']"}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.order': {
'Meta': {'object_name': 'Order'},
'bill_to_cardtype': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'bill_to_ccnum': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'bill_to_city': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_first': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_last': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_postalcode': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'bill_to_state': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'bill_to_street1': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'bill_to_street2': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'processor_reply_dump': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'purchase_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.orderitem': {
'Meta': {'object_name': 'OrderItem'},
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line_desc': ('django.db.models.fields.CharField', [], {'default': "'Misc. Item'", 'max_length': '1024'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}),
'qty': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32'}),
'unit_cost': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.paidcourseregistration': {
'Meta': {'object_name': 'PaidCourseRegistration', '_ormbases': ['shoppingcart.OrderItem']},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'default': "'honor'", 'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'student.courseenrollment': {
'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['shoppingcart']
|
adiyengar/Spirit | refs/heads/master | spirit/core/utils/ratelimit/ratelimit.py | 9 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import hashlib
from django.conf import settings
from django.core.cache import caches
TIME_DICT = {
's': 1,
'm': 60,
}
class RateLimit:
def __init__(self, request, uid, method=None, field=None, rate='5/5m'):
self.request = request
self.uid = uid
self.method = method or ['POST', ]
self.limit = None
self.time = None
self.cache_keys = []
self.cache_values = {}
if self.request.method in (m.upper() for m in self.method)\
and settings.ST_RATELIMIT_ENABLE:
self.limit, self.time = self.split_rate(rate)
self.cache_keys = self._get_keys(field)
self.cache_values = self._incr_cache()
def split_rate(self, rate):
limit, period = rate.split('/')
limit = int(limit)
if len(period) > 1:
time = TIME_DICT[period[-1]]
time *= int(period[:-1])
else:
time = TIME_DICT[period]
return limit, time
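# For example, split_rate('5/5m') returns (5, 300), i.e. five requests per
# 300 seconds; split_rate('10/s') returns (10, 1).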
def _make_cache_key(self, key):
key_uid = '%s:%s' % (self.uid, key)
key_hash = hashlib.sha1(key_uid.encode('utf-8')).hexdigest()
return '%s:%s' % (settings.ST_RATELIMIT_CACHE_PREFIX, key_hash)
def _get_keys(self, field=None):
keys = []
if self.request.user.is_authenticated():
keys.append('user:%d' % self.request.user.pk)
else:
keys.append('ip:%s' % self.request.META['REMOTE_ADDR'])
if field is not None:
field_value = getattr(self.request, self.request.method).get(field, '')
if field_value:
keys.append('field:%s:%s' % (field, field_value))
return [self._make_cache_key(k) for k in keys]
def _incr_cache(self):
if not self.cache_keys:
return {}
cache = caches[settings.ST_RATELIMIT_CACHE]
cache_values = cache.get_many(self.cache_keys)
for key in self.cache_keys:
if key in cache_values:
cache_values[key] += 1
else:
cache_values[key] = 1
cache.set_many(cache_values, timeout=self.time)
return cache_values
def is_limited(self):
for count in self.cache_values.values():
if count > self.limit:
return True
return False
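# Usage sketch (illustrative, not part of the module): inside a view,
# rl = RateLimit(request, 'myapp.login', field='username', rate='5/5m')
# if rl.is_limited():
# ... # reject the request, e.g. return an HTTP 429 response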
|
wisfern/vnpy | refs/heads/master | beta/api/korbit/vnkorbit.py | 1 | # encoding: utf-8
import urllib
import hashlib
import json
import requests
import hmac
from datetime import datetime
from time import time, sleep, mktime
from Queue import Queue, Empty
from threading import Thread
import websocket
import inspect
import cerberus
korbit_host = "https://api.korbit.co.kr/v1/"
KORBITFUNCTIONCODE_LIMIT_BID_ORDER = 'limit_bid_order'
KORBITFUNCTIONCODE_LIMIT_ASK_ORDER = 'limit_ask_order'
KORBITFUNCTIONCODE_CANCEL_ORDER = 'cancel'
KORBITFUNCTIONCODE_LIST_OPEN_ORDERS = 'list_open_orders' # not implemented (seems unused); lists all open orders
KORBITFUNCTIONCODE_LIST_EXCHANGE_ORDERS = 'list_exchange_orders' # implemented; lists all orders on the exchange
KORBITFUNCTIONCODE_LIST_TRANSFERS = 'list_transfers' # not implemented; lists all deposit/withdrawal transfers
KORBITFUNCTIONCODE_FEE = 'fee' # not implemented; gets the exchange's trading fee
KORBITFUNCTIONCODE_BALANCES = "balances" # implemented; gets the user's account balances
KORBITFUNCTIONCODE_ACCOUNTS = "accounts" # not implemented; gets the user's deposit addresses
KORBIT_ALL_SYMBOL_PAIR = ["btc_krw","bch_krw" , "eth_krw"]
KORBIT_ALL_SYMBOLS = ["krw","btc","eth","etc","xrp","bch"]
class Korbit_TradeApi(object):
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
self.accessKey = ''
self.secretKey = ''
self.__token = {}
self.active = False # API running state
self.reqID = 0 # request ID counter
# self.reqQueue = Queue() # request queue
self.reqQueue = [] # queue of pending requests
self.reqThread = Thread(target=self.processQueue) # request-processing thread
self.DEBUG = True
'''
Send a POST request directly and return the parsed response.
'''
def request_post(self, path, headers=None, data=None):
url = korbit_host + path
response = requests.post(url, headers=headers, data=data, timeout=20)
try:
return response.json()
except ValueError as e: # Python 2's json module raises ValueError on invalid JSON
print "exception: {}, response_text: {}".format(e, response.text)
return response.text
'''
Send a GET request directly to fetch other data.
'''
def request_get(self, url, headers=None, params=None):
response = requests.get(url, headers=headers, params=params, timeout=20) # the original referenced an undefined self.__timeout
try:
return response.json()
except ValueError as e: # Python 2's json module raises ValueError on invalid JSON
print "exception: {}, response_text: {}".format(e, response.text)
return response.text
'''
Send a request directly to obtain the OAuth token.
'''
# https://apidocs.korbit.co.kr/#authentication
def create_token_directly(self, username, password):
payload = {
'client_id': self.accessKey,
'client_secret': self.secretKey,
'username': username,
'password': password,
'grant_type': "password"
}
self.__token = self.request_post("oauth2/access_token", data=payload)
return self.__token
def refresh_token(self):
payload = {
'client_id': self.accessKey, # the original referenced undefined self.__client_id / self.__secret
'client_secret': self.secretKey,
'refresh_token': self.__token['refresh_token'],
'grant_type': "refresh_token"
}
self.__token = self.request_post("oauth2/access_token", data=payload)
return self.__token
@property
def nonce(self):
return int(time() * 1000)
@property
def headers(self):
return {
'Accept': 'application/json',
'Authorization': "{} {}".format(self.__token['token_type'], self.__token['access_token'])
}
#----------------------------------------------------------------------
def processRequest(self , req):
"""处理请求"""
# 读取方法和参数
url = req['url']
method = req['method']
r = None
headers = self.headers
kwargs = req["kwargs"]
if method in [KORBITFUNCTIONCODE_LIMIT_BID_ORDER , KORBITFUNCTIONCODE_LIMIT_ASK_ORDER, KORBITFUNCTIONCODE_CANCEL_ORDER]:
if method in [KORBITFUNCTIONCODE_CANCEL_ORDER]:
payload = {
'id': kwargs["ids"],
'currency_pair': kwargs["currency_pair"],
'nonce': self.nonce
}
r = requests.post(url , headers=headers , data = payload)
else:
payload = {
'type': 'limit',
'currency_pair': kwargs["currency_pair"],
'price': kwargs["price"],
'coin_amount': kwargs["coin_amount"],
'fiat_amount': None,
'nonce': self.nonce
}
r = requests.post(url , headers=headers , data = payload)
elif method in [KORBITFUNCTIONCODE_LIST_EXCHANGE_ORDERS , KORBITFUNCTIONCODE_BALANCES]:
if method in [KORBITFUNCTIONCODE_LIST_EXCHANGE_ORDERS]:
payload = {
'currency_pair': kwargs["currency_pair"],
'offset': kwargs["offset"],
'limit': kwargs["limit"]
}
r = requests.get(url , headers = headers , params = payload)
elif method in [KORBITFUNCTIONCODE_BALANCES]:
r = requests.get(url , headers = headers )
# print url , method, headers , kwargs , r
if r is not None and r.status_code == 200:
data = r.json()
return data
else:
try:
data = json.loads(r.text)
print "Error in r , " , data
return data
except Exception,ex:
print ex
return None
#----------------------------------------------------------------------
def processQueue(self):
"""处理请求队列中的请求"""
while self.active:
try:
# req = self.reqQueue.get(block=True, timeout=0.1) # blocking get with a short timeout
if len(self.reqQueue) > 0:
(Type , req) = self.reqQueue[0]
self.reqQueue.pop(0)
callback = req['callback']
reqID = req['reqID']
data = self.processRequest(req)
# request succeeded
if data != None :
if self.DEBUG:
print callback.__name__
callback(data, req, reqID)
sleep(0.1)
except Exception,ex:
print ex
#----------------------------------------------------------------------
def sendRequest(self, url , method, callback, kwargs = None,optional=None):
"""发送请求"""
# 请求编号加1
self.reqID += 1
# build the request dict and put it on the queue
req = {}
req['url'] = url
req['method'] = method
req['callback'] = callback
req['optional'] = optional
req['kwargs'] = kwargs
req['reqID'] = self.reqID
if method in [KORBITFUNCTIONCODE_LIST_EXCHANGE_ORDERS ]:
already_queued = False
for use_method, r in self.reqQueue:
if use_method == method:
already_queued = True
break
if not already_queued:
# only queue one pending list-orders request at a time; the original
# appended in both branches, which defeated this de-duplication
self.reqQueue.append((method, req))
#self.reqQueue.put(req)
# return the request ID
return self.reqID
#----------------------------------------------------------------------
def exit(self):
"""退出"""
self.active = False
if self.reqThread.isAlive():
self.reqThread.join()
####################################################
## Request-initiating methods
####################################################
#----------------------------------------------------------------------
def init(self, accessKey, secretKey , username , password):
"""初始化"""
self.accessKey = accessKey
self.secretKey = secretKey
self.create_token_directly( username , password)
self.active = True
self.reqThread.start()
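# Usage sketch (credentials and amounts are placeholders, not from this file):
# api = Korbit_TradeApi()
# api.init('ACCESS_KEY', 'SECRET_KEY', 'user@example.com', 'password')
# api.buy_currency(coin_amount=0.01, price=5000000, currency_pair='btc_krw')
# results arrive asynchronously via the on_* callbacks below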
#----------------------------------------------------------------------
def buy_currency(self , coin_amount, price, currency_pair="btc_krw"):
kwargs = {"currency_pair":currency_pair , "coin_amount":coin_amount , "price":int(price)}
return self.sendRequest( korbit_host + "user/orders/buy" , KORBITFUNCTIONCODE_LIMIT_BID_ORDER , self.on_buy_currency , kwargs = kwargs, optional = None)
#----------------------------------------------------------------------
def sell_currency(self , coin_amount , price , currency_pair="btc_krw"):
kwargs = {"currency_pair":currency_pair , "coin_amount":coin_amount , "price":int(price)}
return self.sendRequest( korbit_host + "user/orders/sell" , KORBITFUNCTIONCODE_LIMIT_ASK_ORDER , self.on_sell_currency , kwargs = kwargs, optional = None)
#----------------------------------------------------------------------
def list_market_orders(self , currency_pair = "btc_krw" , offset = 0 , limit = 50):
kwargs = {"currency_pair":currency_pair , "offset":offset , "limit":limit}
return self.sendRequest( korbit_host + "user/orders",KORBITFUNCTIONCODE_LIST_EXCHANGE_ORDERS , self.on_list_exchange_orders , kwargs = kwargs , optional = None)
#----------------------------------------------------------------------
def cancel_orders(self , order_id , currency_pair = "btc_krw"):
kwargs = {"currency_pair":currency_pair , "ids": order_id}
return self.sendRequest( korbit_host + "user/orders/cancel" , KORBITFUNCTIONCODE_CANCEL_ORDER , self.onCancelOrders , kwargs = kwargs , optional = None)
#----------------------------------------------------------------------
def user_balances(self):
return self.sendRequest( korbit_host + "user/balances" , KORBITFUNCTIONCODE_BALANCES , self.onBalances , kwargs = {} , optional = None)
####################################################
## Callback methods
####################################################
def on_buy_currency(self, data , req, reqID):
print data
#----------------------------------------------------------------------
def on_sell_currency(self, data , req, reqID):
print data
#----------------------------------------------------------------------
def on_list_exchange_orders(self, data , req, reqID):
print data
#----------------------------------------------------------------------
def onCancelOrders(self, data , req, reqID):
print data
#----------------------------------------------------------------------
def onBalances(self, data , req, reqID):
print data
class Korbit_DataApi(object):
simple_ticker_url = korbit_host + "ticker"
detail_ticker_url = korbit_host + "ticker/detailed"
orderbook_url = korbit_host + "orderbook"
transactions_url = korbit_host + "transactions"
constants_url = korbit_host + "constants"
def __init__(self ):
self.active = False
self.taskInterval = 0 # delay between polling rounds
self.taskList = [] # list of subscribed tasks
self.taskThread = Thread(target=self.run) # task-processing thread
#----------------------------------------------------------------------
def init(self, interval, debug):
"""初始化"""
self.taskInterval = interval
self.DEBUG = debug
self.active = True
self.taskThread.start()
#----------------------------------------------------------------------
def exit(self):
"""退出"""
self.active = False
if self.taskThread.isAlive():
self.taskThread.join()
#----------------------------------------------------------------------
def run(self):
"""连续运行"""
while self.active:
for url, callback , symbol in self.taskList:
try:
r = requests.get(url)
if r.status_code == 200:
data = r.json()
data["symbol"] = symbol
if self.DEBUG:
print callback.__name__
callback(data)
except Exception, e:
print "Korbit_DataApi" , e
sleep(self.taskInterval)
#----------------------------------------------------------------------
def subscribeTick(self, symbol):
"""订阅实时成交数据"""
url = self.detail_ticker_url + "?currency_pair=" + symbol
task = (url, self.onTick , symbol)
self.taskList.append( task)
#----------------------------------------------------------------------
def subscribeTrades(self, symbol ):
"""订阅实时成交数据"""
# time in ('minute','hour','day')
url = self.transactions_url + "?currency_pair=" + symbol + "&time=" + "minute"
task = (url, self.onTrades , symbol)
self.taskList.append(task)
#----------------------------------------------------------------------
def subscribeOrderbooks(self, symbol):
"""订阅实时成交数据"""
url = self.orderbook_url + "?currency_pair=" + symbol
task = (url, self.onOrderbooks , symbol)
self.taskList.append(task)
#----------------------------------------------------------------------
def onTick(self, data):
"""实时成交推送"""
print data
#----------------------------------------------------------------------
def onTrades(self, data):
"""实时成交推送"""
print data
#----------------------------------------------------------------------
def onOrderbooks(self, data):
"""实时成交推送"""
print data
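# Usage sketch (illustrative): poll the public market-data endpoints once
# per second until exit() is called.
# data_api = Korbit_DataApi()
# data_api.subscribeTick('btc_krw')
# data_api.init(interval=1, debug=False)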
|
havard024/prego | refs/heads/master | venv/lib/python2.7/site-packages/south/tests/deps_c/migrations/0005_c.py | 348 | from south.db import db
from django.db import models
class Migration:
depends_on = [('deps_a', '0002_a')]
def forwards(self):
pass
def backwards(self):
pass
|
40223204/w16b_test | refs/heads/master | static/Brython3.1.3-20150514-095342/Lib/codecs.py | 739 | """ codecs -- Python Codec Registry, API and helpers.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""#"
import builtins, sys
### Registry and builtin stateless codec functions
try:
from _codecs import *
except ImportError as why:
raise SystemError('Failed to load the builtin codecs: %s' % why)
__all__ = ["register", "lookup", "open", "EncodedFile", "BOM", "BOM_BE",
"BOM_LE", "BOM32_BE", "BOM32_LE", "BOM64_BE", "BOM64_LE",
"BOM_UTF8", "BOM_UTF16", "BOM_UTF16_LE", "BOM_UTF16_BE",
"BOM_UTF32", "BOM_UTF32_LE", "BOM_UTF32_BE",
"strict_errors", "ignore_errors", "replace_errors",
"xmlcharrefreplace_errors",
"register_error", "lookup_error"]
### Constants
#
# Byte Order Mark (BOM = ZERO WIDTH NO-BREAK SPACE = U+FEFF)
# and its possible byte string values
# for UTF8/UTF16/UTF32 output and little/big endian machines
#
# UTF-8
BOM_UTF8 = b'\xef\xbb\xbf'
# UTF-16, little endian
BOM_LE = BOM_UTF16_LE = b'\xff\xfe'
# UTF-16, big endian
BOM_BE = BOM_UTF16_BE = b'\xfe\xff'
# UTF-32, little endian
BOM_UTF32_LE = b'\xff\xfe\x00\x00'
# UTF-32, big endian
BOM_UTF32_BE = b'\x00\x00\xfe\xff'
if sys.byteorder == 'little':
# UTF-16, native endianness
BOM = BOM_UTF16 = BOM_UTF16_LE
# UTF-32, native endianness
BOM_UTF32 = BOM_UTF32_LE
else:
# UTF-16, native endianness
BOM = BOM_UTF16 = BOM_UTF16_BE
# UTF-32, native endianness
BOM_UTF32 = BOM_UTF32_BE
# Old broken names (don't use in new code)
BOM32_LE = BOM_UTF16_LE
BOM32_BE = BOM_UTF16_BE
BOM64_LE = BOM_UTF32_LE
BOM64_BE = BOM_UTF32_BE
### Codec base classes (defining the API)
class CodecInfo(tuple):
def __new__(cls, encode, decode, streamreader=None, streamwriter=None,
incrementalencoder=None, incrementaldecoder=None, name=None):
self = tuple.__new__(cls, (encode, decode, streamreader, streamwriter))
self.name = name
self.encode = encode
self.decode = decode
self.incrementalencoder = incrementalencoder
self.incrementaldecoder = incrementaldecoder
self.streamwriter = streamwriter
self.streamreader = streamreader
return self
def __repr__(self):
return "<%s.%s object for encoding %s at 0x%x>" % \
(self.__class__.__module__, self.__class__.__name__,
self.name, id(self))
class Codec:
""" Defines the interface for stateless encoders/decoders.
The .encode()/.decode() methods may use different error
handling schemes by providing the errors argument. These
string values are predefined:
'strict' - raise a ValueError error (or a subclass)
'ignore' - ignore the character and continue with the next
'replace' - replace with a suitable replacement character;
Python will use the official U+FFFD REPLACEMENT
CHARACTER for the builtin Unicode codecs on
decoding and '?' on encoding.
'surrogateescape' - replace with private codepoints U+DCnn.
'xmlcharrefreplace' - Replace with the appropriate XML
character reference (only for encoding).
'backslashreplace' - Replace with backslashed escape sequences
(only for encoding).
The set of allowed values can be extended via register_error.
"""
def encode(self, input, errors='strict'):
""" Encodes the object input and returns a tuple (output
object, length consumed).
errors defines the error handling to apply. It defaults to
'strict' handling.
The method may not store state in the Codec instance. Use
StreamCodec for codecs which have to keep state in order to
make encoding/decoding efficient.
The encoder must be able to handle zero length input and
return an empty object of the output object type in this
situation.
"""
raise NotImplementedError
def decode(self, input, errors='strict'):
""" Decodes the object input and returns a tuple (output
object, length consumed).
input must be an object which provides the bf_getreadbuf
buffer slot. Python strings, buffer objects and memory
mapped files are examples of objects providing this slot.
errors defines the error handling to apply. It defaults to
'strict' handling.
The method may not store state in the Codec instance. Use
StreamCodec for codecs which have to keep state in order to
make encoding/decoding efficient.
The decoder must be able to handle zero length input and
return an empty object of the output object type in this
situation.
"""
raise NotImplementedError
class IncrementalEncoder(object):
"""
An IncrementalEncoder encodes an input in multiple steps. The input can
be passed piece by piece to the encode() method. The IncrementalEncoder
remembers the state of the encoding process between calls to encode().
"""
def __init__(self, errors='strict'):
"""
Creates an IncrementalEncoder instance.
The IncrementalEncoder may use different error handling schemes by
providing the errors keyword argument. See the module docstring
for a list of possible values.
"""
self.errors = errors
self.buffer = ""
def encode(self, input, final=False):
"""
Encodes input and returns the resulting object.
"""
raise NotImplementedError
def reset(self):
"""
Resets the encoder to the initial state.
"""
def getstate(self):
"""
Return the current state of the encoder.
"""
return 0
def setstate(self, state):
"""
Set the current state of the encoder. state must have been
returned by getstate().
"""
class BufferedIncrementalEncoder(IncrementalEncoder):
"""
This subclass of IncrementalEncoder can be used as the baseclass for an
incremental encoder if the encoder must keep some of the output in a
buffer between calls to encode().
"""
def __init__(self, errors='strict'):
IncrementalEncoder.__init__(self, errors)
# unencoded input that is kept between calls to encode()
self.buffer = ""
def _buffer_encode(self, input, errors, final):
# Overwrite this method in subclasses: It must encode input
# and return an (output, length consumed) tuple
raise NotImplementedError
def encode(self, input, final=False):
# encode input (taking the buffer into account)
data = self.buffer + input
(result, consumed) = self._buffer_encode(data, self.errors, final)
# keep unencoded input until the next call
self.buffer = data[consumed:]
return result
def reset(self):
IncrementalEncoder.reset(self)
self.buffer = ""
def getstate(self):
return self.buffer or 0
def setstate(self, state):
self.buffer = state or ""
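# A minimal sketch (hypothetical subclass) of how BufferedIncrementalEncoder is
# meant to be extended: only _buffer_encode is supplied, returning an
# (output, consumed) tuple; whatever is not consumed stays in self.buffer until
# the next call to encode().
#
# class _SketchEncoder(BufferedIncrementalEncoder):
# def _buffer_encode(self, input, errors, final):
# # consume everything; a real codec may hold back an incomplete tail
# return (input.encode('ascii', errors), len(input))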
class IncrementalDecoder(object):
"""
An IncrementalDecoder decodes an input in multiple steps. The input can
be passed piece by piece to the decode() method. The IncrementalDecoder
remembers the state of the decoding process between calls to decode().
"""
def __init__(self, errors='strict'):
"""
Create an IncrementalDecoder instance.
The IncrementalDecoder may use different error handling schemes by
providing the errors keyword argument. See the module docstring
for a list of possible values.
"""
self.errors = errors
def decode(self, input, final=False):
"""
Decode input and returns the resulting object.
"""
raise NotImplementedError
def reset(self):
"""
Reset the decoder to the initial state.
"""
def getstate(self):
"""
Return the current state of the decoder.
This must be a (buffered_input, additional_state_info) tuple.
buffered_input must be a bytes object containing bytes that
were passed to decode() that have not yet been converted.
additional_state_info must be a non-negative integer
representing the state of the decoder WITHOUT yet having
processed the contents of buffered_input. In the initial state
and after reset(), getstate() must return (b"", 0).
"""
return (b"", 0)
def setstate(self, state):
"""
Set the current state of the decoder.
state must have been returned by getstate(). The effect of
setstate((b"", 0)) must be equivalent to reset().
"""
class BufferedIncrementalDecoder(IncrementalDecoder):
"""
This subclass of IncrementalDecoder can be used as the baseclass for an
incremental decoder if the decoder must be able to handle incomplete
byte sequences.
"""
def __init__(self, errors='strict'):
IncrementalDecoder.__init__(self, errors)
# undecoded input that is kept between calls to decode()
self.buffer = b""
def _buffer_decode(self, input, errors, final):
# Overwrite this method in subclasses: It must decode input
# and return an (output, length consumed) tuple
raise NotImplementedError
def decode(self, input, final=False):
# decode input (taking the buffer into account)
data = self.buffer + input
(result, consumed) = self._buffer_decode(data, self.errors, final)
# keep undecoded input until the next call
self.buffer = data[consumed:]
return result
def reset(self):
IncrementalDecoder.reset(self)
self.buffer = b""
def getstate(self):
# additional state info is always 0
return (self.buffer, 0)
def setstate(self, state):
# ignore additional state info
self.buffer = state[0]
#
# The StreamWriter and StreamReader class provide generic working
# interfaces which can be used to implement new encoding submodules
# very easily. See encodings/utf_8.py for an example on how this is
# done.
#
class StreamWriter(Codec):
def __init__(self, stream, errors='strict'):
""" Creates a StreamWriter instance.
stream must be a file-like object open for writing
(binary) data.
The StreamWriter may use different error handling
schemes by providing the errors keyword argument. These
parameters are predefined:
'strict' - raise a ValueError (or a subclass)
'ignore' - ignore the character and continue with the next
'replace'- replace with a suitable replacement character
'xmlcharrefreplace' - Replace with the appropriate XML
character reference.
'backslashreplace' - Replace with backslashed escape
sequences (only for encoding).
The set of allowed parameter values can be extended via
register_error.
"""
self.stream = stream
self.errors = errors
def write(self, object):
""" Writes the object's contents encoded to self.stream.
"""
data, consumed = self.encode(object, self.errors)
self.stream.write(data)
def writelines(self, list):
""" Writes the concatenated list of strings to the stream
using .write().
"""
self.write(''.join(list))
def reset(self):
""" Flushes and resets the codec buffers used for keeping state.
Calling this method should ensure that the data on the
output is put into a clean state, that allows appending
of new fresh data without having to rescan the whole
stream to recover state.
"""
pass
def seek(self, offset, whence=0):
self.stream.seek(offset, whence)
if whence == 0 and offset == 0:
self.reset()
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
###
class StreamReader(Codec):
charbuffertype = str
def __init__(self, stream, errors='strict'):
""" Creates a StreamReader instance.
stream must be a file-like object open for reading
(binary) data.
The StreamReader may use different error handling
schemes by providing the errors keyword argument. These
parameters are predefined:
'strict' - raise a ValueError (or a subclass)
'ignore' - ignore the character and continue with the next
'replace'- replace with a suitable replacement character;
The set of allowed parameter values can be extended via
register_error.
"""
self.stream = stream
self.errors = errors
self.bytebuffer = b""
self._empty_charbuffer = self.charbuffertype()
self.charbuffer = self._empty_charbuffer
self.linebuffer = None
def decode(self, input, errors='strict'):
raise NotImplementedError
def read(self, size=-1, chars=-1, firstline=False):
""" Decodes data from the stream self.stream and returns the
resulting object.
chars indicates the number of characters to read from the
stream. read() will never return more than chars
characters, but it might return less, if there are not enough
characters available.
size indicates the approximate maximum number of bytes to
read from the stream for decoding purposes. The decoder
can modify this setting as appropriate. The default value
-1 indicates to read and decode as much as possible. size
is intended to prevent having to decode huge files in one
step.
If firstline is true, and a UnicodeDecodeError happens
after the first line terminator in the input only the first line
will be returned, the rest of the input will be kept until the
next call to read().
The method should use a greedy read strategy meaning that
it should read as much data as is allowed within the
definition of the encoding and the given size, e.g. if
optional encoding endings or state markers are available
on the stream, these should be read too.
"""
# If we have lines cached, first merge them back into characters
if self.linebuffer:
self.charbuffer = self._empty_charbuffer.join(self.linebuffer)
self.linebuffer = None
# read until we get the required number of characters (if available)
while True:
# can the request be satisfied from the character buffer?
if chars < 0:
if size < 0:
if self.charbuffer:
break
elif len(self.charbuffer) >= size:
break
else:
if len(self.charbuffer) >= chars:
break
# we need more data
if size < 0:
newdata = self.stream.read()
else:
newdata = self.stream.read(size)
# decode bytes (those remaining from the last call included)
data = self.bytebuffer + newdata
try:
newchars, decodedbytes = self.decode(data, self.errors)
except UnicodeDecodeError as exc:
if firstline:
newchars, decodedbytes = \
self.decode(data[:exc.start], self.errors)
lines = newchars.splitlines(keepends=True)
if len(lines)<=1:
raise
else:
raise
# keep undecoded bytes until the next call
self.bytebuffer = data[decodedbytes:]
# put new characters in the character buffer
self.charbuffer += newchars
# there was no data available
if not newdata:
break
if chars < 0:
# Return everything we've got
result = self.charbuffer
self.charbuffer = self._empty_charbuffer
else:
# Return the first chars characters
result = self.charbuffer[:chars]
self.charbuffer = self.charbuffer[chars:]
return result
def readline(self, size=None, keepends=True):
""" Read one line from the input stream and return the
decoded data.
size, if given, is passed as size argument to the
read() method.
"""
# If we have lines cached from an earlier read, return
# them unconditionally
if self.linebuffer:
line = self.linebuffer[0]
del self.linebuffer[0]
if len(self.linebuffer) == 1:
# revert to charbuffer mode; we might need more data
# next time
self.charbuffer = self.linebuffer[0]
self.linebuffer = None
if not keepends:
line = line.splitlines(keepends=False)[0]
return line
readsize = size or 72
line = self._empty_charbuffer
# If size is given, we call read() only once
while True:
data = self.read(readsize, firstline=True)
if data:
# If we're at a "\r" read one extra character (which might
# be a "\n") to get a proper line ending. If the stream is
# temporarily exhausted we return the wrong line ending.
if (isinstance(data, str) and data.endswith("\r")) or \
(isinstance(data, bytes) and data.endswith(b"\r")):
data += self.read(size=1, chars=1)
line += data
lines = line.splitlines(keepends=True)
if lines:
if len(lines) > 1:
# More than one line result; the first line is a full line
# to return
line = lines[0]
del lines[0]
if len(lines) > 1:
# cache the remaining lines
lines[-1] += self.charbuffer
self.linebuffer = lines
self.charbuffer = None
else:
# only one remaining line, put it back into charbuffer
self.charbuffer = lines[0] + self.charbuffer
if not keepends:
line = line.splitlines(keepends=False)[0]
break
line0withend = lines[0]
line0withoutend = lines[0].splitlines(keepends=False)[0]
if line0withend != line0withoutend: # We really have a line end
# Put the rest back together and keep it until the next call
self.charbuffer = self._empty_charbuffer.join(lines[1:]) + \
self.charbuffer
if keepends:
line = line0withend
else:
line = line0withoutend
break
# we didn't get anything or this was our only try
if not data or size is not None:
if line and not keepends:
line = line.splitlines(keepends=False)[0]
break
if readsize < 8000:
readsize *= 2
return line
def readlines(self, sizehint=None, keepends=True):
""" Read all lines available on the input stream
and return them as list of lines.
Line breaks are implemented using the codec's decoder
method and are included in the list entries.
sizehint, if given, is ignored since there is no efficient
way to find the true end-of-line.
"""
data = self.read()
return data.splitlines(keepends)
def reset(self):
""" Resets the codec buffers used for keeping state.
Note that no stream repositioning should take place.
This method is primarily intended to be able to recover
from decoding errors.
"""
self.bytebuffer = b""
self.charbuffer = self._empty_charbuffer
self.linebuffer = None
def seek(self, offset, whence=0):
""" Set the input stream's current position.
Resets the codec buffers used for keeping state.
"""
self.stream.seek(offset, whence)
self.reset()
def __next__(self):
""" Return the next decoded line from the input stream."""
line = self.readline()
if line:
return line
raise StopIteration
def __iter__(self):
return self
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
###
class StreamReaderWriter:
""" StreamReaderWriter instances allow wrapping streams which
work in both read and write modes.
The design is such that one can use the factory functions
returned by the codec.lookup() function to construct the
instance.
"""
# Optional attributes set by the file wrappers below
encoding = 'unknown'
def __init__(self, stream, Reader, Writer, errors='strict'):
""" Creates a StreamReaderWriter instance.
stream must be a Stream-like object.
Reader, Writer must be factory functions or classes
providing the StreamReader, StreamWriter interface resp.
Error handling is done in the same way as defined for the
StreamWriter/Readers.
"""
self.stream = stream
self.reader = Reader(stream, errors)
self.writer = Writer(stream, errors)
self.errors = errors
def read(self, size=-1):
return self.reader.read(size)
def readline(self, size=None):
return self.reader.readline(size)
def readlines(self, sizehint=None):
return self.reader.readlines(sizehint)
def __next__(self):
""" Return the next decoded line from the input stream."""
return next(self.reader)
def __iter__(self):
return self
def write(self, data):
return self.writer.write(data)
def writelines(self, list):
return self.writer.writelines(list)
def reset(self):
self.reader.reset()
self.writer.reset()
def seek(self, offset, whence=0):
self.stream.seek(offset, whence)
self.reader.reset()
if whence == 0 and offset == 0:
self.writer.reset()
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
# these are needed to make "with codecs.open(...)" work properly
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
###
class StreamRecoder:
""" StreamRecoder instances provide a frontend - backend
view of encoding data.
They use the complete set of APIs returned by the
codecs.lookup() function to implement their task.
Data written to the stream is first decoded into an
intermediate format (which is dependent on the given codec
combination) and then written to the stream using an instance
of the provided Writer class.
In the other direction, data is read from the stream using a
Reader instance and then encoded and returned to the caller.
"""
# Optional attributes set by the file wrappers below
data_encoding = 'unknown'
file_encoding = 'unknown'
def __init__(self, stream, encode, decode, Reader, Writer,
errors='strict'):
""" Creates a StreamRecoder instance which implements a two-way
conversion: encode and decode work on the frontend (the
input to .read() and output of .write()) while
Reader and Writer work on the backend (reading and
writing to the stream).
You can use these objects to do transparent direct
recodings from e.g. latin-1 to utf-8 and back.
stream must be a file-like object.
encode, decode must adhere to the Codec interface, Reader,
Writer must be factory functions or classes providing the
StreamReader, StreamWriter interface resp.
encode and decode are needed for the frontend translation,
Reader and Writer for the backend translation. Unicode is
used as intermediate encoding.
Error handling is done in the same way as defined for the
StreamWriter/Readers.
"""
self.stream = stream
self.encode = encode
self.decode = decode
self.reader = Reader(stream, errors)
self.writer = Writer(stream, errors)
self.errors = errors
def read(self, size=-1):
data = self.reader.read(size)
data, bytesencoded = self.encode(data, self.errors)
return data
def readline(self, size=None):
if size is None:
data = self.reader.readline()
else:
data = self.reader.readline(size)
data, bytesencoded = self.encode(data, self.errors)
return data
def readlines(self, sizehint=None):
data = self.reader.read()
data, bytesencoded = self.encode(data, self.errors)
return data.splitlines(keepends=True)
def __next__(self):
""" Return the next decoded line from the input stream."""
data = next(self.reader)
data, bytesencoded = self.encode(data, self.errors)
return data
def __iter__(self):
return self
def write(self, data):
data, bytesdecoded = self.decode(data, self.errors)
return self.writer.write(data)
def writelines(self, list):
data = ''.join(list)
data, bytesdecoded = self.decode(data, self.errors)
return self.writer.write(data)
def reset(self):
self.reader.reset()
self.writer.reset()
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
### Shortcuts
def open(filename, mode='rb', encoding=None, errors='strict', buffering=1):
""" Open an encoded file using the given mode and return
a wrapped version providing transparent encoding/decoding.
Note: The wrapped version will only accept the object format
defined by the codecs, i.e. Unicode objects for most builtin
codecs. Output is also codec dependent and will usually be
Unicode as well.
Files are always opened in binary mode, even if no binary mode
was specified. This is done to avoid data loss due to encodings
using 8-bit values. The default file mode is 'rb' meaning to
open the file in binary read mode.
encoding specifies the encoding which is to be used for the
file.
errors may be given to define the error handling. It defaults
to 'strict' which causes ValueErrors to be raised in case an
encoding error occurs.
buffering has the same meaning as for the builtin open() API.
It defaults to line buffered.
The returned wrapped file object provides an extra attribute
.encoding which allows querying the used encoding. This
attribute is only available if an encoding was specified as
parameter.
"""
if encoding is not None and \
'b' not in mode:
# Force opening of the file in binary mode
mode = mode + 'b'
file = builtins.open(filename, mode, buffering)
if encoding is None:
return file
info = lookup(encoding)
srw = StreamReaderWriter(file, info.streamreader, info.streamwriter, errors)
# Add attributes to simplify introspection
srw.encoding = encoding
return srw
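# Usage sketch ('data.txt' is a hypothetical file name): the returned
# StreamReaderWriter decodes on read and encodes on write, so callers deal in
# str while the underlying file stays binary.
#
# with open('data.txt', 'r', encoding='utf-8') as f:
# text = f.read()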
def EncodedFile(file, data_encoding, file_encoding=None, errors='strict'):
""" Return a wrapped version of file which provides transparent
encoding translation.
Strings written to the wrapped file are interpreted according
to the given data_encoding and then written to the original
file as string using file_encoding. The intermediate encoding
will usually be Unicode but depends on the specified codecs.
Strings are read from the file using file_encoding and then
passed back to the caller as string using data_encoding.
If file_encoding is not given, it defaults to data_encoding.
errors may be given to define the error handling. It defaults
to 'strict' which causes ValueErrors to be raised in case an
encoding error occurs.
The returned wrapped file object provides two extra attributes
.data_encoding and .file_encoding which reflect the given
parameters of the same name. The attributes can be used for
introspection by Python programs.
"""
if file_encoding is None:
file_encoding = data_encoding
data_info = lookup(data_encoding)
file_info = lookup(file_encoding)
sr = StreamRecoder(file, data_info.encode, data_info.decode,
file_info.streamreader, file_info.streamwriter, errors)
# Add attributes to simplify introspection
sr.data_encoding = data_encoding
sr.file_encoding = file_encoding
return sr
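# Usage sketch (hypothetical output file): bytes written in data_encoding are
# transparently re-encoded to file_encoding; the __main__ block at the bottom
# of this module applies the same idea to sys.stdout.
#
# f = EncodedFile(open('out.bin', 'wb'), 'latin-1', 'utf-8')
# f.write(b'caf\xe9') # latin-1 bytes in, utf-8 bytes reach out.bin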
### Helpers for codec lookup
def getencoder(encoding):
""" Lookup up the codec for the given encoding and return
its encoder function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).encode
def getdecoder(encoding):
""" Lookup up the codec for the given encoding and return
its decoder function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).decode
def getincrementalencoder(encoding):
""" Lookup up the codec for the given encoding and return
its IncrementalEncoder class or factory function.
Raises a LookupError in case the encoding cannot be found
or the codecs doesn't provide an incremental encoder.
"""
encoder = lookup(encoding).incrementalencoder
if encoder is None:
raise LookupError(encoding)
return encoder
def getincrementaldecoder(encoding):
""" Lookup up the codec for the given encoding and return
its IncrementalDecoder class or factory function.
Raises a LookupError in case the encoding cannot be found
or the codecs doesn't provide an incremental decoder.
"""
decoder = lookup(encoding).incrementaldecoder
if decoder is None:
raise LookupError(encoding)
return decoder
def getreader(encoding):
""" Lookup up the codec for the given encoding and return
its StreamReader class or factory function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).streamreader
def getwriter(encoding):
""" Lookup up the codec for the given encoding and return
its StreamWriter class or factory function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).streamwriter
def iterencode(iterator, encoding, errors='strict', **kwargs):
"""
Encoding iterator.
Encodes the input strings from the iterator using an IncrementalEncoder.
errors and kwargs are passed through to the IncrementalEncoder
constructor.
"""
encoder = getincrementalencoder(encoding)(errors, **kwargs)
for input in iterator:
output = encoder.encode(input)
if output:
yield output
output = encoder.encode("", True)
if output:
yield output
def iterdecode(iterator, encoding, errors='strict', **kwargs):
"""
Decoding iterator.
Decodes the input strings from the iterator using an IncrementalDecoder.
errors and kwargs are passed through to the IncrementalDecoder
constructor.
"""
decoder = getincrementaldecoder(encoding)(errors, **kwargs)
for input in iterator:
output = decoder.decode(input)
if output:
yield output
output = decoder.decode(b"", True)
if output:
yield output
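# Usage sketch: iterencode and iterdecode are inverses over chunked input, e.g.
# chunks = iterencode(['sp', 'am'], 'utf-8') # yields encoded bytes chunks
# ''.join(iterdecode(chunks, 'utf-8')) # -> 'spam'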
### Helpers for charmap-based codecs
def make_identity_dict(rng):
""" make_identity_dict(rng) -> dict
Return a dictionary where elements of the rng sequence are
mapped to themselves.
"""
return {i:i for i in rng}
def make_encoding_map(decoding_map):
""" Creates an encoding map from a decoding map.
If a target mapping in the decoding map occurs multiple
times, then that target is mapped to None (undefined mapping),
causing an exception when encountered by the charmap codec
during translation.
One example where this happens is cp875.py which decodes
multiple characters to \u001a.
"""
m = {}
for k,v in decoding_map.items():
if not v in m:
m[v] = k
else:
m[v] = None
return m
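# Sketch (hypothetical single-byte charmap): build a decoding map, then derive
# the encoding map from it; a byte decoding to an already-seen character would
# map that character to None, i.e. leave it undefined for encoding.
#
# decoding_map = make_identity_dict(range(128))
# decoding_map[0x80] = 0x20ac # map byte 0x80 to U+20AC
# encoding_map = make_encoding_map(decoding_map)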
### error handlers
try:
strict_errors = lookup_error("strict")
ignore_errors = lookup_error("ignore")
replace_errors = lookup_error("replace")
xmlcharrefreplace_errors = lookup_error("xmlcharrefreplace")
backslashreplace_errors = lookup_error("backslashreplace")
except LookupError:
# In --disable-unicode builds, these error handlers are missing
strict_errors = None
ignore_errors = None
replace_errors = None
xmlcharrefreplace_errors = None
backslashreplace_errors = None
# Tell modulefinder that using codecs probably needs the encodings
# package
_false = 0
if _false:
import encodings
### Tests
if __name__ == '__main__':
# Make stdout translate Latin-1 output into UTF-8 output
sys.stdout = EncodedFile(sys.stdout, 'latin-1', 'utf-8')
# Have stdin translate Latin-1 input into UTF-8 input
sys.stdin = EncodedFile(sys.stdin, 'utf-8', 'latin-1')
|
0jpq0/kbengine | refs/heads/master | kbe/src/lib/python/Tools/clinic/clinic_test.py | 41 | # Argument Clinic
# Copyright 2012-2013 by Larry Hastings.
# Licensed to the PSF under a contributor agreement.
#
import builtins
import clinic
from clinic import DSLParser
import collections
import inspect
from test import support
import sys
import unittest
from unittest import TestCase
class FakeConverter:
def __init__(self, name, args):
self.name = name
self.args = args
class FakeConverterFactory:
def __init__(self, name):
self.name = name
def __call__(self, name, default, **kwargs):
return FakeConverter(self.name, kwargs)
class FakeConvertersDict:
def __init__(self):
self.used_converters = {}
def get(self, name, default):
return self.used_converters.setdefault(name, FakeConverterFactory(name))
clinic.Clinic.presets_text = ''
c = clinic.Clinic(language='C')
class FakeClinic:
def __init__(self):
self.converters = FakeConvertersDict()
self.legacy_converters = FakeConvertersDict()
self.language = clinic.CLanguage(None)
self.filename = None
self.block_parser = clinic.BlockParser('', self.language)
self.modules = collections.OrderedDict()
self.classes = collections.OrderedDict()
clinic.clinic = self
self.name = "FakeClinic"
self.line_prefix = self.line_suffix = ''
self.destinations = {}
self.add_destination("block", "buffer")
self.add_destination("file", "buffer")
self.add_destination("suppress", "suppress")
d = self.destinations.get
self.field_destinations = collections.OrderedDict((
('docstring_prototype', d('suppress')),
('docstring_definition', d('block')),
('methoddef_define', d('block')),
('impl_prototype', d('block')),
('parser_prototype', d('suppress')),
('parser_definition', d('block')),
('impl_definition', d('block')),
))
def get_destination(self, name):
d = self.destinations.get(name)
if not d:
sys.exit("Destination does not exist: " + repr(name))
return d
def add_destination(self, name, type, *args):
if name in self.destinations:
sys.exit("Destination already exists: " + repr(name))
self.destinations[name] = clinic.Destination(name, type, self, *args)
def is_directive(self, name):
return name == "module"
def directive(self, name, args):
self.called_directives[name] = args
_module_and_class = clinic.Clinic._module_and_class
class ClinicWholeFileTest(TestCase):
def test_eol(self):
# regression test:
# clinic's block parser didn't recognize
# the "end line" for the block if it
# didn't end in "\n" (as in, the last
# byte of the file was '/').
# so it would spit out an end line for you.
# and since you really already had one,
# the last line of the block got corrupted.
c = clinic.Clinic(clinic.CLanguage(None))
raw = "/*[clinic]\nfoo\n[clinic]*/"
cooked = c.parse(raw).splitlines()
end_line = cooked[2].rstrip()
# this test is redundant, it's just here explicitly to catch
# the regression test so we don't forget what it looked like
self.assertNotEqual(end_line, "[clinic]*/[clinic]*/")
self.assertEqual(end_line, "[clinic]*/")
class ClinicGroupPermuterTest(TestCase):
def _test(self, l, m, r, output):
computed = clinic.permute_optional_groups(l, m, r)
self.assertEqual(output, computed)
def test_range(self):
self._test([['start']], ['stop'], [['step']],
(
('stop',),
('start', 'stop',),
('start', 'stop', 'step',),
))
def test_add_window(self):
self._test([['x', 'y']], ['ch'], [['attr']],
(
('ch',),
('ch', 'attr'),
('x', 'y', 'ch',),
('x', 'y', 'ch', 'attr'),
))
def test_ludicrous(self):
self._test([['a1', 'a2', 'a3'], ['b1', 'b2']], ['c1'], [['d1', 'd2'], ['e1', 'e2', 'e3']],
(
('c1',),
('b1', 'b2', 'c1'),
('b1', 'b2', 'c1', 'd1', 'd2'),
('a1', 'a2', 'a3', 'b1', 'b2', 'c1'),
('a1', 'a2', 'a3', 'b1', 'b2', 'c1', 'd1', 'd2'),
('a1', 'a2', 'a3', 'b1', 'b2', 'c1', 'd1', 'd2', 'e1', 'e2', 'e3'),
))
def test_right_only(self):
self._test([], [], [['a'],['b'],['c']],
(
(),
('a',),
('a', 'b'),
('a', 'b', 'c')
))
def test_have_left_options_but_required_is_empty(self):
def fn():
clinic.permute_optional_groups(['a'], [], [])
self.assertRaises(AssertionError, fn)
class ClinicLinearFormatTest(TestCase):
def _test(self, input, output, **kwargs):
computed = clinic.linear_format(input, **kwargs)
self.assertEqual(output, computed)
def test_empty_strings(self):
self._test('', '')
def test_solo_newline(self):
self._test('\n', '\n')
def test_no_substitution(self):
self._test("""
abc
""", """
abc
""")
def test_empty_substitution(self):
self._test("""
abc
{name}
def
""", """
abc
def
""", name='')
def test_single_line_substitution(self):
self._test("""
abc
{name}
def
""", """
abc
GARGLE
def
""", name='GARGLE')
def test_multiline_substitution(self):
self._test("""
abc
{name}
def
""", """
abc
bingle
bungle
def
""", name='bingle\nbungle\n')
class InertParser:
def __init__(self, clinic):
pass
def parse(self, block):
pass
class CopyParser:
def __init__(self, clinic):
pass
def parse(self, block):
block.output = block.input
class ClinicBlockParserTest(TestCase):
def _test(self, input, output):
language = clinic.CLanguage(None)
blocks = list(clinic.BlockParser(input, language))
writer = clinic.BlockPrinter(language)
for block in blocks:
writer.print_block(block)
output = writer.f.getvalue()
assert output == input, "output != input!\n\noutput " + repr(output) + "\n\n input " + repr(input)
def round_trip(self, input):
return self._test(input, input)
def test_round_trip_1(self):
self.round_trip("""
verbatim text here
lah dee dah
""")
def test_round_trip_2(self):
self.round_trip("""
verbatim text here
lah dee dah
/*[inert]
abc
[inert]*/
def
/*[inert checksum: 7b18d017f89f61cf17d47f92749ea6930a3f1deb]*/
xyz
""")
def _test_clinic(self, input, output):
language = clinic.CLanguage(None)
c = clinic.Clinic(language)
c.parsers['inert'] = InertParser(c)
c.parsers['copy'] = CopyParser(c)
computed = c.parse(input)
self.assertEqual(output, computed)
def test_clinic_1(self):
self._test_clinic("""
verbatim text here
lah dee dah
/*[copy input]
def
[copy start generated code]*/
abc
/*[copy end generated code: output=03cfd743661f0797 input=7b18d017f89f61cf]*/
xyz
""", """
verbatim text here
lah dee dah
/*[copy input]
def
[copy start generated code]*/
def
/*[copy end generated code: output=7b18d017f89f61cf input=7b18d017f89f61cf]*/
xyz
""")
class ClinicParserTest(TestCase):
def test_trivial(self):
parser = DSLParser(FakeClinic())
block = clinic.Block("module os\nos.access")
parser.parse(block)
module, function = block.signatures
self.assertEqual("access", function.name)
self.assertEqual("os", module.name)
def test_ignore_line(self):
block = self.parse("#\nmodule os\nos.access")
module, function = block.signatures
self.assertEqual("access", function.name)
self.assertEqual("os", module.name)
def test_param(self):
function = self.parse_function("module os\nos.access\n path: int")
self.assertEqual("access", function.name)
self.assertEqual(2, len(function.parameters))
p = function.parameters['path']
self.assertEqual('path', p.name)
self.assertIsInstance(p.converter, clinic.int_converter)
def test_param_default(self):
function = self.parse_function("module os\nos.access\n follow_symlinks: bool = True")
p = function.parameters['follow_symlinks']
self.assertEqual(True, p.default)
def test_param_with_continuations(self):
function = self.parse_function("module os\nos.access\n follow_symlinks: \\\n bool \\\n =\\\n True")
p = function.parameters['follow_symlinks']
self.assertEqual(True, p.default)
def test_param_default_expression(self):
function = self.parse_function("module os\nos.access\n follow_symlinks: int(c_default='MAXSIZE') = sys.maxsize")
p = function.parameters['follow_symlinks']
self.assertEqual(sys.maxsize, p.default)
self.assertEqual("MAXSIZE", p.converter.c_default)
s = self.parse_function_should_fail("module os\nos.access\n follow_symlinks: int = sys.maxsize")
self.assertEqual(s, "Error on line 0:\nWhen you specify a named constant ('sys.maxsize') as your default value,\nyou MUST specify a valid c_default.\n")
def test_param_no_docstring(self):
function = self.parse_function("""
module os
os.access
follow_symlinks: bool = True
something_else: str = ''""")
p = function.parameters['follow_symlinks']
self.assertEqual(3, len(function.parameters))
self.assertIsInstance(function.parameters['something_else'].converter, clinic.str_converter)
def test_param_default_parameters_out_of_order(self):
s = self.parse_function_should_fail("""
module os
os.access
follow_symlinks: bool = True
something_else: str""")
self.assertEqual(s, """Error on line 0:
Can't have a parameter without a default ('something_else')
after a parameter with a default!
""")
def disabled_test_converter_arguments(self):
function = self.parse_function("module os\nos.access\n path: path_t(allow_fd=1)")
p = function.parameters['path']
self.assertEqual(1, p.converter.args['allow_fd'])
def test_function_docstring(self):
function = self.parse_function("""
module os
os.stat as os_stat_fn
path: str
Path to be examined
Perform a stat system call on the given path.""")
self.assertEqual("""
stat($module, /, path)
--
Perform a stat system call on the given path.
path
Path to be examined
""".strip(), function.docstring)
def test_explicit_parameters_in_docstring(self):
function = self.parse_function("""
module foo
foo.bar
x: int
Documentation for x.
y: int
This is the documentation for foo.
Okay, we're done here.
""")
self.assertEqual("""
bar($module, /, x, y)
--
This is the documentation for foo.
x
Documentation for x.
Okay, we're done here.
""".strip(), function.docstring)
def test_parser_regression_special_character_in_parameter_column_of_docstring_first_line(self):
function = self.parse_function("""
module os
os.stat
path: str
This/used to break Clinic!
""")
self.assertEqual("stat($module, /, path)\n--\n\nThis/used to break Clinic!", function.docstring)
def test_c_name(self):
function = self.parse_function("module os\nos.stat as os_stat_fn")
self.assertEqual("os_stat_fn", function.c_basename)
def test_return_converter(self):
function = self.parse_function("module os\nos.stat -> int")
self.assertIsInstance(function.return_converter, clinic.int_return_converter)
def test_star(self):
function = self.parse_function("module os\nos.access\n *\n follow_symlinks: bool = True")
p = function.parameters['follow_symlinks']
self.assertEqual(inspect.Parameter.KEYWORD_ONLY, p.kind)
self.assertEqual(0, p.group)
def test_group(self):
function = self.parse_function("module window\nwindow.border\n [\n ls : int\n ]\n /\n")
p = function.parameters['ls']
self.assertEqual(1, p.group)
def test_left_group(self):
function = self.parse_function("""
module curses
curses.addch
[
y: int
Y-coordinate.
x: int
X-coordinate.
]
ch: char
Character to add.
[
attr: long
Attributes for the character.
]
/
""")
for name, group in (
('y', -1), ('x', -1),
('ch', 0),
('attr', 1),
):
p = function.parameters[name]
self.assertEqual(p.group, group)
self.assertEqual(p.kind, inspect.Parameter.POSITIONAL_ONLY)
self.assertEqual(function.docstring.strip(), """
addch([y, x,] ch, [attr])
y
Y-coordinate.
x
X-coordinate.
ch
Character to add.
attr
Attributes for the character.
""".strip())
def test_nested_groups(self):
function = self.parse_function("""
module curses
curses.imaginary
[
[
y1: int
Y-coordinate.
y2: int
Y-coordinate.
]
x1: int
X-coordinate.
x2: int
X-coordinate.
]
ch: char
Character to add.
[
attr1: long
Attributes for the character.
attr2: long
Attributes for the character.
attr3: long
Attributes for the character.
[
attr4: long
Attributes for the character.
attr5: long
Attributes for the character.
attr6: long
Attributes for the character.
]
]
/
""")
for name, group in (
('y1', -2), ('y2', -2),
('x1', -1), ('x2', -1),
('ch', 0),
('attr1', 1), ('attr2', 1), ('attr3', 1),
('attr4', 2), ('attr5', 2), ('attr6', 2),
):
p = function.parameters[name]
self.assertEqual(p.group, group)
self.assertEqual(p.kind, inspect.Parameter.POSITIONAL_ONLY)
self.assertEqual(function.docstring.strip(), """
imaginary([[y1, y2,] x1, x2,] ch, [attr1, attr2, attr3, [attr4, attr5,
attr6]])
y1
Y-coordinate.
y2
Y-coordinate.
x1
X-coordinate.
x2
X-coordinate.
ch
Character to add.
attr1
Attributes for the character.
attr2
Attributes for the character.
attr3
Attributes for the character.
attr4
Attributes for the character.
attr5
Attributes for the character.
attr6
Attributes for the character.
""".strip())
def parse_function_should_fail(self, s):
with support.captured_stdout() as stdout:
with self.assertRaises(SystemExit):
self.parse_function(s)
return stdout.getvalue()
def test_disallowed_grouping__two_top_groups_on_left(self):
s = self.parse_function_should_fail("""
module foo
foo.two_top_groups_on_left
[
group1 : int
]
[
group2 : int
]
param: int
""")
self.assertEqual(s,
('Error on line 0:\n'
'Function two_top_groups_on_left has an unsupported group configuration. (Unexpected state 2.b)\n'))
def test_disallowed_grouping__two_top_groups_on_right(self):
self.parse_function_should_fail("""
module foo
foo.two_top_groups_on_right
param: int
[
group1 : int
]
[
group2 : int
]
""")
def test_disallowed_grouping__parameter_after_group_on_right(self):
self.parse_function_should_fail("""
module foo
foo.parameter_after_group_on_right
param: int
[
[
group1 : int
]
group2 : int
]
""")
def test_disallowed_grouping__group_after_parameter_on_left(self):
self.parse_function_should_fail("""
module foo
foo.group_after_parameter_on_left
[
group2 : int
[
group1 : int
]
]
param: int
""")
def test_disallowed_grouping__empty_group_on_left(self):
self.parse_function_should_fail("""
module foo
foo.empty_group
[
[
]
group2 : int
]
param: int
""")
def test_disallowed_grouping__empty_group_on_right(self):
self.parse_function_should_fail("""
module foo
foo.empty_group
param: int
[
[
]
group2 : int
]
""")
def test_no_parameters(self):
function = self.parse_function("""
module foo
foo.bar
Docstring
""")
self.assertEqual("bar($module, /)\n--\n\nDocstring", function.docstring)
self.assertEqual(1, len(function.parameters)) # self!
def test_init_with_no_parameters(self):
function = self.parse_function("""
module foo
class foo.Bar "unused" "notneeded"
foo.Bar.__init__
Docstring
""", signatures_in_block=3, function_index=2)
# self is not in the signature
self.assertEqual("Bar()\n--\n\nDocstring", function.docstring)
# but it *is* a parameter
self.assertEqual(1, len(function.parameters))
def test_illegal_module_line(self):
self.parse_function_should_fail("""
module foo
foo.bar => int
/
""")
def test_illegal_c_basename(self):
self.parse_function_should_fail("""
module foo
foo.bar as 935
/
""")
def test_single_star(self):
self.parse_function_should_fail("""
module foo
foo.bar
*
*
""")
def test_parameters_required_after_star_without_initial_parameters_or_docstring(self):
self.parse_function_should_fail("""
module foo
foo.bar
*
""")
def test_parameters_required_after_star_without_initial_parameters_with_docstring(self):
self.parse_function_should_fail("""
module foo
foo.bar
*
Docstring here.
""")
def test_parameters_required_after_star_with_initial_parameters_without_docstring(self):
self.parse_function_should_fail("""
module foo
foo.bar
this: int
*
""")
def test_parameters_required_after_star_with_initial_parameters_and_docstring(self):
self.parse_function_should_fail("""
module foo
foo.bar
this: int
*
Docstring.
""")
def test_single_slash(self):
self.parse_function_should_fail("""
module foo
foo.bar
/
/
""")
def test_mix_star_and_slash(self):
self.parse_function_should_fail("""
module foo
foo.bar
x: int
y: int
*
z: int
/
""")
def test_parameters_not_permitted_after_slash_for_now(self):
self.parse_function_should_fail("""
module foo
foo.bar
/
x: int
""")
def test_function_not_at_column_0(self):
function = self.parse_function("""
module foo
foo.bar
x: int
Nested docstring here, goeth.
*
y: str
Not at column 0!
""")
self.assertEqual("""
bar($module, /, x, *, y)
--
Not at column 0!
x
Nested docstring here, goeth.
""".strip(), function.docstring)
def test_parser_regression_special_character_in_parameter_column_of_docstring_first_line(self):
function = self.parse_function("""
module os
os.stat
path: str
This/used to break Clinic!
""")
self.assertEqual("stat($module, /, path)\n--\n\nThis/used to break Clinic!", function.docstring)
def test_directive(self):
c = FakeClinic()
parser = DSLParser(c)
parser.flag = False
parser.directives['setflag'] = lambda : setattr(parser, 'flag', True)
block = clinic.Block("setflag")
parser.parse(block)
self.assertTrue(parser.flag)
def test_legacy_converters(self):
block = self.parse('module os\nos.access\n path: "s"')
module, function = block.signatures
self.assertIsInstance((function.parameters['path']).converter, clinic.str_converter)
def parse(self, text):
c = FakeClinic()
parser = DSLParser(c)
block = clinic.Block(text)
parser.parse(block)
return block
def parse_function(self, text, signatures_in_block=2, function_index=1):
block = self.parse(text)
s = block.signatures
self.assertEqual(len(s), signatures_in_block)
assert isinstance(s[0], clinic.Module)
assert isinstance(s[function_index], clinic.Function)
return s[function_index]
def test_scaffolding(self):
# test repr on special values
self.assertEqual(repr(clinic.unspecified), '<Unspecified>')
self.assertEqual(repr(clinic.NULL), '<Null>')
# test that fail fails
with support.captured_stdout() as stdout:
with self.assertRaises(SystemExit):
clinic.fail('The igloos are melting!', filename='clown.txt', line_number=69)
self.assertEqual(stdout.getvalue(), 'Error in file "clown.txt" on line 69:\nThe igloos are melting!\n')
if __name__ == "__main__":
unittest.main()
|
claudep/translate | refs/heads/master | translate/tools/test_phppo2pypo.py | 26 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# phppo2pypo unit tests
# Author: Wil Clouser <wclouser@mozilla.com>
# Date: 2009-12-03
from translate.convert import test_convert
from translate.misc import wStringIO
from translate.tools import phppo2pypo
class TestPhpPo2PyPo:
def test_single_po(self):
inputfile = """
# This user comment refers to: %1$s
#. This developer comment does too: %1$s
#: some/path.php:111
#, php-format
msgid "I have %2$s apples and %1$s oranges"
msgstr "I have %2$s apples and %1$s oranges"
"""
outputfile = wStringIO.StringIO()
phppo2pypo.convertphp2py(inputfile, outputfile)
output = outputfile.getvalue()
assert "refers to: {0}" in output
assert "does too: {0}" in output
assert 'msgid "I have {1} apples and {0} oranges"' in output
assert 'msgstr "I have {1} apples and {0} oranges"' in output
def test_plural_po(self):
inputfile = """
#. This developer comment refers to %1$s
#: some/path.php:111
#, php-format
msgid "I have %1$s apple"
msgid_plural "I have %1$s apples"
msgstr[0] "I have %1$s apple"
msgstr[1] "I have %1$s apples"
"""
outputfile = wStringIO.StringIO()
phppo2pypo.convertphp2py(inputfile, outputfile)
output = outputfile.getvalue()
assert 'msgid "I have {0} apple"' in output
assert 'msgid_plural "I have {0} apples"' in output
assert 'msgstr[0] "I have {0} apple"' in output
assert 'msgstr[1] "I have {0} apples"' in output
class TestPhpPo2PyPoCommand(test_convert.TestConvertCommand, TestPhpPo2PyPo):
"""Tests running actual phppo2pypo commands on files"""
convertmodule = phppo2pypo
defaultoptions = {}
|
wangjun/odoo | refs/heads/8.0 | addons/account_asset/wizard/account_asset_change_duration.py | 258 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from lxml import etree
from openerp.osv import fields, osv
class asset_modify(osv.osv_memory):
_name = 'asset.modify'
_description = 'Modify Asset'
def _get_asset_method_time(self, cr, uid, ids, field_name, arg, context=None):
# context may be None here; guard before calling .get()
if ids and len(ids) == 1 and context and context.get('active_id'):
asset = self.pool['account.asset.asset'].browse(cr, uid, context.get('active_id'), context=context)
return {ids[0]: asset.method_time}
else:
return dict.fromkeys(ids, False)
_columns = {
'name': fields.char('Reason', required=True),
'method_number': fields.integer('Number of Depreciations', required=True),
'method_period': fields.integer('Period Length'),
'method_end': fields.date('Ending date'),
'note': fields.text('Notes'),
'asset_method_time': fields.function(_get_asset_method_time, type='char', string='Asset Method Time', readonly=True),
}
def default_get(self, cr, uid, fields, context=None):
""" To get default values for the object.
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param fields: List of fields for which we want default values
@param context: A standard dictionary
@return: A dictionary of fields with their default values.
"""
if not context:
context = {}
asset_obj = self.pool.get('account.asset.asset')
res = super(asset_modify, self).default_get(cr, uid, fields, context=context)
asset_id = context.get('active_id', False)
asset = asset_obj.browse(cr, uid, asset_id, context=context)
if 'name' in fields:
res.update({'name': asset.name})
if 'method_number' in fields and asset.method_time == 'number':
res.update({'method_number': asset.method_number})
if 'method_period' in fields:
res.update({'method_period': asset.method_period})
if 'method_end' in fields and asset.method_time == 'end':
res.update({'method_end': asset.method_end})
if context.get('active_id'):
res['asset_method_time'] = self._get_asset_method_time(cr, uid, [0], 'asset_method_time', [], context=context)[0]
return res
def modify(self, cr, uid, ids, context=None):
""" Modifies the duration of asset for calculating depreciation
and maintains the history of old values.
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: List of Ids
@param context: A standard dictionary
@return: Close the wizard.
"""
if not context:
context = {}
asset_obj = self.pool.get('account.asset.asset')
history_obj = self.pool.get('account.asset.history')
asset_id = context.get('active_id', False)
asset = asset_obj.browse(cr, uid, asset_id, context=context)
data = self.browse(cr, uid, ids[0], context=context)
history_vals = {
'asset_id': asset_id,
'name': data.name,
'method_time': asset.method_time,
'method_number': asset.method_number,
'method_period': asset.method_period,
'method_end': asset.method_end,
'user_id': uid,
'date': time.strftime('%Y-%m-%d'),
'note': data.note,
}
history_obj.create(cr, uid, history_vals, context=context)
asset_vals = {
'method_number': data.method_number,
'method_period': data.method_period,
'method_end': data.method_end,
}
asset_obj.write(cr, uid, [asset_id], asset_vals, context=context)
asset_obj.compute_depreciation_board(cr, uid, [asset_id], context=context)
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
eeshangarg/zulip | refs/heads/master | zerver/lib/user_status.py | 5 | from typing import Any, Dict, Optional
from django.db.models import Q
from django.utils.timezone import now as timezone_now
from zerver.models import UserStatus
def get_user_info_dict(realm_id: int) -> Dict[str, Dict[str, Any]]:
rows = (
UserStatus.objects.filter(
user_profile__realm_id=realm_id,
user_profile__is_active=True,
)
.exclude(
Q(status=UserStatus.NORMAL) & Q(status_text=""),
)
.values(
"user_profile_id",
"status",
"status_text",
)
)
user_dict: Dict[str, Dict[str, Any]] = {}
for row in rows:
away = row["status"] == UserStatus.AWAY
status_text = row["status_text"]
user_id = row["user_profile_id"]
dct = {}
if away:
dct["away"] = away
if status_text:
dct["status_text"] = status_text
user_dict[str(user_id)] = dct
return user_dict
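# Example of the returned shape (hypothetical user IDs): keys are stringified
# user_profile_ids and only non-default fields appear per user, e.g.
# {"10": {"away": True, "status_text": "on vacation"}, "11": {"status_text": "lunch"}}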
def update_user_status(
user_profile_id: int, status: Optional[int], status_text: Optional[str], client_id: int
) -> None:
timestamp = timezone_now()
defaults = dict(
client_id=client_id,
timestamp=timestamp,
)
if status is not None:
defaults["status"] = status
if status_text is not None:
defaults["status_text"] = status_text
UserStatus.objects.update_or_create(
user_profile_id=user_profile_id,
defaults=defaults,
)
|
hitsthings/node-gyp | refs/heads/master | gyp/test/win/gyptest-cl-buffer-security-check.py | 344 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure buffer security check setting is extracted properly.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'compiler-flags'
test.run_gyp('buffer-security-check.gyp', chdir=CHDIR)
test.build('buffer-security-check.gyp', chdir=CHDIR)
def GetDisassemblyOfMain(exe):
# The standard library uses buffer security checks independent of our
# buffer security settings, so we extract just our code (i.e. main()) to
# check against.
full_path = test.built_file_path(exe, chdir=CHDIR)
output = test.run_dumpbin('/disasm', full_path)
result = []
in_main = False
for line in output.splitlines():
if line == '_main:':
in_main = True
elif in_main:
# Disassembly of next function starts.
if line.startswith('_'):
break
result.append(line)
return '\n'.join(result)
# Buffer security checks are on by default, make sure security_cookie
# appears in the disassembly of our code.
if 'security_cookie' not in GetDisassemblyOfMain('test_bsc_unset.exe'):
test.fail_test()
# Explicitly on.
if 'security_cookie' not in GetDisassemblyOfMain('test_bsc_on.exe'):
test.fail_test()
# Explicitly off, shouldn't be a reference to the security cookie.
if 'security_cookie' in GetDisassemblyOfMain('test_bsc_off.exe'):
test.fail_test()
test.pass_test()
|
maxamillion/anaconda | refs/heads/master | pyanaconda/ui/gui/helpers.py | 1 | # Abstract base classes for GUI classes
#
# Copyright (C) 2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): David Shea <dshea@redhat.com>
#
# This file contains abstract base classes that are specific to GUI
# functionality. See also pyanaconda.ui.helpers.
from abc import ABCMeta, abstractproperty, abstractmethod
from gi.repository import Gtk
from pyanaconda.ui.helpers import InputCheck, InputCheckHandler
# Inherit abstract methods from InputCheckHandler
# pylint: disable=abstract-method
class GUIInputCheckHandler(InputCheckHandler, metaclass=ABCMeta):
"""Provide InputCheckHandler functionality for Gtk input screens.
This class assumes that all input objects are of type GtkEditable and
attaches InputCheck.update_check_status to the changed signal.
"""
def _update_check_status(self, editable, inputcheck):
inputcheck.update_check_status()
def get_input(self, input_obj):
return input_obj.get_text()
def add_check(self, input_obj, run_check, data=None):
checkRef = InputCheckHandler.add_check(self, input_obj, run_check, data)
input_obj.connect_after("changed", self._update_check_status, checkRef)
return checkRef
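# A minimal sketch (hypothetical subclass and widgets) of how this mixin is
# used: a concrete handler implements the remaining abstract pieces, then
# add_check attaches the validator to the GtkEditable "changed" signal wired
# above.
#
# class _SketchHandler(GUIInputCheckHandler):
# def set_status(self, inputcheck):
# print(inputcheck.check_status)
#
# handler = _SketchHandler()
# handler.add_check(entry, some_check_function) # entry: a Gtk.Entry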
class GUIDialogInputCheckHandler(GUIInputCheckHandler, metaclass=ABCMeta):
"""Provide InputCheckHandler functionality for Gtk dialogs.
This class provides a helper method for setting an error message
on an entry field. Implementors of this class must still provide
a set_status method in order to control the sensitivity of widgets or
ignore activated signals.
"""
@abstractmethod
def set_status(self, inputcheck):
if inputcheck.check_status in (InputCheck.CHECK_OK, InputCheck.CHECK_SILENT):
inputcheck.input_obj.set_icon_from_icon_name(Gtk.EntryIconPosition.SECONDARY, None)
inputcheck.input_obj.set_icon_tooltip_text(Gtk.EntryIconPosition.SECONDARY, "")
else:
inputcheck.input_obj.set_icon_from_icon_name(Gtk.EntryIconPosition.SECONDARY,
"dialog-error")
inputcheck.input_obj.set_icon_tooltip_text(Gtk.EntryIconPosition.SECONDARY,
inputcheck.check_status)
class GUISpokeInputCheckHandler(GUIInputCheckHandler, metaclass=ABCMeta):
"""Provide InputCheckHandler functionality for graphical spokes.
This class implements set_status to set a message in the warning area of
the spoke window and provides an implementation of on_back_clicked to
prevent the user from exiting a spoke with bad input.
"""
def set_status(self, inputcheck):
"""Update the warning with the input validation error from the first
error message.
"""
failed_check = next(self.failed_checks_with_message, None)
self.clear_info()
if failed_check:
self.set_warning(failed_check.check_status)
# Implemented by GUIObject
@abstractmethod
def clear_info(self):
pass
# Implemented by GUIObject
@abstractmethod
def set_warning(self, msg):
pass
# Implemented by GUIObject
@abstractproperty
def window(self):
pass
@abstractmethod
def on_back_clicked(self, window):
"""Check whether the input validation checks allow the spoke to be exited.
Unlike NormalSpoke.on_back_clicked, this function returns a boolean value.
Classes implementing this class should run GUISpokeInputCheckHandler.on_back_clicked,
and if it succeeded, run NormalSpoke.on_back_clicked.
"""
failed_check = next(self.failed_checks, None)
if failed_check:
failed_check.input_obj.grab_focus()
return False
else:
return True
|
KarimAllah/nova | refs/heads/master | nova/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py | 2 | # Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import *
from migrate import *
from nova import log as logging
from nova import utils
meta = MetaData()
# virtual interface table to add to DB
virtual_interfaces = Table('virtual_interfaces', meta,
Column('created_at', DateTime(timezone=False),
default=utils.utcnow()),
Column('updated_at', DateTime(timezone=False),
onupdate=utils.utcnow()),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('address',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
unique=True),
Column('network_id',
Integer(),
ForeignKey('networks.id')),
Column('instance_id',
Integer(),
ForeignKey('instances.id'),
nullable=False),
mysql_engine='InnoDB')
# bridge_interface column to add to networks table
interface = Column('bridge_interface',
String(length=255, convert_unicode=False,
assert_unicode=None, unicode_error=None,
_warn_on_bytestring=False))
# virtual interface id column to add to fixed_ips table
# foreignkey added in next migration
virtual_interface_id = Column('virtual_interface_id',
Integer())
def upgrade(migrate_engine):
meta.bind = migrate_engine
# grab tables and (column for dropping later)
instances = Table('instances', meta, autoload=True)
networks = Table('networks', meta, autoload=True)
fixed_ips = Table('fixed_ips', meta, autoload=True)
c = instances.columns['mac_address']
# add interface column to networks table
# values will have to be set manually before running nova
try:
networks.create_column(interface)
except Exception:
logging.error(_("interface column not added to networks table"))
raise
# create virtual_interfaces table
try:
virtual_interfaces.create()
except Exception:
logging.error(_("Table |%s| not created!"), repr(virtual_interfaces))
raise
# add virtual_interface_id column to fixed_ips table
try:
fixed_ips.create_column(virtual_interface_id)
except Exception:
logging.error(_("VIF column not added to fixed_ips table"))
raise
# populate the virtual_interfaces table
# extract data from existing instance and fixed_ip tables
s = select([instances.c.id, instances.c.mac_address,
fixed_ips.c.network_id],
fixed_ips.c.instance_id == instances.c.id)
keys = ('instance_id', 'address', 'network_id')
join_list = [dict(zip(keys, row)) for row in s.execute()]
logging.debug(_("join list for moving mac_addresses |%s|"), join_list)
# insert data into the table
if join_list:
i = virtual_interfaces.insert()
i.execute(join_list)
# populate the fixed_ips virtual_interface_id column
s = select([fixed_ips.c.id, fixed_ips.c.instance_id],
fixed_ips.c.instance_id != None)
for row in s.execute():
m = select([virtual_interfaces.c.id]).\
where(virtual_interfaces.c.instance_id == row['instance_id']).\
as_scalar()
u = fixed_ips.update().values(virtual_interface_id=m).\
where(fixed_ips.c.id == row['id'])
u.execute()
# drop the mac_address column from instances
c.drop()
def downgrade(migrate_engine):
logging.error(_("Can't downgrade without losing data"))
raise Exception("Can't downgrade without losing data")
|
aleonliao/depot_tools | refs/heads/master | recipes.py | 11 | #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Bootstrap script to clone and forward to the recipe engine tool."""
import ast
import logging
import os
import random
import re
import subprocess
import sys
import time
import traceback
BOOTSTRAP_VERSION = 1
# The root of the repository relative to the directory of this file.
REPO_ROOT = ''
# The path of the recipes.cfg file relative to the root of the repository.
RECIPES_CFG = os.path.join('infra', 'config', 'recipes.cfg')
def parse_protobuf(fh):
"""Parse the protobuf text format just well enough to understand recipes.cfg.
We don't use the protobuf library because we want to be as self-contained
as possible in this bootstrap, so it can be simply vendored into a client
repo.
We assume all fields are repeated since we don't have a proto spec to work
with.
Args:
fh: a filehandle containing the text format protobuf.
Returns:
A recursive dictionary of lists.
"""
def parse_atom(text):
if text == 'true': return True
if text == 'false': return False
return ast.literal_eval(text)
ret = {}
for line in fh:
line = line.strip()
m = re.match(r'(\w+)\s*:\s*(.*)', line)
if m:
ret.setdefault(m.group(1), []).append(parse_atom(m.group(2)))
continue
m = re.match(r'(\w+)\s*{', line)
if m:
subparse = parse_protobuf(fh)
ret.setdefault(m.group(1), []).append(subparse)
continue
if line == '}': return ret
if line == '': continue
raise Exception('Could not understand line: <%s>' % line)
return ret
def get_unique(things):
if len(things) == 1:
return things[0]
elif len(things) == 0:
raise ValueError("Expected to get one thing, but dinna get none.")
else:
logging.warn('Expected to get one thing, but got a bunch: %s\n%s' %
(things, traceback.format_stack()))
return things[0]
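# Illustrative sketch (not part of the original script): parse_protobuf turns
# text-format protobuf into a dict of lists, recursing on '{' / '}'. The
# sample field values below are made up:
#
#   import io
#   sample = io.StringIO('api_version: 1\n'
#                        'deps {\n'
#                        '  project_id: "recipe_engine"\n'
#                        '  revision: "deadbeef"\n'
#                        '}\n')
#   parse_protobuf(sample)
#   # -> {'api_version': [1],
#   #     'deps': [{'project_id': ['recipe_engine'],
#   #               'revision': ['deadbeef']}]}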
def main():
if sys.platform.startswith(('win', 'cygwin')):
git = 'git.bat'
else:
git = 'git'
# Find the repository and config file to operate on.
repo_root = os.path.abspath(
os.path.join(os.path.dirname(__file__), REPO_ROOT))
recipes_cfg_path = os.path.join(repo_root, RECIPES_CFG)
with open(recipes_cfg_path, 'rU') as fh:
protobuf = parse_protobuf(fh)
engine_buf = get_unique([
b for b in protobuf['deps'] if b.get('project_id') == ['recipe_engine'] ])
engine_url = get_unique(engine_buf['url'])
engine_revision = get_unique(engine_buf['revision'])
engine_subpath = (get_unique(engine_buf.get('path_override', ['']))
.replace('/', os.path.sep))
recipes_path = os.path.join(repo_root,
get_unique(protobuf['recipes_path']).replace('/', os.path.sep))
deps_path = os.path.join(recipes_path, '.recipe_deps')
engine_path = os.path.join(deps_path, 'recipe_engine')
# Ensure that we have the recipe engine cloned.
def ensure_engine():
if not os.path.exists(deps_path):
os.makedirs(deps_path)
if not os.path.exists(engine_path):
subprocess.check_call([git, 'clone', engine_url, engine_path])
needs_fetch = subprocess.call(
[git, 'rev-parse', '--verify', '%s^{commit}' % engine_revision],
cwd=engine_path, stdout=open(os.devnull, 'w'))
if needs_fetch:
subprocess.check_call([git, 'fetch'], cwd=engine_path)
subprocess.check_call(
[git, 'checkout', '--quiet', engine_revision], cwd=engine_path)
try:
ensure_engine()
except subprocess.CalledProcessError as e:
if e.returncode == 128: # Thrown when git gets a lock error.
time.sleep(random.uniform(2, 5))
ensure_engine()
else:
raise
args = ['--package', recipes_cfg_path,
'--bootstrap-script', __file__] + sys.argv[1:]
return subprocess.call([
sys.executable, '-u',
os.path.join(engine_path, engine_subpath, 'recipes.py')] + args)
if __name__ == '__main__':
sys.exit(main())
|
rsteca/python-social-auth | refs/heads/master | social/apps/flask_app/template_filters.py | 84 | from flask import g, request
from social.backends.utils import user_backends_data
from social.apps.flask_app.utils import get_helper
def backends():
"""Load Social Auth current user data to context under the key 'backends'.
Will return the output of social.backends.utils.user_backends_data."""
return {
'backends': user_backends_data(g.user,
get_helper('AUTHENTICATION_BACKENDS'),
get_helper('STORAGE', do_import=True))
}
def login_redirect():
"""Load current redirect to context."""
value = request.form.get('next', '') or \
request.args.get('next', '')
return {
'REDIRECT_FIELD_NAME': 'next',
'REDIRECT_FIELD_VALUE': value,
'REDIRECT_QUERYSTRING': value and ('next=' + value) or ''
}
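# Illustrative usage sketch (not part of the original module): both helpers
# are meant to be registered as Flask context processors so every template
# render sees 'backends' and the redirect values; 'app' is assumed to be an
# existing Flask application:
#
#   app.context_processor(backends)
#   app.context_processor(login_redirect)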
|
0x7678/youtube-dl | refs/heads/master | youtube_dl/extractor/ringtv.py | 22 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
class RingTVIE(InfoExtractor):
_VALID_URL = r'(?:http://)?(?:www\.)?ringtv\.craveonline\.com/(?P<type>news|videos/video)/(?P<id>[^/?#]+)'
_TEST = {
"url": "http://ringtv.craveonline.com/news/310833-luis-collazo-says-victor-ortiz-better-not-quit-on-jan-30",
"file": "857645.mp4",
"md5": "d25945f5df41cdca2d2587165ac28720",
"info_dict": {
"title": 'Video: Luis Collazo says Victor Ortiz "better not quit on Jan. 30" - Ring TV',
"description": 'Luis Collazo is excited about his Jan. 30 showdown with fellow former welterweight titleholder Victor Ortiz at Barclays Center in his hometown of Brooklyn. The SuperBowl week fight headlines a Golden Boy Live! card on Fox Sports 1.',
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id').split('-')[0]
webpage = self._download_webpage(url, video_id)
if mobj.group('type') == 'news':
video_id = self._search_regex(
r'''(?x)<iframe[^>]+src="http://cms\.springboardplatform\.com/
embed_iframe/[0-9]+/video/([0-9]+)/''',
webpage, 'real video ID')
title = self._og_search_title(webpage)
description = self._html_search_regex(
r'addthis:description="([^"]+)"',
webpage, 'description', fatal=False)
final_url = "http://ringtv.craveonline.springboardplatform.com/storage/ringtv.craveonline.com/conversion/%s.mp4" % video_id
thumbnail_url = "http://ringtv.craveonline.springboardplatform.com/storage/ringtv.craveonline.com/snapshots/%s.jpg" % video_id
return {
'id': video_id,
'url': final_url,
'title': title,
'thumbnail': thumbnail_url,
'description': description,
}
|
joram/sickbeard-orange | refs/heads/ThePirateBay | lib/requests/packages/urllib3/connectionpool.py | 17 | # urllib3/connectionpool.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import errno
import logging
from socket import error as SocketError, timeout as SocketTimeout
import socket
try: # Python 3
from queue import LifoQueue, Empty, Full
except ImportError:
from Queue import LifoQueue, Empty, Full
import Queue as _ # Platform-specific: Windows
from .exceptions import (
ClosedPoolError,
ConnectTimeoutError,
EmptyPoolError,
HostChangedError,
MaxRetryError,
SSLError,
TimeoutError,
ReadTimeoutError,
ProxyError,
)
from .packages.ssl_match_hostname import CertificateError
from .packages import six
from .connection import (
DummyConnection,
HTTPConnection, HTTPSConnection, VerifiedHTTPSConnection,
HTTPException, BaseSSLError,
)
from .request import RequestMethods
from .response import HTTPResponse
from .util import (
assert_fingerprint,
get_host,
is_connection_dropped,
Timeout,
)
xrange = six.moves.xrange
log = logging.getLogger(__name__)
_Default = object()
port_by_scheme = {
'http': 80,
'https': 443,
}
## Pool objects
class ConnectionPool(object):
"""
Base class for all connection pools, such as
:class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.
"""
scheme = None
QueueCls = LifoQueue
def __init__(self, host, port=None):
# httplib doesn't like it when we include brackets in ipv6 addresses
host = host.strip('[]')
self.host = host
self.port = port
def __str__(self):
return '%s(host=%r, port=%r)' % (type(self).__name__,
self.host, self.port)
# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
_blocking_errnos = set([errno.EAGAIN, errno.EWOULDBLOCK])
class HTTPConnectionPool(ConnectionPool, RequestMethods):
"""
Thread-safe connection pool for one host.
:param host:
Host used for this HTTP Connection (e.g. "localhost"), passed into
:class:`httplib.HTTPConnection`.
:param port:
Port used for this HTTP Connection (None is equivalent to 80), passed
into :class:`httplib.HTTPConnection`.
:param strict:
Causes BadStatusLine to be raised if the status line can't be parsed
as a valid HTTP/1.0 or 1.1 status line, passed into
:class:`httplib.HTTPConnection`.
.. note::
Only works in Python 2. This parameter is ignored in Python 3.
:param timeout:
Socket timeout in seconds for each individual connection. This can
be a float or integer, which sets the timeout for the HTTP request,
or an instance of :class:`urllib3.util.Timeout` which gives you more
fine-grained control over request timeouts. After the constructor runs,
this attribute is always a `urllib3.util.Timeout` object.
:param maxsize:
Number of connections to save that can be reused. More than 1 is useful
in multithreaded situations. If ``block`` is set to false, more
connections will be created but they will not be saved once they've
been used.
:param block:
If set to True, no more than ``maxsize`` connections will be used at
a time. When no free connections are available, the call will block
until a connection has been released. This is a useful side effect for
particular multithreaded situations where one does not want to use more
than maxsize connections per host to prevent flooding.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param _proxy:
Parsed proxy URL, should not be used directly, instead, see
:class:`urllib3.connectionpool.ProxyManager`"
:param _proxy_headers:
A dictionary with proxy headers, should not be used directly,
instead, see :class:`urllib3.connectionpool.ProxyManager`"
"""
scheme = 'http'
ConnectionCls = HTTPConnection
def __init__(self, host, port=None, strict=False,
timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False,
headers=None, _proxy=None, _proxy_headers=None):
ConnectionPool.__init__(self, host, port)
RequestMethods.__init__(self, headers)
self.strict = strict
# This is for backwards compatibility and can be removed once a timeout
# can only be set to a Timeout object
if not isinstance(timeout, Timeout):
timeout = Timeout.from_float(timeout)
self.timeout = timeout
self.pool = self.QueueCls(maxsize)
self.block = block
self.proxy = _proxy
self.proxy_headers = _proxy_headers or {}
# Fill the queue up so that doing get() on it will block properly
for _ in xrange(maxsize):
self.pool.put(None)
# These are mostly for testing and debugging purposes.
self.num_connections = 0
self.num_requests = 0
def _new_conn(self):
"""
Return a fresh :class:`httplib.HTTPConnection`.
"""
self.num_connections += 1
log.info("Starting new HTTP connection (%d): %s" %
(self.num_connections, self.host))
extra_params = {}
if not six.PY3: # Python 2
extra_params['strict'] = self.strict
return self.ConnectionCls(host=self.host, port=self.port,
timeout=self.timeout.connect_timeout,
**extra_params)
def _get_conn(self, timeout=None):
"""
Get a connection. Will return a pooled connection if one is available.
If no connections are available and :prop:`.block` is ``False``, then a
fresh connection is returned.
:param timeout:
Seconds to wait before giving up and raising
:class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
:prop:`.block` is ``True``.
"""
conn = None
try:
conn = self.pool.get(block=self.block, timeout=timeout)
except AttributeError: # self.pool is None
raise ClosedPoolError(self, "Pool is closed.")
except Empty:
if self.block:
raise EmptyPoolError(self,
"Pool reached maximum size and no more "
"connections are allowed.")
pass # Oh well, we'll create a new connection then
# If this is a persistent connection, check if it got disconnected
if conn and is_connection_dropped(conn):
log.info("Resetting dropped connection: %s" % self.host)
conn.close()
return conn or self._new_conn()
def _put_conn(self, conn):
"""
Put a connection back into the pool.
:param conn:
Connection object for the current host and port as returned by
:meth:`._new_conn` or :meth:`._get_conn`.
If the pool is already full, the connection is closed and discarded
because we exceeded maxsize. If connections are discarded frequently,
then maxsize should be increased.
If the pool is closed, then the connection will be closed and discarded.
"""
try:
self.pool.put(conn, block=False)
return # Everything is dandy, done.
except AttributeError:
# self.pool is None.
pass
except Full:
# This should never happen if self.block == True
log.warning("HttpConnectionPool is full, discarding connection: %s"
% self.host)
# Connection never got put back into the pool, close it.
if conn:
conn.close()
def _get_timeout(self, timeout):
""" Helper that always returns a :class:`urllib3.util.Timeout` """
if timeout is _Default:
return self.timeout.clone()
if isinstance(timeout, Timeout):
return timeout.clone()
else:
# User passed us an int/float. This is for backwards compatibility,
# can be removed later
return Timeout.from_float(timeout)
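# For reference (sketch, values are placeholders): callers may pass either a
# bare number or a Timeout object per request, e.g.
#   pool.urlopen('GET', '/', timeout=2.5)
#   pool.urlopen('GET', '/', timeout=Timeout(connect=2.0, read=7.0))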
def _make_request(self, conn, method, url, timeout=_Default,
**httplib_request_kw):
"""
Perform a request on a given httplib connection object taken from our
pool.
:param conn:
a connection from one of our connection pools
:param timeout:
Socket timeout in seconds for the request. This can be a
float or integer, which will set the same timeout value for
the socket connect and the socket read, or an instance of
:class:`urllib3.util.Timeout`, which gives you more fine-grained
control over your timeouts.
"""
self.num_requests += 1
timeout_obj = self._get_timeout(timeout)
try:
timeout_obj.start_connect()
conn.timeout = timeout_obj.connect_timeout
# conn.request() calls httplib.*.request, not the method in
# urllib3.request. It also calls makefile (recv) on the socket.
conn.request(method, url, **httplib_request_kw)
except SocketTimeout:
raise ConnectTimeoutError(
self, "Connection to %s timed out. (connect timeout=%s)" %
(self.host, timeout_obj.connect_timeout))
# Reset the timeout for the recv() on the socket
read_timeout = timeout_obj.read_timeout
# App Engine doesn't have a sock attr
if hasattr(conn, 'sock'):
# In Python 3 socket.py will catch EAGAIN and return None when you
# try and read into the file pointer created by http.client, which
# instead raises a BadStatusLine exception. Instead of catching
# the exception and assuming all BadStatusLine exceptions are read
# timeouts, check for a zero timeout before making the request.
if read_timeout == 0:
raise ReadTimeoutError(
self, url,
"Read timed out. (read timeout=%s)" % read_timeout)
if read_timeout is Timeout.DEFAULT_TIMEOUT:
conn.sock.settimeout(socket.getdefaulttimeout())
else: # None or a value
conn.sock.settimeout(read_timeout)
# Receive the response from the server
try:
try: # Python 2.7+, use buffering of HTTP responses
httplib_response = conn.getresponse(buffering=True)
except TypeError: # Python 2.6 and older
httplib_response = conn.getresponse()
except SocketTimeout:
raise ReadTimeoutError(
self, url, "Read timed out. (read timeout=%s)" % read_timeout)
except BaseSSLError as e:
# Catch possible read timeouts thrown as SSL errors. If not the
# case, rethrow the original. We need to do this because of:
# http://bugs.python.org/issue10272
if 'timed out' in str(e) or \
'did not complete (read)' in str(e): # Python 2.6
raise ReadTimeoutError(self, url, "Read timed out.")
raise
except SocketError as e: # Platform-specific: Python 2
# See the above comment about EAGAIN in Python 3. In Python 2 we
# have to specifically catch it and throw the timeout error
if e.errno in _blocking_errnos:
raise ReadTimeoutError(
self, url,
"Read timed out. (read timeout=%s)" % read_timeout)
raise
# AppEngine doesn't have a version attr.
http_version = getattr(conn, '_http_vsn_str', 'HTTP/?')
log.debug("\"%s %s %s\" %s %s" % (method, url, http_version,
httplib_response.status,
httplib_response.length))
return httplib_response
def close(self):
"""
Close all pooled connections and disable the pool.
"""
# Disable access to the pool
old_pool, self.pool = self.pool, None
try:
while True:
conn = old_pool.get(block=False)
if conn:
conn.close()
except Empty:
pass # Done.
def is_same_host(self, url):
"""
Check if the given ``url`` is a member of the same host as this
connection pool.
"""
if url.startswith('/'):
return True
# TODO: Add optional support for socket.gethostbyname checking.
scheme, host, port = get_host(url)
# Use explicit default port for comparison when none is given
if self.port and not port:
port = port_by_scheme.get(scheme)
elif not self.port and port == port_by_scheme.get(scheme):
port = None
return (scheme, host, port) == (self.scheme, self.host, self.port)
def urlopen(self, method, url, body=None, headers=None, retries=3,
redirect=True, assert_same_host=True, timeout=_Default,
pool_timeout=None, release_conn=None, **response_kw):
"""
Get a connection from the pool and perform an HTTP request. This is the
lowest level call for making a request, so you'll need to specify all
the raw details.
.. note::
More commonly, it's appropriate to use a convenience method provided
by :class:`.RequestMethods`, such as :meth:`request`.
.. note::
`release_conn` will only behave as expected if
`preload_content=False` because we want to make
`preload_content=False` the default behaviour someday soon without
breaking backwards compatibility.
:param method:
HTTP request method (such as GET, POST, PUT, etc.)
:param body:
Data to send in the request body (useful for creating
POST requests, see HTTPConnectionPool.post_url for
more convenience).
:param headers:
Dictionary of custom headers to send, such as User-Agent,
If-None-Match, etc. If None, pool headers are used. If provided,
these headers completely replace any pool-specific headers.
:param retries:
Number of retries to allow before raising a MaxRetryError exception.
:param redirect:
If True, automatically handle redirects (status codes 301, 302,
303, 307, 308). Each redirect counts as a retry.
:param assert_same_host:
If ``True``, will make sure that the host of the pool requests is
consistent else will raise HostChangedError. When False, you can
use the pool on an HTTP proxy and request foreign hosts.
:param timeout:
If specified, overrides the default timeout for this one
request. It may be a float (in seconds) or an instance of
:class:`urllib3.util.Timeout`.
:param pool_timeout:
If set and the pool is set to block=True, then this method will
block for ``pool_timeout`` seconds and raise EmptyPoolError if no
connection is available within the time period.
:param release_conn:
If False, then the urlopen call will not release the connection
back into the pool once a response is received (but will release if
you read the entire contents of the response such as when
`preload_content=True`). This is useful if you're not preloading
the response's content immediately. You will need to call
``r.release_conn()`` on the response ``r`` to return the connection
back into the pool. If None, it takes the value of
``response_kw.get('preload_content', True)``.
:param \**response_kw:
Additional parameters are passed to
:meth:`urllib3.response.HTTPResponse.from_httplib`
"""
if headers is None:
headers = self.headers
if retries < 0:
raise MaxRetryError(self, url)
if release_conn is None:
release_conn = response_kw.get('preload_content', True)
# Check host
if assert_same_host and not self.is_same_host(url):
raise HostChangedError(self, url, retries - 1)
conn = None
# Merge the proxy headers. Only do this in HTTP. We have to copy the
# headers dict so we can safely change it without those changes being
# reflected in anyone else's copy.
if self.scheme == 'http':
headers = headers.copy()
headers.update(self.proxy_headers)
try:
# Request a connection from the queue
conn = self._get_conn(timeout=pool_timeout)
# Make the request on the httplib connection object
httplib_response = self._make_request(conn, method, url,
timeout=timeout,
body=body, headers=headers)
# If we're going to release the connection in ``finally:``, then
# the request doesn't need to know about the connection. Otherwise
# it will also try to release it and we'll have a double-release
# mess.
response_conn = not release_conn and conn
# Import httplib's response into our own wrapper object
response = HTTPResponse.from_httplib(httplib_response,
pool=self,
connection=response_conn,
**response_kw)
# else:
# The connection will be put back into the pool when
# ``response.release_conn()`` is called (implicitly by
# ``response.read()``)
except Empty:
# Timed out by queue
raise EmptyPoolError(self, "No pool connections are available.")
except BaseSSLError as e:
raise SSLError(e)
except CertificateError as e:
# Name mismatch
raise SSLError(e)
except TimeoutError as e:
# Connection broken, discard.
conn = None
# Save the error off for retry logic.
err = e
if retries == 0:
raise
except (HTTPException, SocketError) as e:
if isinstance(e, SocketError) and self.proxy is not None:
raise ProxyError('Cannot connect to proxy. '
'Socket error: %s.' % e)
# Connection broken, discard. It will be replaced next _get_conn().
conn = None
# This is necessary so we can access e below
err = e
if retries == 0:
raise MaxRetryError(self, url, e)
finally:
if release_conn:
# Put the connection back to be reused. If the connection is
# expired then it will be None, which will get replaced with a
# fresh connection during _get_conn.
self._put_conn(conn)
if not conn:
# Try again
log.warn("Retrying (%d attempts remain) after connection "
"broken by '%r': %s" % (retries, err, url))
return self.urlopen(method, url, body, headers, retries - 1,
redirect, assert_same_host,
timeout=timeout, pool_timeout=pool_timeout,
release_conn=release_conn, **response_kw)
# Handle redirect?
redirect_location = redirect and response.get_redirect_location()
if redirect_location:
if response.status == 303:
method = 'GET'
log.info("Redirecting %s -> %s" % (url, redirect_location))
return self.urlopen(method, redirect_location, body, headers,
retries - 1, redirect, assert_same_host,
timeout=timeout, pool_timeout=pool_timeout,
release_conn=release_conn, **response_kw)
return response
class HTTPSConnectionPool(HTTPConnectionPool):
"""
Same as :class:`.HTTPConnectionPool`, but HTTPS.
When Python is compiled with the :mod:`ssl` module, then
:class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates,
instead of :class:`httplib.HTTPSConnection`.
:class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``,
``assert_hostname`` and ``host`` in this order to verify connections.
If ``assert_hostname`` is False, no verification is done.
The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs`` and
``ssl_version`` are only used if :mod:`ssl` is available and are fed into
:meth:`urllib3.util.ssl_wrap_socket` to upgrade the connection socket
into an SSL socket.
"""
scheme = 'https'
ConnectionCls = HTTPSConnection
def __init__(self, host, port=None,
strict=False, timeout=None, maxsize=1,
block=False, headers=None,
_proxy=None, _proxy_headers=None,
key_file=None, cert_file=None, cert_reqs=None,
ca_certs=None, ssl_version=None,
assert_hostname=None, assert_fingerprint=None):
HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize,
block, headers, _proxy, _proxy_headers)
self.key_file = key_file
self.cert_file = cert_file
self.cert_reqs = cert_reqs
self.ca_certs = ca_certs
self.ssl_version = ssl_version
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
def _prepare_conn(self, conn):
"""
Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
and establish the tunnel if proxy is used.
"""
if isinstance(conn, VerifiedHTTPSConnection):
conn.set_cert(key_file=self.key_file,
cert_file=self.cert_file,
cert_reqs=self.cert_reqs,
ca_certs=self.ca_certs,
assert_hostname=self.assert_hostname,
assert_fingerprint=self.assert_fingerprint)
conn.ssl_version = self.ssl_version
if self.proxy is not None:
# Python 2.7+
try:
set_tunnel = conn.set_tunnel
except AttributeError: # Platform-specific: Python 2.6
set_tunnel = conn._set_tunnel
set_tunnel(self.host, self.port, self.proxy_headers)
# Establish tunnel connection early, because otherwise httplib
# would improperly set Host: header to proxy's IP:port.
conn.connect()
return conn
def _new_conn(self):
"""
Return a fresh :class:`httplib.HTTPSConnection`.
"""
self.num_connections += 1
log.info("Starting new HTTPS connection (%d): %s"
% (self.num_connections, self.host))
if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
# Platform-specific: Python without ssl
raise SSLError("Can't connect to HTTPS URL because the SSL "
"module is not available.")
actual_host = self.host
actual_port = self.port
if self.proxy is not None:
actual_host = self.proxy.host
actual_port = self.proxy.port
extra_params = {}
if not six.PY3: # Python 2
extra_params['strict'] = self.strict
conn = self.ConnectionCls(host=actual_host, port=actual_port,
timeout=self.timeout.connect_timeout,
**extra_params)
return self._prepare_conn(conn)
def connection_from_url(url, **kw):
"""
Given a url, return an :class:`.ConnectionPool` instance of its host.
This is a shortcut for not having to parse out the scheme, host, and port
of the url before creating an :class:`.ConnectionPool` instance.
:param url:
Absolute URL string that must include the scheme. Port is optional.
:param \**kw:
Passes additional parameters to the constructor of the appropriate
:class:`.ConnectionPool`. Useful for specifying things like
timeout, maxsize, headers, etc.
Example: ::
>>> conn = connection_from_url('http://google.com/')
>>> r = conn.request('GET', '/')
"""
scheme, host, port = get_host(url)
if scheme == 'https':
return HTTPSConnectionPool(host, port=port, **kw)
else:
return HTTPConnectionPool(host, port=port, **kw)
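# Illustrative usage sketch (not part of the original module); the host and
# path are placeholders:
#
#   pool = connection_from_url('http://example.com/', maxsize=2, block=True)
#   r = pool.request('GET', '/')  # request() is inherited from RequestMethods
#   print(r.status, len(r.data))
#   pool.close()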
|
rootulp/exercism | refs/heads/master | python/sublist/sublist.py | 1 | SUBLIST = 0
SUPERLIST = 1
EQUAL = 2
UNEQUAL = 3
def check_lists(l1, l2):
if l1 == l2:
return EQUAL
elif l1 in each_cons(l2, len(l1)):
return SUBLIST
elif l2 in each_cons(l1, len(l2)):
return SUPERLIST
else:
return UNEQUAL
# Not the most efficient
def each_cons(lst, size):
return [lst[i: i + size] for i in range(len(lst) - size + 1)]
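# A few sanity checks (sketch, not part of the original solution):
#   each_cons([1, 2, 3, 4], 2)       # -> [[1, 2], [2, 3], [3, 4]]
#   check_lists([2, 3], [1, 2, 3])   # -> SUBLIST
#   check_lists([1, 2, 3], [2, 3])   # -> SUPERLIST
#   check_lists([1, 2], [1, 3])      # -> UNEQUAL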
|
upsuper/servo | refs/heads/master | tests/wpt/web-platform-tests/html/semantics/scripting-1/the-script-element/script-not-found-not-executed-2.py | 154 | def main(request, response):
headers = [("Content-Type", "text/javascript")]
body = "test2_token = \"script executed\";"
return 200, headers, body
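# Note (added for clarity): wptserve calls main(request, response) and treats
# the returned (status, headers, body) tuple as the HTTP response, so this
# handler serves a small script that records its execution by setting
# test2_token.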
|
fake-name/ReadableWebProxy | refs/heads/master | WebMirror/management/rss_parser_funcs/feed_parse_extractBrassboltsBlogspotCom.py | 1 |
def extractBrassboltsBlogspotCom(item):
'''
Parser for 'brassbolts.blogspot.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
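# Illustrative sketch (not part of the original parser): an item this parser
# accepts might look like the following, with all field values hypothetical;
# the vol/chp parsing itself is delegated to the external helper
# extractVolChapterFragmentPostfix:
#   extractBrassboltsBlogspotCom({'title': 'PRC Vol. 2 Chapter 15',
#                                 'tags': ['PRC']})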
|
leoc/home-assistant | refs/heads/dev | homeassistant/components/vera.py | 6 | """
Support for Vera devices.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/vera/
"""
import logging
from collections import defaultdict
import voluptuous as vol
from requests.exceptions import RequestException
from homeassistant.util.dt import utc_from_timestamp
from homeassistant.util import convert
from homeassistant.helpers import discovery
from homeassistant.helpers import config_validation as cv
from homeassistant.const import (
ATTR_ARMED, ATTR_BATTERY_LEVEL, ATTR_LAST_TRIP_TIME, ATTR_TRIPPED,
EVENT_HOMEASSISTANT_STOP)
from homeassistant.helpers.entity import Entity
REQUIREMENTS = ['pyvera==0.2.20']
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'vera'
VERA_CONTROLLER = None
CONF_CONTROLLER = 'vera_controller_url'
CONF_EXCLUDE = 'exclude'
CONF_LIGHTS = 'lights'
ATTR_CURRENT_POWER_MWH = "current_power_mwh"
VERA_DEVICES = defaultdict(list)
VERA_ID_LIST_SCHEMA = vol.Schema([int])
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_CONTROLLER): cv.url,
vol.Optional(CONF_EXCLUDE, default=[]): VERA_ID_LIST_SCHEMA,
vol.Optional(CONF_LIGHTS, default=[]): VERA_ID_LIST_SCHEMA
}),
}, extra=vol.ALLOW_EXTRA)
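# Illustrative configuration sketch (values are placeholders) matching
# CONFIG_SCHEMA above, as it would appear in configuration.yaml:
#
#   vera:
#     vera_controller_url: http://192.168.1.161:3480/
#     exclude: [13, 14]
#     lights: [15, 17]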
VERA_COMPONENTS = [
'binary_sensor', 'sensor', 'light', 'switch', 'lock', 'climate', 'cover'
]
# pylint: disable=unused-argument, too-many-function-args
def setup(hass, base_config):
"""Common setup for Vera devices."""
global VERA_CONTROLLER
import pyvera as veraApi
config = base_config.get(DOMAIN)
base_url = config.get(CONF_CONTROLLER)
VERA_CONTROLLER, _ = veraApi.init_controller(base_url)
def stop_subscription(event):
"""Shutdown Vera subscriptions and subscription thread on exit."""
_LOGGER.info("Shutting down subscriptions.")
VERA_CONTROLLER.stop()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_subscription)
try:
all_devices = VERA_CONTROLLER.get_devices()
except RequestException:
# There was a network related error connecting to the vera controller.
_LOGGER.exception("Error communicating with Vera API")
return False
exclude = config.get(CONF_EXCLUDE)
lights_ids = config.get(CONF_LIGHTS)
for device in all_devices:
if device.device_id in exclude:
continue
dev_type = map_vera_device(device, lights_ids)
if dev_type is None:
continue
VERA_DEVICES[dev_type].append(device)
for component in VERA_COMPONENTS:
discovery.load_platform(hass, component, DOMAIN, {}, base_config)
return True
def map_vera_device(vera_device, remap):
"""Map vera classes to HA types."""
# pylint: disable=too-many-return-statements
import pyvera as veraApi
if isinstance(vera_device, veraApi.VeraDimmer):
return 'light'
if isinstance(vera_device, veraApi.VeraBinarySensor):
return 'binary_sensor'
if isinstance(vera_device, veraApi.VeraSensor):
return 'sensor'
if isinstance(vera_device, veraApi.VeraArmableDevice):
return 'switch'
if isinstance(vera_device, veraApi.VeraLock):
return 'lock'
if isinstance(vera_device, veraApi.VeraThermostat):
return 'climate'
if isinstance(vera_device, veraApi.VeraCurtain):
return 'cover'
if isinstance(vera_device, veraApi.VeraSwitch):
if vera_device.device_id in remap:
return 'light'
else:
return 'switch'
return None
class VeraDevice(Entity):
"""Representation of a Vera devicetity."""
def __init__(self, vera_device, controller):
"""Initialize the device."""
self.vera_device = vera_device
self.controller = controller
self._name = self.vera_device.name
self.controller.register(vera_device, self._update_callback)
self.update()
def _update_callback(self, _device):
self.update_ha_state(True)
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
attr = {}
if self.vera_device.has_battery:
attr[ATTR_BATTERY_LEVEL] = self.vera_device.battery_level + '%'
if self.vera_device.is_armable:
armed = self.vera_device.is_armed
attr[ATTR_ARMED] = 'True' if armed else 'False'
if self.vera_device.is_trippable:
last_tripped = self.vera_device.last_trip
if last_tripped is not None:
utc_time = utc_from_timestamp(int(last_tripped))
attr[ATTR_LAST_TRIP_TIME] = utc_time.isoformat()
else:
attr[ATTR_LAST_TRIP_TIME] = None
tripped = self.vera_device.is_tripped
attr[ATTR_TRIPPED] = 'True' if tripped else 'False'
power = self.vera_device.power
if power:
attr[ATTR_CURRENT_POWER_MWH] = convert(power, float, 0.0) * 1000
attr['Vera Device Id'] = self.vera_device.vera_device_id
return attr
|
alexvanboxel/airflow | refs/heads/master | airflow/migrations/versions/64de9cddf6c9_add_task_fails_journal_table.py | 59 | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""add task fails journal table
Revision ID: 64de9cddf6c9
Revises: 211e584da130
Create Date: 2016-08-03 14:02:59.203021
"""
# revision identifiers, used by Alembic.
revision = '64de9cddf6c9'
down_revision = '211e584da130'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table(
'task_fail',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('task_id', sa.String(length=250), nullable=False),
sa.Column('dag_id', sa.String(length=250), nullable=False),
sa.Column('execution_date', sa.DateTime(), nullable=False),
sa.Column('start_date', sa.DateTime(), nullable=True),
sa.Column('end_date', sa.DateTime(), nullable=True),
sa.Column('duration', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id'),
)
def downgrade():
op.drop_table('task_fail')
|
lakshayg/tensorflow | refs/heads/master | tensorflow/contrib/learn/python/learn/estimators/linear.py | 14 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Linear Estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import six
from tensorflow.contrib import layers
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_arg_values
from tensorflow.python.training import training_util
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.contrib.linear_optimizer.python import sdca_optimizer
from tensorflow.python.feature_column import feature_column as fc_core
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training as train
# The default learning rate of 0.2 is a historical artifact of the initial
# implementation, but seems a reasonable choice.
_LEARNING_RATE = 0.2
def _get_optimizer(spec):
if isinstance(spec, six.string_types):
return layers.OPTIMIZER_CLS_NAMES[spec](
learning_rate=_LEARNING_RATE)
elif callable(spec):
return spec()
return spec
# TODO(ispir): Remove this function by fixing '_infer_model' with single outputs
# and as_iteable case.
def _as_iterable(preds, output):
for pred in preds:
yield pred[output]
def _add_bias_column(feature_columns, columns_to_tensors, bias_variable,
columns_to_variables):
"""Adds a fake bias feature column filled with all 1s."""
# TODO(b/31008490): Move definition to a common constants place.
bias_column_name = "tf_virtual_bias_column"
if any(col.name == bias_column_name for col in feature_columns):
raise ValueError("%s is a reserved column name." % bias_column_name)
if not feature_columns:
raise ValueError("feature_columns can't be empty.")
# Loop through input tensors until we can figure out batch_size.
batch_size = None
for column in columns_to_tensors.values():
if isinstance(column, tuple):
column = column[0]
if isinstance(column, sparse_tensor.SparseTensor):
shape = tensor_util.constant_value(column.dense_shape)
if shape is not None:
batch_size = shape[0]
break
else:
batch_size = array_ops.shape(column)[0]
break
if batch_size is None:
raise ValueError("Could not infer batch size from input features.")
bias_column = layers.real_valued_column(bias_column_name)
columns_to_tensors[bias_column] = array_ops.ones([batch_size, 1],
dtype=dtypes.float32)
columns_to_variables[bias_column] = [bias_variable]
def _linear_model_fn(features, labels, mode, params, config=None):
"""A model_fn for linear models that use a gradient-based optimizer.
Args:
features: `Tensor` or dict of `Tensor` (depends on data passed to `fit`).
labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
dtype `int32` or `int64` in the range `[0, n_classes)`.
mode: Defines whether this is training, evaluation or prediction.
See `ModeKeys`.
params: A dict of hyperparameters.
The following hyperparameters are expected:
* head: A `Head` instance.
* feature_columns: An iterable containing all the feature columns used by
the model.
* optimizer: string, `Optimizer` object, or callable that defines the
optimizer to use for training. If `None`, will use a FTRL optimizer.
* gradient_clip_norm: A float > 0. If provided, gradients are
clipped to their global norm with this clipping ratio.
* joint_weights: If True, the weights for all columns will be stored in a
single (possibly partitioned) variable. It's more efficient, but it's
incompatible with SDCAOptimizer, and requires all feature columns are
sparse and use the 'sum' combiner.
config: `RunConfig` object to configure the runtime settings.
Returns:
A `ModelFnOps` instance.
Raises:
ValueError: If mode is not any of the `ModeKeys`.
"""
head = params["head"]
feature_columns = params["feature_columns"]
optimizer = params.get("optimizer") or _get_default_optimizer(feature_columns)
gradient_clip_norm = params.get("gradient_clip_norm", None)
num_ps_replicas = config.num_ps_replicas if config else 0
joint_weights = params.get("joint_weights", False)
if not isinstance(features, dict):
features = {"": features}
parent_scope = "linear"
partitioner = partitioned_variables.min_max_variable_partitioner(
max_partitions=num_ps_replicas,
min_slice_size=64 << 20)
with variable_scope.variable_scope(
parent_scope,
values=tuple(six.itervalues(features)),
partitioner=partitioner) as scope:
if all([isinstance(fc, feature_column._FeatureColumn) # pylint: disable=protected-access
for fc in feature_columns]):
if joint_weights:
layer_fn = layers.joint_weighted_sum_from_feature_columns
else:
layer_fn = layers.weighted_sum_from_feature_columns
logits, _, _ = layer_fn(
columns_to_tensors=features,
feature_columns=feature_columns,
num_outputs=head.logits_dimension,
weight_collections=[parent_scope],
scope=scope)
else:
logits = fc_core.linear_model(
features=features,
feature_columns=feature_columns,
units=head.logits_dimension,
weight_collections=[parent_scope])
def _train_op_fn(loss):
global_step = training_util.get_global_step()
my_vars = ops.get_collection(parent_scope)
grads = gradients.gradients(loss, my_vars)
if gradient_clip_norm:
grads, _ = clip_ops.clip_by_global_norm(grads, gradient_clip_norm)
return (_get_optimizer(optimizer).apply_gradients(
zip(grads, my_vars), global_step=global_step))
return head.create_model_fn_ops(
features=features,
mode=mode,
labels=labels,
train_op_fn=_train_op_fn,
logits=logits)
def sdca_model_fn(features, labels, mode, params):
"""A model_fn for linear models that use the SDCA optimizer.
Args:
features: A dict of `Tensor` keyed by column name.
labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
dtype `int32` or `int64` in the range `[0, n_classes)`.
mode: Defines whether this is training, evaluation or prediction.
See `ModeKeys`.
params: A dict of hyperparameters.
The following hyperparameters are expected:
* head: A `Head` instance. Type must be one of `_BinarySvmHead`,
`_RegressionHead` or `_BinaryLogisticHead`.
* feature_columns: An iterable containing all the feature columns used by
the model.
* optimizer: An `SDCAOptimizer` instance.
* weight_column_name: A string defining the weight feature column, or
None if there are no weights.
* update_weights_hook: A `SessionRunHook` object or None. Used to update
model weights.
Returns:
A `ModelFnOps` instance.
Raises:
ValueError: If `optimizer` is not an `SDCAOptimizer` instance.
ValueError: If the type of head is neither `_BinarySvmHead`, nor
`_RegressionHead` nor `_MultiClassHead`.
ValueError: If mode is not any of the `ModeKeys`.
"""
head = params["head"]
feature_columns = params["feature_columns"]
optimizer = params["optimizer"]
weight_column_name = params["weight_column_name"]
update_weights_hook = params.get("update_weights_hook", None)
if not isinstance(optimizer, sdca_optimizer.SDCAOptimizer):
raise ValueError("Optimizer must be of type SDCAOptimizer")
if isinstance(head, head_lib._BinarySvmHead): # pylint: disable=protected-access
loss_type = "hinge_loss"
elif isinstance(head, head_lib._BinaryLogisticHead): # pylint: disable=protected-access
loss_type = "logistic_loss"
elif isinstance(head, head_lib._RegressionHead): # pylint: disable=protected-access
assert head.logits_dimension == 1, ("SDCA only applies for "
"logits_dimension=1.")
loss_type = "squared_loss"
else:
raise ValueError("Unsupported head type: {}".format(head))
parent_scope = "linear"
with variable_scope.variable_op_scope(
features.values(), parent_scope) as scope:
features = features.copy()
features.update(layers.transform_features(features, feature_columns))
logits, columns_to_variables, bias = (
layers.weighted_sum_from_feature_columns(
columns_to_tensors=features,
feature_columns=feature_columns,
num_outputs=1,
scope=scope))
_add_bias_column(feature_columns, features, bias, columns_to_variables)
def _train_op_fn(unused_loss):
global_step = training_util.get_global_step()
sdca_model, train_op = optimizer.get_train_step(columns_to_variables,
weight_column_name,
loss_type, features,
labels, global_step)
if update_weights_hook is not None:
update_weights_hook.set_parameters(sdca_model, train_op)
return train_op
model_fn_ops = head.create_model_fn_ops(
features=features,
labels=labels,
mode=mode,
train_op_fn=_train_op_fn,
logits=logits)
if update_weights_hook is not None:
return model_fn_ops._replace(
training_chief_hooks=(model_fn_ops.training_chief_hooks +
[update_weights_hook]))
return model_fn_ops
# Ensures consistency with LinearComposableModel.
def _get_default_optimizer(feature_columns):
learning_rate = min(_LEARNING_RATE, 1.0 / math.sqrt(len(feature_columns)))
return train.FtrlOptimizer(learning_rate=learning_rate)
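# For example (sketch): with 16 feature columns the default learning rate is
# min(0.2, 1/sqrt(16)) = 0.2, while with 100 columns it drops to
# min(0.2, 1/sqrt(100)) = 0.1.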
class _SdcaUpdateWeightsHook(session_run_hook.SessionRunHook):
"""SessionRunHook to update and shrink SDCA model weights."""
def __init__(self):
pass
def set_parameters(self, sdca_model, train_op):
self._sdca_model = sdca_model
self._train_op = train_op
def begin(self):
"""Construct the update_weights op.
The op is implicitly added to the default graph.
"""
self._update_op = self._sdca_model.update_weights(self._train_op)
def before_run(self, run_context):
"""Return the update_weights op so that it is executed during this run."""
return session_run_hook.SessionRunArgs(self._update_op)
class LinearClassifier(estimator.Estimator):
"""Linear classifier model.
Train a linear model to classify instances into one of multiple possible
classes. When number of possible classes is 2, this is binary classification.
Example:
```python
sparse_column_a = sparse_column_with_hash_bucket(...)
sparse_column_b = sparse_column_with_hash_bucket(...)
sparse_feature_a_x_sparse_feature_b = crossed_column(...)
# Estimator using the default optimizer.
estimator = LinearClassifier(
feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b])
# Or estimator using the FTRL optimizer with regularization.
estimator = LinearClassifier(
feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b],
optimizer=tf.train.FtrlOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
# Or estimator using the SDCAOptimizer.
estimator = LinearClassifier(
feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b],
optimizer=tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id',
num_loss_partitions=...,
symmetric_l2_regularization=2.0
))
# Input builders
def input_fn_train: # returns x, y (where y represents label's class index).
...
def input_fn_eval: # returns x, y (where y represents label's class index).
...
def input_fn_predict: # returns x, None.
...
estimator.fit(input_fn=input_fn_train)
estimator.evaluate(input_fn=input_fn_eval)
# predict_classes returns class indices.
estimator.predict_classes(input_fn=input_fn_predict)
```
If the user specifies `label_keys` in constructor, labels must be strings from
the `label_keys` vocabulary. Example:
```python
label_keys = ['label0', 'label1', 'label2']
estimator = LinearClassifier(
n_classes=n_classes,
feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b],
label_keys=label_keys)
def input_fn_train: # returns x, y (where y is one of label_keys).
pass
estimator.fit(input_fn=input_fn_train)
def input_fn_eval: # returns x, y (where y is one of label_keys).
pass
estimator.evaluate(input_fn=input_fn_eval)
def input_fn_predict: # returns x, None
# predict_classes returns one of label_keys.
estimator.predict_classes(input_fn=input_fn_predict)
```
Input of `fit` and `evaluate` should have following features,
otherwise there will be a `KeyError`:
* if `weight_column_name` is not `None`, a feature with
`key=weight_column_name` whose value is a `Tensor`.
* for each `column` in `feature_columns`:
- if `column` is a `SparseColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `WeightedSparseColumn`, two features: the first with
`key` the id column name, the second with `key` the weight column name.
Both features' `value` must be a `SparseTensor`.
- if `column` is a `RealValuedColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
"""
def __init__(self, # _joint_weight pylint: disable=invalid-name
feature_columns,
model_dir=None,
n_classes=2,
weight_column_name=None,
optimizer=None,
gradient_clip_norm=None,
enable_centered_bias=False,
_joint_weight=False,
config=None,
feature_engineering_fn=None,
label_keys=None):
"""Construct a `LinearClassifier` estimator object.
Args:
feature_columns: An iterable containing all the feature columns used by
the model. All items in the set should be instances of classes derived
from `FeatureColumn`.
model_dir: Directory to save model parameters, graph, etc. This can
also be used to load checkpoints from the directory into an estimator
to continue training a previously saved model.
n_classes: number of label classes. Default is binary classification.
Note that class labels are integers representing the class index (i.e.
values from 0 to n_classes-1). For arbitrary label values (e.g. string
labels), convert to class indices first.
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
optimizer: The optimizer used to train the model. If specified, it should
be either an instance of `tf.Optimizer` or the SDCAOptimizer. If `None`,
the Ftrl optimizer will be used.
gradient_clip_norm: A `float` > 0. If provided, gradients are clipped
to their global norm with this clipping ratio. See
`tf.clip_by_global_norm` for more details.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
_joint_weight: If True, the weights for all columns will be stored in a
single (possibly partitioned) variable. It's more efficient, but it's
incompatible with SDCAOptimizer, and requires that all feature columns be
sparse and use the 'sum' combiner.
config: `RunConfig` object to configure the runtime settings.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into the model.
label_keys: Optional list of strings with size `[n_classes]` defining the
label vocabulary. Only supported for `n_classes` > 2.
Returns:
A `LinearClassifier` estimator.
Raises:
ValueError: if n_classes < 2.
ValueError: if enable_centered_bias=True and optimizer is SDCAOptimizer.
"""
if (isinstance(optimizer, sdca_optimizer.SDCAOptimizer) and
enable_centered_bias):
raise ValueError("enable_centered_bias is not supported with SDCA")
self._feature_columns = tuple(feature_columns or [])
assert self._feature_columns
chief_hook = None
head = head_lib.multi_class_head(
n_classes,
weight_column_name=weight_column_name,
enable_centered_bias=enable_centered_bias,
label_keys=label_keys)
params = {
"head": head,
"feature_columns": feature_columns,
"optimizer": optimizer,
}
if isinstance(optimizer, sdca_optimizer.SDCAOptimizer):
assert not _joint_weight, ("_joint_weight is incompatible with the"
" SDCAOptimizer")
assert n_classes == 2, "SDCA only applies to binary classification."
model_fn = sdca_model_fn
# The model_fn passes the model parameters to the chief_hook. We then use
# the hook to update weights and shrink step only on the chief.
chief_hook = _SdcaUpdateWeightsHook()
params.update({
"weight_column_name": weight_column_name,
"update_weights_hook": chief_hook,
})
else:
model_fn = _linear_model_fn
params.update({
"gradient_clip_norm": gradient_clip_norm,
"joint_weights": _joint_weight,
})
super(LinearClassifier, self).__init__(
model_fn=model_fn,
model_dir=model_dir,
config=config,
params=params,
feature_engineering_fn=feature_engineering_fn)
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
@deprecated_arg_values(
"2017-03-01",
"Please switch to predict_classes, or set `outputs` argument.",
outputs=None)
def predict(self, x=None, input_fn=None, batch_size=None, outputs=None,
as_iterable=True):
"""Returns predictions for given features.
By default, returns predicted classes. But this default will be dropped
soon. Users should either pass `outputs`, or call `predict_classes` method.
Args:
x: features.
input_fn: Input function. If set, x must be None.
batch_size: Override default batch size.
outputs: list of `str`, name of the output to predict.
If `None`, returns classes.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
Numpy array of predicted classes with shape [batch_size] (or an iterable
of predicted classes if as_iterable is True). Each predicted class is
represented by its class index (i.e. integer from 0 to n_classes-1).
If `outputs` is set, returns a dict of predictions.
"""
if not outputs:
return self.predict_classes(
x=x,
input_fn=input_fn,
batch_size=batch_size,
as_iterable=as_iterable)
return super(LinearClassifier, self).predict(
x=x,
input_fn=input_fn,
batch_size=batch_size,
outputs=outputs,
as_iterable=as_iterable)
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
def predict_classes(self, x=None, input_fn=None, batch_size=None,
as_iterable=True):
"""Returns predicted classes for given features.
Args:
x: features.
input_fn: Input function. If set, x must be None.
batch_size: Override default batch size.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
Numpy array of predicted classes with shape [batch_size] (or an iterable
of predicted classes if as_iterable is True). Each predicted class is
represented by its class index (i.e. integer from 0 to n_classes-1).
"""
key = prediction_key.PredictionKey.CLASSES
preds = super(LinearClassifier, self).predict(
x=x,
input_fn=input_fn,
batch_size=batch_size,
outputs=[key],
as_iterable=as_iterable)
if as_iterable:
return _as_iterable(preds, output=key)
return preds[key]
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
def predict_proba(self, x=None, input_fn=None, batch_size=None,
as_iterable=True):
"""Returns predicted probabilities for given features.
Args:
x: features.
input_fn: Input function. If set, x must be None.
batch_size: Override default batch size.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
Numpy array of predicted probabilities with shape [batch_size, n_classes]
(or an iterable of predicted probabilities if as_iterable is True).
"""
key = prediction_key.PredictionKey.PROBABILITIES
preds = super(LinearClassifier, self).predict(
x=x,
input_fn=input_fn,
batch_size=batch_size,
outputs=[key],
as_iterable=as_iterable)
if as_iterable:
return _as_iterable(preds, output=key)
return preds[key]
@deprecated("2017-03-25", "Please use Estimator.export_savedmodel() instead.")
def export(self,
export_dir,
input_fn=None,
input_feature_key=None,
use_deprecated_input_fn=True,
signature_fn=None,
default_batch_size=1,
exports_to_keep=None):
"""See BaseEstimator.export."""
def default_input_fn(unused_estimator, examples):
return layers.parse_feature_columns_from_examples(
examples, self._feature_columns)
return super(LinearClassifier, self).export(
export_dir=export_dir,
input_fn=input_fn or default_input_fn,
input_feature_key=input_feature_key,
use_deprecated_input_fn=use_deprecated_input_fn,
signature_fn=(signature_fn or
export.classification_signature_fn_with_prob),
prediction_key=prediction_key.PredictionKey.PROBABILITIES,
default_batch_size=default_batch_size,
exports_to_keep=exports_to_keep)
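# Illustrative call pattern for the predict_* methods above (added for
# clarity, not in the original source; `my_input_fn` is hypothetical):
#
#   classifier = LinearClassifier(feature_columns=[...])
#   classifier.fit(input_fn=my_input_fn, steps=100)
#   classes = list(classifier.predict_classes(input_fn=my_input_fn))
#   probas = list(classifier.predict_proba(input_fn=my_input_fn))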
class LinearRegressor(estimator.Estimator):
"""Linear regressor model.
Train a linear regression model to predict label value given observation of
feature values.
Example:
```python
sparse_column_a = sparse_column_with_hash_bucket(...)
sparse_column_b = sparse_column_with_hash_bucket(...)
sparse_feature_a_x_sparse_feature_b = crossed_column(...)
estimator = LinearRegressor(
feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b])
# Input builders
def input_fn_train(): # returns x, y
...
def input_fn_eval(): # returns x, y
...
estimator.fit(input_fn=input_fn_train)
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x)
```
Input of `fit` and `evaluate` should have following features,
otherwise there will be a KeyError:
* if `weight_column_name` is not `None`:
key=weight_column_name, value=a `Tensor`
* for column in `feature_columns`:
- if isinstance(column, `SparseColumn`):
key=column.name, value=a `SparseTensor`
- if isinstance(column, `WeightedSparseColumn`):
{key=id column name, value=a `SparseTensor`,
key=weight column name, value=a `SparseTensor`}
- if isinstance(column, `RealValuedColumn`):
key=column.name, value=a `Tensor`
"""
def __init__(self, # _joint_weights: pylint: disable=invalid-name
feature_columns,
model_dir=None,
weight_column_name=None,
optimizer=None,
gradient_clip_norm=None,
enable_centered_bias=False,
label_dimension=1,
_joint_weights=False,
config=None,
feature_engineering_fn=None):
"""Construct a `LinearRegressor` estimator object.
Args:
feature_columns: An iterable containing all the feature columns used by
the model. All items in the set should be instances of classes derived
from `FeatureColumn`.
model_dir: Directory to save model parameters, graph, etc. This can
also be used to load checkpoints from the directory into an estimator
to continue training a previously saved model.
weight_column_name: A string defining feature column name representing
weights. It is used to down-weight or boost examples during training. It
will be multiplied by the loss of the example.
optimizer: An instance of `tf.Optimizer` used to train the model. If
`None`, will use an Ftrl optimizer.
gradient_clip_norm: A `float` > 0. If provided, gradients are clipped
to their global norm with this clipping ratio. See
`tf.clip_by_global_norm` for more details.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
label_dimension: Number of regression targets per example. This is the
size of the last dimension of the labels and logits `Tensor` objects
(typically, these have shape `[batch_size, label_dimension]`).
_joint_weights: If True use a single (possibly partitioned) variable to
store the weights. It's faster, but requires all feature columns are
sparse and have the 'sum' combiner. Incompatible with SDCAOptimizer.
config: `RunConfig` object to configure the runtime settings.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into the model.
Returns:
A `LinearRegressor` estimator.
"""
self._feature_columns = tuple(feature_columns or [])
assert self._feature_columns
chief_hook = None
if (isinstance(optimizer, sdca_optimizer.SDCAOptimizer) and
enable_centered_bias):
enable_centered_bias = False
logging.warning("centered_bias is not supported with SDCA, "
"please disable it explicitly.")
head = head_lib.regression_head(
weight_column_name=weight_column_name,
label_dimension=label_dimension,
enable_centered_bias=enable_centered_bias)
params = {
"head": head,
"feature_columns": feature_columns,
"optimizer": optimizer,
}
if isinstance(optimizer, sdca_optimizer.SDCAOptimizer):
assert label_dimension == 1, "SDCA only applies for label_dimension=1."
assert not _joint_weights, ("_joint_weights is incompatible with"
" SDCAOptimizer.")
model_fn = sdca_model_fn
# The model_fn passes the model parameters to the chief_hook. We then use
# the hook to update weights and shrink step only on the chief.
chief_hook = _SdcaUpdateWeightsHook()
params.update({
"weight_column_name": weight_column_name,
"update_weights_hook": chief_hook,
})
else:
model_fn = _linear_model_fn
params.update({
"gradient_clip_norm": gradient_clip_norm,
"joint_weights": _joint_weights,
})
super(LinearRegressor, self).__init__(
model_fn=model_fn,
model_dir=model_dir,
config=config,
params=params,
feature_engineering_fn=feature_engineering_fn)
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
@deprecated_arg_values(
"2017-03-01",
"Please switch to predict_scores, or set `outputs` argument.",
outputs=None)
def predict(self, x=None, input_fn=None, batch_size=None, outputs=None,
as_iterable=True):
"""Returns predictions for given features.
By default, returns predicted scores. But this default will be dropped
soon. Users should either pass `outputs`, or call `predict_scores` method.
Args:
x: features.
input_fn: Input function. If set, x must be None.
batch_size: Override default batch size.
outputs: list of `str`, name of the output to predict.
If `None`, returns scores.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
Numpy array of predicted scores (or an iterable of predicted scores if
as_iterable is True). If `label_dimension == 1`, the shape of the output
is `[batch_size]`, otherwise the shape is `[batch_size, label_dimension]`.
If `outputs` is set, returns a dict of predictions.
"""
if not outputs:
return self.predict_scores(
x=x,
input_fn=input_fn,
batch_size=batch_size,
as_iterable=as_iterable)
return super(LinearRegressor, self).predict(
x=x,
input_fn=input_fn,
batch_size=batch_size,
outputs=outputs,
as_iterable=as_iterable)
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
def predict_scores(self, x=None, input_fn=None, batch_size=None,
as_iterable=True):
"""Returns predicted scores for given features.
Args:
x: features.
input_fn: Input function. If set, x must be None.
batch_size: Override default batch size.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
Numpy array of predicted scores (or an iterable of predicted scores if
as_iterable is True). If `label_dimension == 1`, the shape of the output
is `[batch_size]`, otherwise the shape is `[batch_size, label_dimension]`.
"""
key = prediction_key.PredictionKey.SCORES
preds = super(LinearRegressor, self).predict(
x=x,
input_fn=input_fn,
batch_size=batch_size,
outputs=[key],
as_iterable=as_iterable)
if as_iterable:
return _as_iterable(preds, output=key)
return preds[key]
@deprecated("2017-03-25", "Please use Estimator.export_savedmodel() instead.")
def export(self,
export_dir,
input_fn=None,
input_feature_key=None,
use_deprecated_input_fn=True,
signature_fn=None,
default_batch_size=1,
exports_to_keep=None):
"""See BaseEstimator.export."""
def default_input_fn(unused_estimator, examples):
return layers.parse_feature_columns_from_examples(
examples, self._feature_columns)
return super(LinearRegressor, self).export(
export_dir=export_dir,
input_fn=input_fn or default_input_fn,
input_feature_key=input_feature_key,
use_deprecated_input_fn=use_deprecated_input_fn,
signature_fn=(signature_fn or export.regression_signature_fn),
prediction_key=prediction_key.PredictionKey.SCORES,
default_batch_size=default_batch_size,
exports_to_keep=exports_to_keep)
class LinearEstimator(estimator.Estimator):
"""Linear model with user specified head.
Train a generalized linear model to predict label value given observation of
feature values.
Example:
To do poisson regression,
```python
sparse_column_a = sparse_column_with_hash_bucket(...)
sparse_column_b = sparse_column_with_hash_bucket(...)
sparse_feature_a_x_sparse_feature_b = crossed_column(...)
estimator = LinearEstimator(
feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b],
head=head_lib.poisson_regression_head())
# Input builders
def input_fn_train(): # returns x, y
...
def input_fn_eval(): # returns x, y
...
estimator.fit(input_fn=input_fn_train)
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x)
```
Input of `fit` and `evaluate` should have following features,
otherwise there will be a KeyError:
* if `weight_column_name` is not `None`:
key=weight_column_name, value=a `Tensor`
* for column in `feature_columns`:
- if isinstance(column, `SparseColumn`):
key=column.name, value=a `SparseTensor`
- if isinstance(column, `WeightedSparseColumn`):
{key=id column name, value=a `SparseTensor`,
key=weight column name, value=a `SparseTensor`}
- if isinstance(column, `RealValuedColumn`):
key=column.name, value=a `Tensor`
"""
def __init__(self, # _joint_weights: pylint: disable=invalid-name
feature_columns,
head,
model_dir=None,
weight_column_name=None,
optimizer=None,
gradient_clip_norm=None,
_joint_weights=False,
config=None,
feature_engineering_fn=None):
"""Construct a `LinearEstimator` object.
Args:
feature_columns: An iterable containing all the feature columns used by
the model. All items in the set should be instances of classes derived
from `FeatureColumn`.
head: An instance of _Head class.
model_dir: Directory to save model parameters, graph, etc. This can
also be used to load checkpoints from the directory into an estimator
to continue training a previously saved model.
weight_column_name: A string defining feature column name representing
weights. It is used to down-weight or boost examples during training. It
will be multiplied by the loss of the example.
optimizer: An instance of `tf.Optimizer` used to train the model. If
`None`, will use an Ftrl optimizer.
gradient_clip_norm: A `float` > 0. If provided, gradients are clipped
to their global norm with this clipping ratio. See
`tf.clip_by_global_norm` for more details.
_joint_weights: If True use a single (possibly partitioned) variable to
store the weights. It's faster, but requires all feature columns are
sparse and have the 'sum' combiner. Incompatible with SDCAOptimizer.
config: `RunConfig` object to configure the runtime settings.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into the model.
Returns:
A `LinearEstimator` estimator.
Raises:
ValueError: if optimizer is not supported, e.g., SDCAOptimizer
"""
assert feature_columns
if isinstance(optimizer, sdca_optimizer.SDCAOptimizer):
raise ValueError("LinearEstimator does not support SDCA optimizer.")
params = {
"head": head,
"feature_columns": feature_columns,
"optimizer": optimizer,
"gradient_clip_norm": gradient_clip_norm,
"joint_weights": _joint_weights,
}
super(LinearEstimator, self).__init__(
model_fn=_linear_model_fn,
model_dir=model_dir,
config=config,
params=params,
feature_engineering_fn=feature_engineering_fn)
|
mssurajkaiga/rhythmbox | refs/heads/master | plugins/artsearch/musicbrainz.py | 2 | # -*- Mode: python; coding: utf-8; tab-width: 8; indent-tabs-mode: t; -*-
#
# Copyright (C) 2009 Jonathan Matthew <jonathan@d14n.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# The Rhythmbox authors hereby grant permission for non-GPL compatible
# GStreamer plugins to be used and distributed together with GStreamer
# and Rhythmbox. This permission is above and beyond the permissions granted
# by the GPL license by which Rhythmbox is covered. If you modify this code
# you may extend this exception to your version of the code, but you are not
# obligated to do so. If you do not wish to do so, delete this exception
# statement from your version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
import xml.dom.minidom as dom
import rb, urllib
from gi.repository import RB
# musicbrainz URLs
MUSICBRAINZ_RELEASE_URL = "http://musicbrainz.org/ws/2/release/%s?inc=artists"
MUSICBRAINZ_RELEASE_PREFIX = "http://musicbrainz.org/release/"
MUSICBRAINZ_RELEASE_SUFFIX = ".html"
MUSICBRAINZ_SEARCH_QUERY = "artist:\"%s\" AND release:\"%s\""
MUSICBRAINZ_SEARCH_URL = "http://musicbrainz.org/ws/2/release/?query=%s&limit=1"
# musicbrainz IDs
MUSICBRAINZ_VARIOUS_ARTISTS = "89ad4ac3-39f7-470e-963a-56509c546377"
# Amazon URL bits
AMAZON_IMAGE_URL = "http://images.amazon.com/images/P/%s.01.LZZZZZZZ.jpg"
class MusicBrainzSearch(object):
def get_release_cb (self, data, args):
(key, store, callback, cbargs) = args
if data is None:
print "musicbrainz release request returned nothing"
callback(*cbargs)
return
try:
parsed = dom.parseString(data)
storekey = RB.ExtDBKey.create_storage('album', key.get_field('album'))
# check that there's an artist that isn't 'various artists'
artist_tags = parsed.getElementsByTagName('artist')
if len(artist_tags) > 0:
artist_id = artist_tags[0].attributes['id'].firstChild.data
if artist_id != MUSICBRAINZ_VARIOUS_ARTISTS:
# add the artist name (as album-artist) to the storage key
nametags = artist_tags[0].getElementsByTagName('name')
if len(nametags) > 0:
artistname = nametags[0].firstChild.data
print "got musicbrainz artist name %s" % artistname
storekey.add_field('artist', artistname)
# look for an ASIN tag
asin_tags = parsed.getElementsByTagName('asin')
if len(asin_tags) > 0:
asin = asin_tags[0].firstChild.data
print "got ASIN %s" % asin
image_url = AMAZON_IMAGE_URL % asin
store.store_uri(storekey, RB.ExtDBSourceType.SEARCH, image_url)
else:
print "no ASIN for this release"
callback(*cbargs)
except Exception, e:
print "exception parsing musicbrainz response: %s" % e
callback(*cbargs)
def try_search_artist_album (self, key, store, callback, *args):
album = key.get_field("album")
artist = key.get_field("artist")
if not album or not artist:
print "artist or album information missing"
callback(*args)
return
query = MUSICBRAINZ_SEARCH_QUERY % (artist.lower(), album.lower())
url = MUSICBRAINZ_SEARCH_URL % (urllib.quote(query, safe=':'),)
loader = rb.Loader()
loader.get_url(url, self.get_release_cb, (key, store, callback, args))
def search(self, key, last_time, store, callback, *args):
key = key.copy() # ugh
album_id = key.get_info("musicbrainz-albumid")
if album_id is None:
print "no musicbrainz release ID for this track"
self.try_search_artist_album(key, store, callback, *args)
return
if album_id.startswith(MUSICBRAINZ_RELEASE_PREFIX):
album_id = album_id[len(MUSICBRAINZ_RELEASE_PREFIX):]
if album_id.endswith(MUSICBRAINZ_RELEASE_SUFFIX):
album_id = album_id[:-len(MUSICBRAINZ_RELEASE_SUFFIX)]
print "stripped release ID: %s" % album_id
url = MUSICBRAINZ_RELEASE_URL % (album_id)
loader = rb.Loader()
loader.get_url(url, self.get_release_cb, (key, store, callback, args))
|
PeterWangPo/phantomjs | refs/heads/master | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/steps/__init__.py | 122 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# FIXME: Is this the right way to do this?
from webkitpy.tool.steps.addsvnmimetypeforpng import AddSvnMimetypeForPng
from webkitpy.tool.steps.applypatch import ApplyPatch
from webkitpy.tool.steps.applypatchwithlocalcommit import ApplyPatchWithLocalCommit
from webkitpy.tool.steps.applywatchlist import ApplyWatchList
from webkitpy.tool.steps.attachtobug import AttachToBug
from webkitpy.tool.steps.build import Build
from webkitpy.tool.steps.checkstyle import CheckStyle
from webkitpy.tool.steps.cleanworkingdirectory import CleanWorkingDirectory
from webkitpy.tool.steps.closebug import CloseBug
from webkitpy.tool.steps.closebugforlanddiff import CloseBugForLandDiff
from webkitpy.tool.steps.closepatch import ClosePatch
from webkitpy.tool.steps.commit import Commit
from webkitpy.tool.steps.confirmdiff import ConfirmDiff
from webkitpy.tool.steps.createbug import CreateBug
from webkitpy.tool.steps.discardlocalchanges import DiscardLocalChanges
from webkitpy.tool.steps.editchangelog import EditChangeLog
from webkitpy.tool.steps.ensurebugisopenandassigned import EnsureBugIsOpenAndAssigned
from webkitpy.tool.steps.ensurelocalcommitifneeded import EnsureLocalCommitIfNeeded
from webkitpy.tool.steps.haslanded import HasLanded
from webkitpy.tool.steps.obsoletepatches import ObsoletePatches
from webkitpy.tool.steps.options import Options
from webkitpy.tool.steps.postdiff import PostDiff
from webkitpy.tool.steps.postdiffforcommit import PostDiffForCommit
from webkitpy.tool.steps.postdiffforrevert import PostDiffForRevert
from webkitpy.tool.steps.preparechangelog import PrepareChangeLog
from webkitpy.tool.steps.preparechangelogforrevert import PrepareChangeLogForRevert
from webkitpy.tool.steps.promptforbugortitle import PromptForBugOrTitle
from webkitpy.tool.steps.reopenbugafterrollout import ReopenBugAfterRollout
from webkitpy.tool.steps.revertrevision import RevertRevision
from webkitpy.tool.steps.runtests import RunTests
from webkitpy.tool.steps.suggestreviewers import SuggestReviewers
from webkitpy.tool.steps.update import Update
from webkitpy.tool.steps.updatechangelogswithreviewer import UpdateChangeLogsWithReviewer
from webkitpy.tool.steps.validatechangelogs import ValidateChangeLogs
from webkitpy.tool.steps.validatereviewer import ValidateReviewer
|
indashnet/InDashNet.Open.UN2000 | refs/heads/master | android/cts/tools/utils/cts/__init__.py | 6 | #!/usr/bin/python2.4
# Copyright (C) 2009 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Package initialization for the cts package."""
__all__ = ['tools']
|
lwiecek/django | refs/heads/master | tests/utils_tests/test_itercompat.py | 569 | from django.test import TestCase
from .models import Category, Thing
class TestIsIterator(TestCase):
def test_regression(self):
"""This failed on Django 1.5/Py2.6 because category has a next method."""
category = Category.objects.create(name='category')
Thing.objects.create(category=category)
Thing.objects.filter(category=category)
|
CompassionCH/compassion-modules | refs/heads/10.0 | sponsorship_compassion/mappings/cancel_sponsorship_mapping.py | 4 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2016 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Michael Sandoz <michaelsandoz87@gmail.com>
#
# The licence is in the file __manifest__.py
#
##############################################################################
from odoo import fields
from .base_sponsorship_mapping import BaseSponsorshipMapping
from datetime import datetime
class CancelSponsorship(BaseSponsorshipMapping):
""" This class contains the mapping between Odoo fields and GMC field names
for deleting a sponsorship.
"""
MAPPING_NAME = 'CancelSponsorship'
FIELDS_TO_SUBMIT = {
"FinalCommitmentOfLine": None,
"Beneficiary_GlobalID": None,
"HoldExpirationDate": None,
"Commitment_ID": None,
"SponsorSupporterGlobalID": None,
"GlobalPartner_ID": None,
"HoldType": None,
"DelinkType": None,
"PrimaryHoldOwner": None
}
def __init__(self, env):
super(CancelSponsorship, self).__init__(env)
self.CONNECT_MAPPING['HoldID'] = 'hold_id'
def _process_connect_data(self, connect_data):
# Set end date to correct format for Connect
end_date_str = connect_data.get(
'HoldExpirationDate') or fields.Datetime.now()
end_date = datetime.strptime(end_date_str, "%Y-%m-%d %H:%M:%S")
connect_data['HoldExpirationDate'] = end_date.strftime(
"%Y-%m-%dT%H:%M:%SZ")
|
shubhdev/edxOnBaadal | refs/heads/master | lms/djangoapps/certificates/badge_handler.py | 32 | """
BadgeHandler object-- used to award Badges to users who have completed courses.
"""
import hashlib
import logging
import mimetypes
from eventtracking import tracker
import requests
from django.template.defaultfilters import slugify
from django.utils.translation import ugettext as _
from django.conf import settings
from django.core.urlresolvers import reverse
from lazy import lazy
from requests.exceptions import HTTPError  # raised by Response.raise_for_status()
from certificates.models import BadgeAssertion, BadgeImageConfiguration
from student.models import CourseEnrollment
from xmodule.modulestore.django import modulestore
LOGGER = logging.getLogger(__name__)
class BadgeHandler(object):
"""
The only properly public method of this class is 'award'. If an alternative object is created for a different
badging service, the other methods don't need to be reproduced.
"""
# Global caching dict
badges = {}
def __init__(self, course_key):
self.course_key = course_key
assert settings.BADGR_API_TOKEN
@lazy
def base_url(self):
"""
Base URL for all API requests.
"""
return "{}/v1/issuer/issuers/{}".format(settings.BADGR_BASE_URL, settings.BADGR_ISSUER_SLUG)
@lazy
def badge_create_url(self):
"""
URL for generating a new Badge specification
"""
return "{}/badges".format(self.base_url)
def badge_url(self, mode):
"""
Get the URL for a course's badge in a given mode.
"""
return "{}/{}".format(self.badge_create_url, self.course_slug(mode))
def assertion_url(self, mode):
"""
URL for generating a new assertion.
"""
return "{}/assertions".format(self.badge_url(mode))
def course_slug(self, mode):
"""
Slug ought to be deterministic and limited in size so it's not too big for Badgr.
Badgr's max slug length is 255.
"""
# Seven digits should be enough to realistically avoid collisions. That's what git services use.
digest = hashlib.sha256(u"{}{}".format(unicode(self.course_key), unicode(mode))).hexdigest()[:7]
base_slug = slugify(unicode(self.course_key) + u'_{}_'.format(mode))[:248]
return base_slug + digest
def log_if_raised(self, response, data):
"""
Log server response if there was an error.
"""
try:
response.raise_for_status()
except HTTPError:
LOGGER.error(
u"Encountered an error when contacting the Badgr-Server. Request sent to %s with headers %s.\n"
u"and data values %s\n"
u"Response status was %s.\n%s",
repr(response.request.url), repr(response.request.headers),
repr(data),
response.status_code, response.content
)
raise
def get_headers(self):
"""
Headers to send along with the request-- used for authentication.
"""
return {'Authorization': 'Token {}'.format(settings.BADGR_API_TOKEN)}
def ensure_badge_created(self, mode):
"""
Verify a badge has been created for this mode of the course, and, if not, create it
"""
if self.course_slug(mode) in BadgeHandler.badges:
return
response = requests.get(self.badge_url(mode), headers=self.get_headers())
if response.status_code != 200:
self.create_badge(mode)
BadgeHandler.badges[self.course_slug(mode)] = True
@staticmethod
def badge_description(course, mode):
"""
Returns a description for the earned badge.
"""
if course.end:
return _(u'Completed the course "{course_name}" ({course_mode}, {start_date} - {end_date})').format(
start_date=course.start.date(),
end_date=course.end.date(),
course_name=course.display_name,
course_mode=mode,
)
else:
return _(u'Completed the course "{course_name}" ({course_mode})').format(
course_name=course.display_name,
course_mode=mode,
)
def site_prefix(self):
"""
Get the prefix for the site URL-- protocol and server name.
"""
scheme = u"https" if settings.HTTPS == "on" else u"http"
return u'{}://{}'.format(scheme, settings.SITE_NAME)
def create_badge(self, mode):
"""
Create the badge spec for a course's mode.
"""
course = modulestore().get_course(self.course_key)
image = BadgeImageConfiguration.image_for_mode(mode)
# We don't want to bother validating the file any further than making sure we can detect its MIME type,
# for HTTP. The Badgr-Server should tell us if there's anything in particular wrong with it.
content_type, __ = mimetypes.guess_type(image.name)
if not content_type:
raise ValueError(
"Could not determine content-type of image! Make sure it is a properly named .png file."
)
files = {'image': (image.name, image, content_type)}
about_path = reverse('about_course', kwargs={'course_id': unicode(self.course_key)})
data = {
'name': course.display_name,
'criteria': u'{}{}'.format(self.site_prefix(), about_path),
'slug': self.course_slug(mode),
'description': self.badge_description(course, mode)
}
result = requests.post(self.badge_create_url, headers=self.get_headers(), data=data, files=files)
self.log_if_raised(result, data)
def send_assertion_created_event(self, user, assertion):
"""
Send an analytics event to record the creation of a badge assertion.
"""
tracker.emit(
'edx.badge.assertion.created', {
'user_id': user.id,
'course_id': unicode(self.course_key),
'enrollment_mode': assertion.mode,
'assertion_id': assertion.id,
'assertion_image_url': assertion.data['image'],
'assertion_json_url': assertion.data['json']['id'],
'issuer': assertion.data['issuer'],
}
)
def create_assertion(self, user, mode):
"""
Register an assertion with the Badgr server for a particular user in a particular course mode for
this course.
"""
data = {
'email': user.email,
'evidence': self.site_prefix() + reverse(
'cert_html_view', kwargs={'user_id': user.id, 'course_id': unicode(self.course_key)}
) + '?evidence_visit=1'
}
response = requests.post(self.assertion_url(mode), headers=self.get_headers(), data=data)
self.log_if_raised(response, data)
assertion, __ = BadgeAssertion.objects.get_or_create(course_id=self.course_key, user=user, mode=mode)
assertion.data = response.json()
assertion.save()
self.send_assertion_created_event(user, assertion)
def award(self, user):
"""
Award a user a badge for their work on the course.
"""
mode = CourseEnrollment.objects.get(user=user, course_id=self.course_key).mode
self.ensure_badge_created(mode)
self.create_assertion(user, mode)
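# Illustrative usage sketch (added for clarity, not in the original file; the
# variables are hypothetical and settings.BADGR_API_TOKEN must be configured):
#
#   handler = BadgeHandler(course_key)
#   handler.award(user)  # looks up enrollment mode, ensures the badge spec
#                        # exists on the Badgr server, then issues an assertion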
|
sinisterchipmunk/tomato | refs/heads/master | ext/tomato/external/scons/engine/SCons/Tool/gnulink.py | 34 | """SCons.Tool.gnulink
Tool-specific initialization for the gnu linker.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/gnulink.py 5023 2010/06/14 22:05:46 scons"
import SCons.Util
import link
linkers = ['g++', 'gcc']
def generate(env):
"""Add Builders and construction variables for gnulink to an Environment."""
link.generate(env)
if env['PLATFORM'] == 'hpux':
env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -shared -fPIC')
# __RPATH is set to $_RPATH in the platform specification if that
# platform supports it.
env.Append(LINKFLAGS=['$__RPATH'])
env['RPATHPREFIX'] = '-Wl,-rpath='
env['RPATHSUFFIX'] = ''
env['_RPATH'] = '${_concat(RPATHPREFIX, RPATH, RPATHSUFFIX, __env__)}'
def exists(env):
return env.Detect(linkers)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
WCCCEDU/twitter-commons | refs/heads/master | src/python/twitter/common/zookeeper/named_value.py | 16 | from abc import ABCMeta, abstractproperty
from twitter.common.lang import Compatibility
class NamedValue(object):
__metaclass__ = ABCMeta
def __init__(self, value):
if isinstance(value, int):
self._value = value if value in self.map else 0
elif isinstance(value, Compatibility.string):
self._value = dict((v, k) for (k, v) in self.map.items()).get(value.upper(), 0)
else:
raise ValueError('Unknown value: %s' % value)
@abstractproperty
def map(self):
"""Returns the map from id => string"""
pass
def __str__(self):
return self.map.get(self._value, 'UNKNOWN')
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.map[self._value])
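# Illustrative only (not part of twitter.common): a hypothetical subclass
# showing how NamedValue is meant to be extended. `map` supplies the
# id => name table used for lookups in both directions; unknown values
# fall back to 0.
class ExampleStatus(NamedValue):
  @property
  def map(self):
    return {0: 'UNKNOWN', 1: 'CONNECTED', 2: 'EXPIRED'}
# Both spellings resolve to the same value:
# str(ExampleStatus('connected')) == str(ExampleStatus(1)) == 'CONNECTED'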
|
c-amr/camr | refs/heads/master | stanfordnlp/unidecode/x05d.py | 252 | data = (
'Lang ', # 0x00
'Kan ', # 0x01
'Lao ', # 0x02
'Lai ', # 0x03
'Xian ', # 0x04
'Que ', # 0x05
'Kong ', # 0x06
'Chong ', # 0x07
'Chong ', # 0x08
'Ta ', # 0x09
'Lin ', # 0x0a
'Hua ', # 0x0b
'Ju ', # 0x0c
'Lai ', # 0x0d
'Qi ', # 0x0e
'Min ', # 0x0f
'Kun ', # 0x10
'Kun ', # 0x11
'Zu ', # 0x12
'Gu ', # 0x13
'Cui ', # 0x14
'Ya ', # 0x15
'Ya ', # 0x16
'Gang ', # 0x17
'Lun ', # 0x18
'Lun ', # 0x19
'Leng ', # 0x1a
'Jue ', # 0x1b
'Duo ', # 0x1c
'Zheng ', # 0x1d
'Guo ', # 0x1e
'Yin ', # 0x1f
'Dong ', # 0x20
'Han ', # 0x21
'Zheng ', # 0x22
'Wei ', # 0x23
'Yao ', # 0x24
'Pi ', # 0x25
'Yan ', # 0x26
'Song ', # 0x27
'Jie ', # 0x28
'Beng ', # 0x29
'Zu ', # 0x2a
'Jue ', # 0x2b
'Dong ', # 0x2c
'Zhan ', # 0x2d
'Gu ', # 0x2e
'Yin ', # 0x2f
'[?] ', # 0x30
'Ze ', # 0x31
'Huang ', # 0x32
'Yu ', # 0x33
'Wei ', # 0x34
'Yang ', # 0x35
'Feng ', # 0x36
'Qiu ', # 0x37
'Dun ', # 0x38
'Ti ', # 0x39
'Yi ', # 0x3a
'Zhi ', # 0x3b
'Shi ', # 0x3c
'Zai ', # 0x3d
'Yao ', # 0x3e
'E ', # 0x3f
'Zhu ', # 0x40
'Kan ', # 0x41
'Lu ', # 0x42
'Yan ', # 0x43
'Mei ', # 0x44
'Gan ', # 0x45
'Ji ', # 0x46
'Ji ', # 0x47
'Huan ', # 0x48
'Ting ', # 0x49
'Sheng ', # 0x4a
'Mei ', # 0x4b
'Qian ', # 0x4c
'Wu ', # 0x4d
'Yu ', # 0x4e
'Zong ', # 0x4f
'Lan ', # 0x50
'Jue ', # 0x51
'Yan ', # 0x52
'Yan ', # 0x53
'Wei ', # 0x54
'Zong ', # 0x55
'Cha ', # 0x56
'Sui ', # 0x57
'Rong ', # 0x58
'Yamashina ', # 0x59
'Qin ', # 0x5a
'Yu ', # 0x5b
'Kewashii ', # 0x5c
'Lou ', # 0x5d
'Tu ', # 0x5e
'Dui ', # 0x5f
'Xi ', # 0x60
'Weng ', # 0x61
'Cang ', # 0x62
'Dang ', # 0x63
'Hong ', # 0x64
'Jie ', # 0x65
'Ai ', # 0x66
'Liu ', # 0x67
'Wu ', # 0x68
'Song ', # 0x69
'Qiao ', # 0x6a
'Zi ', # 0x6b
'Wei ', # 0x6c
'Beng ', # 0x6d
'Dian ', # 0x6e
'Cuo ', # 0x6f
'Qian ', # 0x70
'Yong ', # 0x71
'Nie ', # 0x72
'Cuo ', # 0x73
'Ji ', # 0x74
'[?] ', # 0x75
'Tao ', # 0x76
'Song ', # 0x77
'Zong ', # 0x78
'Jiang ', # 0x79
'Liao ', # 0x7a
'Kang ', # 0x7b
'Chan ', # 0x7c
'Die ', # 0x7d
'Cen ', # 0x7e
'Ding ', # 0x7f
'Tu ', # 0x80
'Lou ', # 0x81
'Zhang ', # 0x82
'Zhan ', # 0x83
'Zhan ', # 0x84
'Ao ', # 0x85
'Cao ', # 0x86
'Qu ', # 0x87
'Qiang ', # 0x88
'Zui ', # 0x89
'Zui ', # 0x8a
'Dao ', # 0x8b
'Dao ', # 0x8c
'Xi ', # 0x8d
'Yu ', # 0x8e
'Bo ', # 0x8f
'Long ', # 0x90
'Xiang ', # 0x91
'Ceng ', # 0x92
'Bo ', # 0x93
'Qin ', # 0x94
'Jiao ', # 0x95
'Yan ', # 0x96
'Lao ', # 0x97
'Zhan ', # 0x98
'Lin ', # 0x99
'Liao ', # 0x9a
'Liao ', # 0x9b
'Jin ', # 0x9c
'Deng ', # 0x9d
'Duo ', # 0x9e
'Zun ', # 0x9f
'Jiao ', # 0xa0
'Gui ', # 0xa1
'Yao ', # 0xa2
'Qiao ', # 0xa3
'Yao ', # 0xa4
'Jue ', # 0xa5
'Zhan ', # 0xa6
'Yi ', # 0xa7
'Xue ', # 0xa8
'Nao ', # 0xa9
'Ye ', # 0xaa
'Ye ', # 0xab
'Yi ', # 0xac
'E ', # 0xad
'Xian ', # 0xae
'Ji ', # 0xaf
'Xie ', # 0xb0
'Ke ', # 0xb1
'Xi ', # 0xb2
'Di ', # 0xb3
'Ao ', # 0xb4
'Zui ', # 0xb5
'[?] ', # 0xb6
'Ni ', # 0xb7
'Rong ', # 0xb8
'Dao ', # 0xb9
'Ling ', # 0xba
'Za ', # 0xbb
'Yu ', # 0xbc
'Yue ', # 0xbd
'Yin ', # 0xbe
'[?] ', # 0xbf
'Jie ', # 0xc0
'Li ', # 0xc1
'Sui ', # 0xc2
'Long ', # 0xc3
'Long ', # 0xc4
'Dian ', # 0xc5
'Ying ', # 0xc6
'Xi ', # 0xc7
'Ju ', # 0xc8
'Chan ', # 0xc9
'Ying ', # 0xca
'Kui ', # 0xcb
'Yan ', # 0xcc
'Wei ', # 0xcd
'Nao ', # 0xce
'Quan ', # 0xcf
'Chao ', # 0xd0
'Cuan ', # 0xd1
'Luan ', # 0xd2
'Dian ', # 0xd3
'Dian ', # 0xd4
'[?] ', # 0xd5
'Yan ', # 0xd6
'Yan ', # 0xd7
'Yan ', # 0xd8
'Nao ', # 0xd9
'Yan ', # 0xda
'Chuan ', # 0xdb
'Gui ', # 0xdc
'Chuan ', # 0xdd
'Zhou ', # 0xde
'Huang ', # 0xdf
'Jing ', # 0xe0
'Xun ', # 0xe1
'Chao ', # 0xe2
'Chao ', # 0xe3
'Lie ', # 0xe4
'Gong ', # 0xe5
'Zuo ', # 0xe6
'Qiao ', # 0xe7
'Ju ', # 0xe8
'Gong ', # 0xe9
'Kek ', # 0xea
'Wu ', # 0xeb
'Pwu ', # 0xec
'Pwu ', # 0xed
'Chai ', # 0xee
'Qiu ', # 0xef
'Qiu ', # 0xf0
'Ji ', # 0xf1
'Yi ', # 0xf2
'Si ', # 0xf3
'Ba ', # 0xf4
'Zhi ', # 0xf5
'Zhao ', # 0xf6
'Xiang ', # 0xf7
'Yi ', # 0xf8
'Jin ', # 0xf9
'Xun ', # 0xfa
'Juan ', # 0xfb
'Phas ', # 0xfc
'Xun ', # 0xfd
'Jin ', # 0xfe
'Fu ', # 0xff
)
|
p0lyb1us/polybius.fyi | refs/heads/master | reveal.js/node_modules/node-gyp/gyp/pylib/gyp/generator/dump_dependency_json.py | 1534 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import os
import gyp
import gyp.common
import gyp.msvs_emulation
import json
import sys
generator_supports_multiple_toolsets = True
generator_wants_static_library_dependencies_adjusted = False
generator_filelist_paths = {
}
generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
'LIB_DIR', 'SHARED_LIB_DIR']:
# Some gyp steps fail if these are empty(!).
generator_default_variables[dirname] = 'dir'
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
'CONFIGURATION_NAME']:
generator_default_variables[unused] = ''
def CalculateVariables(default_variables, params):
generator_flags = params.get('generator_flags', {})
for key, val in generator_flags.items():
default_variables.setdefault(key, val)
default_variables.setdefault('OS', gyp.common.GetFlavor(params))
flavor = gyp.common.GetFlavor(params)
if flavor == 'win':
# Copy additional generator configuration data from VS, which is shared
# by the Windows Ninja generator.
import gyp.generator.msvs as msvs_generator
generator_additional_non_configuration_keys = getattr(msvs_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(msvs_generator,
'generator_additional_path_sections', [])
gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
def CalculateGeneratorInputInfo(params):
"""Calculate the generator specific info that gets fed to input (called by
gyp)."""
generator_flags = params.get('generator_flags', {})
if generator_flags.get('adjust_static_libraries', False):
global generator_wants_static_library_dependencies_adjusted
generator_wants_static_library_dependencies_adjusted = True
toplevel = params['options'].toplevel_dir
generator_dir = os.path.relpath(params['options'].generator_output or '.')
# output_dir: relative path from generator_dir to the build directory.
output_dir = generator_flags.get('output_dir', 'out')
qualified_out_dir = os.path.normpath(os.path.join(
toplevel, generator_dir, output_dir, 'gypfiles'))
global generator_filelist_paths
generator_filelist_paths = {
'toplevel': toplevel,
'qualified_out_dir': qualified_out_dir,
}
def GenerateOutput(target_list, target_dicts, data, params):
# Map of target -> list of targets it depends on.
edges = {}
# Queue of targets to visit.
targets_to_visit = target_list[:]
while len(targets_to_visit) > 0:
target = targets_to_visit.pop()
if target in edges:
continue
edges[target] = []
for dep in target_dicts[target].get('dependencies', []):
edges[target].append(dep)
targets_to_visit.append(dep)
try:
filepath = params['generator_flags']['output_dir']
except KeyError:
filepath = '.'
filename = os.path.join(filepath, 'dump.json')
f = open(filename, 'w')
json.dump(edges, f)
f.close()
print 'Wrote json to %s.' % filename
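# Shape of the emitted dump.json (added note; target names are illustrative):
#   {"app.gyp:app#target": ["lib.gyp:lib#target"], "lib.gyp:lib#target": []}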
|
tanghong123/googletest | refs/heads/master | scripts/fuse_gtest_files.py | 2577 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""fuse_gtest_files.py v0.2.0
Fuses Google Test source code into a .h file and a .cc file.
SYNOPSIS
fuse_gtest_files.py [GTEST_ROOT_DIR] OUTPUT_DIR
Scans GTEST_ROOT_DIR for Google Test source code, and generates
two files: OUTPUT_DIR/gtest/gtest.h and OUTPUT_DIR/gtest/gtest-all.cc.
Then you can build your tests by adding OUTPUT_DIR to the include
search path and linking with OUTPUT_DIR/gtest/gtest-all.cc. These
two files contain everything you need to use Google Test. Hence
you can "install" Google Test by copying them to wherever you want.
GTEST_ROOT_DIR can be omitted and defaults to the parent
directory of the directory holding this script.
EXAMPLES
./fuse_gtest_files.py fused_gtest
./fuse_gtest_files.py path/to/unpacked/gtest fused_gtest
This tool is experimental. In particular, it assumes that there is no
conditional inclusion of Google Test headers. Please report any
problems to googletestframework@googlegroups.com. You can read
http://code.google.com/p/googletest/wiki/GoogleTestAdvancedGuide for
more information.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sets
import sys
# We assume that this file is in the scripts/ directory in the Google
# Test root directory.
DEFAULT_GTEST_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')
# Regex for matching '#include "gtest/..."'.
INCLUDE_GTEST_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(gtest/.+)"')
# Regex for matching '#include "src/..."'.
INCLUDE_SRC_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(src/.+)"')
# Where to find the source seed files.
GTEST_H_SEED = 'include/gtest/gtest.h'
GTEST_SPI_H_SEED = 'include/gtest/gtest-spi.h'
GTEST_ALL_CC_SEED = 'src/gtest-all.cc'
# Where to put the generated files.
GTEST_H_OUTPUT = 'gtest/gtest.h'
GTEST_ALL_CC_OUTPUT = 'gtest/gtest-all.cc'
def VerifyFileExists(directory, relative_path):
"""Verifies that the given file exists; aborts on failure.
relative_path is the file path relative to the given directory.
"""
if not os.path.isfile(os.path.join(directory, relative_path)):
print 'ERROR: Cannot find %s in directory %s.' % (relative_path,
directory)
print ('Please either specify a valid project root directory '
'or omit it on the command line.')
sys.exit(1)
def ValidateGTestRootDir(gtest_root):
"""Makes sure gtest_root points to a valid gtest root directory.
The function aborts the program on failure.
"""
VerifyFileExists(gtest_root, GTEST_H_SEED)
VerifyFileExists(gtest_root, GTEST_ALL_CC_SEED)
def VerifyOutputFile(output_dir, relative_path):
"""Verifies that the given output file path is valid.
relative_path is relative to the output_dir directory.
"""
# Makes sure the output file either doesn't exist or can be overwritten.
output_file = os.path.join(output_dir, relative_path)
if os.path.exists(output_file):
# TODO(wan@google.com): The following user-interaction doesn't
# work with automated processes. We should provide a way for the
# Makefile to force overwriting the files.
print ('%s already exists in directory %s - overwrite it? (y/N) ' %
(relative_path, output_dir))
answer = sys.stdin.readline().strip()
if answer not in ['y', 'Y']:
print 'ABORTED.'
sys.exit(1)
# Makes sure the directory holding the output file exists; creates
# it and all its ancestors if necessary.
parent_directory = os.path.dirname(output_file)
if not os.path.isdir(parent_directory):
os.makedirs(parent_directory)
def ValidateOutputDir(output_dir):
"""Makes sure output_dir points to a valid output directory.
The function aborts the program on failure.
"""
VerifyOutputFile(output_dir, GTEST_H_OUTPUT)
VerifyOutputFile(output_dir, GTEST_ALL_CC_OUTPUT)
def FuseGTestH(gtest_root, output_dir):
"""Scans folder gtest_root to generate gtest/gtest.h in output_dir."""
output_file = file(os.path.join(output_dir, GTEST_H_OUTPUT), 'w')
processed_files = sets.Set() # Holds all gtest headers we've processed.
def ProcessFile(gtest_header_path):
"""Processes the given gtest header file."""
# We don't process the same header twice.
if gtest_header_path in processed_files:
return
processed_files.add(gtest_header_path)
# Reads each line in the given gtest header.
for line in file(os.path.join(gtest_root, gtest_header_path), 'r'):
m = INCLUDE_GTEST_FILE_REGEX.match(line)
if m:
# It's '#include "gtest/..."' - let's process it recursively.
ProcessFile('include/' + m.group(1))
else:
# Otherwise we copy the line unchanged to the output file.
output_file.write(line)
ProcessFile(GTEST_H_SEED)
output_file.close()
def FuseGTestAllCcToFile(gtest_root, output_file):
"""Scans folder gtest_root to generate gtest/gtest-all.cc in output_file."""
processed_files = sets.Set()
def ProcessFile(gtest_source_file):
"""Processes the given gtest source file."""
# We don't process the same #included file twice.
if gtest_source_file in processed_files:
return
processed_files.add(gtest_source_file)
# Reads each line in the given gtest source file.
for line in file(os.path.join(gtest_root, gtest_source_file), 'r'):
m = INCLUDE_GTEST_FILE_REGEX.match(line)
if m:
if 'include/' + m.group(1) == GTEST_SPI_H_SEED:
# It's '#include "gtest/gtest-spi.h"'. This file is not
# #included by "gtest/gtest.h", so we need to process it.
ProcessFile(GTEST_SPI_H_SEED)
else:
# It's '#include "gtest/foo.h"' where foo is not gtest-spi.
# We treat it as '#include "gtest/gtest.h"', as all other
# gtest headers are being fused into gtest.h and cannot be
# #included directly.
# There is no need to #include "gtest/gtest.h" more than once.
if not GTEST_H_SEED in processed_files:
processed_files.add(GTEST_H_SEED)
output_file.write('#include "%s"\n' % (GTEST_H_OUTPUT,))
else:
m = INCLUDE_SRC_FILE_REGEX.match(line)
if m:
# It's '#include "src/foo"' - let's process it recursively.
ProcessFile(m.group(1))
else:
output_file.write(line)
ProcessFile(GTEST_ALL_CC_SEED)
def FuseGTestAllCc(gtest_root, output_dir):
"""Scans folder gtest_root to generate gtest/gtest-all.cc in output_dir."""
output_file = file(os.path.join(output_dir, GTEST_ALL_CC_OUTPUT), 'w')
FuseGTestAllCcToFile(gtest_root, output_file)
output_file.close()
def FuseGTest(gtest_root, output_dir):
"""Fuses gtest.h and gtest-all.cc."""
ValidateGTestRootDir(gtest_root)
ValidateOutputDir(output_dir)
FuseGTestH(gtest_root, output_dir)
FuseGTestAllCc(gtest_root, output_dir)
def main():
argc = len(sys.argv)
if argc == 2:
# fuse_gtest_files.py OUTPUT_DIR
FuseGTest(DEFAULT_GTEST_ROOT_DIR, sys.argv[1])
elif argc == 3:
# fuse_gtest_files.py GTEST_ROOT_DIR OUTPUT_DIR
FuseGTest(sys.argv[1], sys.argv[2])
else:
print __doc__
sys.exit(1)
if __name__ == '__main__':
main()
|
3nids/QGIS | refs/heads/master | tests/src/python/test_qgsrasterfilewritertask.py | 42 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsRasterFileWriterTask.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '12/02/2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
import qgis # NOQA
import os
from qgis.core import (
QgsApplication,
QgsCoordinateTransformContext,
QgsRasterLayer,
QgsRasterPipe,
QgsRasterFileWriter,
QgsRasterFileWriterTask
)
from qgis.PyQt.QtCore import QCoreApplication, QDir
from qgis.testing import start_app, unittest
from utilities import unitTestDataPath
start_app()
def create_temp_filename(base_file):
return os.path.join(str(QDir.tempPath()), base_file)
class TestQgsRasterFileWriterTask(unittest.TestCase):
def setUp(self):
self.success = False
self.fail = False
def onSuccess(self):
self.success = True
def onFail(self):
self.fail = True
def testSuccess(self):
"""test successfully writing a layer"""
path = os.path.join(unitTestDataPath(), 'raster', 'with_color_table.tif')
raster_layer = QgsRasterLayer(path, "test")
self.assertTrue(raster_layer.isValid())
pipe = QgsRasterPipe()
self.assertTrue(pipe.set(raster_layer.dataProvider().clone()))
tmp = create_temp_filename('success.tif')
writer = QgsRasterFileWriter(tmp)
task = QgsRasterFileWriterTask(writer, pipe, 100, 100, raster_layer.extent(), raster_layer.crs(), QgsCoordinateTransformContext())
task.writeComplete.connect(self.onSuccess)
task.errorOccurred.connect(self.onFail)
QgsApplication.taskManager().addTask(task)
while not self.success and not self.fail:
QCoreApplication.processEvents()
self.assertTrue(self.success)
self.assertFalse(self.fail)
self.assertTrue(os.path.exists(tmp))
def testLayerRemovalBeforeRun(self):
"""test behavior when layer is removed before task begins"""
path = os.path.join(unitTestDataPath(), 'raster', 'with_color_table.tif')
raster_layer = QgsRasterLayer(path, "test")
self.assertTrue(raster_layer.isValid())
pipe = QgsRasterPipe()
self.assertTrue(pipe.set(raster_layer.dataProvider().clone()))
tmp = create_temp_filename('remove_layer.tif')
writer = QgsRasterFileWriter(tmp)
task = QgsRasterFileWriterTask(writer, pipe, 100, 100, raster_layer.extent(), raster_layer.crs(), QgsCoordinateTransformContext())
task.writeComplete.connect(self.onSuccess)
task.errorOccurred.connect(self.onFail)
# remove layer
raster_layer = None
QgsApplication.taskManager().addTask(task)
while not self.success and not self.fail:
QCoreApplication.processEvents()
# in this case will still get a positive result - since the pipe is cloned before the task
# begins the task is no longer dependent on the original layer
self.assertTrue(self.success)
self.assertFalse(self.fail)
self.assertTrue(os.path.exists(tmp))
def testFail(self):
"""test error writing a layer"""
path = os.path.join(unitTestDataPath(), 'raster', 'with_color_table.tif')
raster_layer = QgsRasterLayer(path, "test")
self.assertTrue(raster_layer.isValid())
pipe = QgsRasterPipe()
self.assertTrue(pipe.set(raster_layer.dataProvider().clone()))
tmp = create_temp_filename("/this/is/invalid/file.tif")
writer = QgsRasterFileWriter(tmp)
task = QgsRasterFileWriterTask(writer, pipe, 100, 100, raster_layer.extent(), raster_layer.crs(), QgsCoordinateTransformContext())
task.writeComplete.connect(self.onSuccess)
task.errorOccurred.connect(self.onFail)
QgsApplication.taskManager().addTask(task)
while not self.success and not self.fail:
QCoreApplication.processEvents()
self.assertFalse(self.success)
self.assertTrue(self.fail)
if __name__ == '__main__':
unittest.main()
|
kexplo/miasm | refs/heads/master | example/disasm/single_instr.py | 9 | from miasm2.arch.x86.arch import mn_x86
from miasm2.arch.x86.regs import EDX
l = mn_x86.fromstring('MOV EAX, EBX', 32)  # parse assembly text in 32-bit mode
print "instruction:", l
print "arg:", l.args[0]  # first operand (destination register EAX)
x = mn_x86.asm(l)  # assemble: returns the candidate byte encodings
print x
l.args[0] = EDX  # patch the destination operand to EDX
y = mn_x86.asm(l)
print y
print mn_x86.dis(y[0], 32)  # disassemble the first encoding back
|
baixuexue123/note | refs/heads/master | patterns/single_dispatch.py | 1 | from functools import singledispatch
@singledispatch
def func(a, b):
return 'default'
@func.register(int)
def _(a, b):
print('int')
return a+b
@func.register(str)
def _(a, b):
print('str')
return a+b
@func.register(tuple)
@func.register(list)
def _(a, b):
print('list or tuple')
return a+b
print(func(1, 2))
print(func('1', '2'))
print(func([1,2], [3,4]))
print(func((1,2), (3,4)))
class Score:
def __init__(self, val):
self.val = val
def __add__(self, other):
return self.val + other.val
@func.register(Score)
def _(a, b):
print('Score')
return a+b
print(func(Score(2), Score(3)))
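# Addendum (not in the original file): dispatch is driven solely by the type
# of the first argument, and func.dispatch(cls) returns the implementation
# that would be selected for that class without calling it.
print(func.dispatch(float) is func.dispatch(object))  # True -> default impl
print(func.dispatch(int)(1, 2))  # prints 'int', then 3 -- same as func(1, 2)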
|
rjeschmi/easybuild-framework | refs/heads/master | easybuild/framework/easyconfig/parser.py | 3 | # #
# Copyright 2013-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
# #
"""
This describes the easyconfig parser
The parser is format version aware
@author: Stijn De Weirdt (Ghent University)
"""
import os
import re
from vsc.utils import fancylogger
from easybuild.framework.easyconfig.format.format import FORMAT_DEFAULT_VERSION
from easybuild.framework.easyconfig.format.format import get_format_version, get_format_version_classes
from easybuild.tools.filetools import read_file, write_file
# deprecated easyconfig parameters, and their replacements
DEPRECATED_PARAMETERS = {
# <old_param>: (<new_param>, <deprecation_version>),
}
# replaced easyconfig parameters, and their replacements
REPLACED_PARAMETERS = {
'license': 'license_file',
'makeopts': 'buildopts',
'premakeopts': 'prebuildopts',
}
_log = fancylogger.getLogger('easyconfig.parser', fname=False)
def fetch_parameters_from_easyconfig(rawtxt, params):
"""
Fetch (initial) parameter definition from the given easyconfig file contents.
@param rawtxt: contents of the easyconfig file
@param params: list of parameter names to fetch values for
"""
param_values = []
for param in params:
regex = re.compile(r"^\s*%s\s*=\s*(?P<param>\S.*?)\s*$" % param, re.M)
res = regex.search(rawtxt)
if res:
param_values.append(res.group('param').strip("'\""))
else:
param_values.append(None)
_log.debug("Obtained parameters value for %s: %s" % (params, param_values))
return param_values
class EasyConfigParser(object):
"""Read the easyconfig file, return a parsed config object
Can contain references to multiple version and toolchain/toolchain versions
"""
def __init__(self, filename=None, format_version=None, rawcontent=None):
"""Initialise the EasyConfigParser class"""
self.log = fancylogger.getLogger(self.__class__.__name__, fname=False)
self.rawcontent = None # the actual unparsed content
self.get_fn = None # read method and args
self.set_fn = None # write method and args
self.format_version = format_version
self._formatter = None
if rawcontent is not None:
self.rawcontent = rawcontent
self._set_formatter()
elif filename is not None:
self._check_filename(filename)
self.process()
else:
self.log.error("Neither filename nor rawcontent provided to EasyConfigParser")
def process(self, filename=None):
"""Create an instance"""
self._read(filename=filename)
self._set_formatter()
def _check_filename(self, fn):
"""Perform sanity check on the filename, and set mechanism to set the content of the file"""
if os.path.isfile(fn):
self.get_fn = (read_file, (fn,))
self.set_fn = (write_file, (fn, self.rawcontent))
self.log.debug("Process filename %s with get function %s, set function %s" % (fn, self.get_fn, self.set_fn))
if self.get_fn is None:
self.log.error('Failed to determine get function for filename %s' % fn)
if self.set_fn is None:
self.log.error('Failed to determine set function for filename %s' % fn)
def _read(self, filename=None):
"""Read the easyconfig, dump content in self.rawcontent"""
if filename is not None:
self._check_filename(filename)
try:
self.rawcontent = self.get_fn[0](*self.get_fn[1])
except IOError, err:
self.log.error('Failed to obtain content with %s: %s' % (self.get_fn, err))
if not isinstance(self.rawcontent, basestring):
msg = 'rawcontent is not basestring: type %s, content %s' % (type(self.rawcontent), self.rawcontent)
self.log.error("Unexpected result for raw content: %s" % msg)
def _det_format_version(self):
"""Extract the format version from the raw content"""
if self.format_version is None:
self.format_version = get_format_version(self.rawcontent)
if self.format_version is None:
self.format_version = FORMAT_DEFAULT_VERSION
self.log.debug('No version found, using default %s' % self.format_version)
def _get_format_version_class(self):
"""Locate the class matching the version"""
if self.format_version is None:
self._det_format_version()
found_classes = get_format_version_classes(version=self.format_version)
if len(found_classes) == 1:
return found_classes[0]
elif not found_classes:
self.log.error('No format classes found matching version %s' % self.format_version)
else:
msg = 'More than one format class found matching version %s in %s' % (self.format_version, found_classes)
self.log.error(msg)
def _set_formatter(self):
"""Obtain instance of the formatter"""
if self._formatter is None:
klass = self._get_format_version_class()
self._formatter = klass()
self._formatter.parse(self.rawcontent)
def set_format_text(self):
"""Create the text for the formatter instance"""
# TODO create the data in self.rawcontent
raise NotImplementedError
def write(self, filename=None):
"""Write the easyconfig format instance, using content in self.rawcontent."""
if filename is not None:
self._check_filename(filename)
try:
self.set_fn[0](*self.set_fn[1])
except IOError, err:
self.log.error('Failed to process content with %s: %s' % (self.set_fn, err))
def set_specifications(self, specs):
"""Set specifications."""
self._formatter.set_specifications(specs)
def get_config_dict(self, validate=True):
"""Return parsed easyconfig as a dict."""
# allows bypassing the validation step, typically for testing
if validate:
self._formatter.validate()
return self._formatter.get_config_dict()
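# A minimal usage sketch (assumes the default format classes can parse this
# made-up easyconfig snippet):
#
#     parser = EasyConfigParser(rawcontent="name = 'HPL'\nversion = '2.0'\n")
#     cfg = parser.get_config_dict(validate=False)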
|
40123148/w17b | refs/heads/master | static/Brython3.1.3-20150514-095342/Lib/heapq.py | 628 | """Heap queue algorithm (a.k.a. priority queue).
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
Usage:
heap = [] # creates an empty heap
heappush(heap, item) # pushes a new item on the heap
item = heappop(heap) # pops the smallest item from the heap
item = heap[0] # smallest item on the heap without popping it
heapify(x) # transforms list into a heap, in-place, in linear time
item = heapreplace(heap, item) # pops and returns smallest item, and adds
# new item; the heap size is unchanged
Our API differs from textbook heap algorithms as follows:
- We use 0-based indexing. This makes the relationship between the
index for a node and the indexes for its children slightly less
obvious, but is more suitable since Python uses 0-based indexing.
- Our heappop() method returns the smallest item, not the largest.
These two make it possible to view the heap as a regular Python list
without surprises: heap[0] is the smallest item, and heap.sort()
maintains the heap invariant!
"""
# Original code by Kevin O'Connor, augmented by Tim Peters and Raymond Hettinger
__about__ = """Heap queues
[explanation by François Pinard]
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
The strange invariant above is meant to be an efficient memory
representation for a tournament. The numbers below are `k', not a[k]:
0
1 2
3 4 5 6
7 8 9 10 11 12 13 14
15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30
In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'. In
the usual binary tournament we see in sports, each cell is the winner
over the two cells it tops, and we can trace the winner down the tree
to see all opponents s/he had. However, in many computer applications
of such tournaments, we do not need to trace the history of a winner.
To be more memory efficient, when a winner is promoted, we try to
replace it by something else at a lower level, and the rule becomes
that a cell and the two cells it tops contain three different items,
but the top cell "wins" over the two topped cells.
If this heap invariant is protected at all times, index 0 is clearly
the overall winner. The simplest algorithmic way to remove it and
find the "next" winner is to move some loser (let's say cell 30 in the
diagram above) into the 0 position, and then percolate this new 0 down
the tree, exchanging values, until the invariant is re-established.
This is clearly logarithmic on the total number of items in the tree.
By iterating over all items, you get an O(n ln n) sort.
A nice feature of this sort is that you can efficiently insert new
items while the sort is going on, provided that the inserted items are
not "better" than the last 0'th element you extracted. This is
especially useful in simulation contexts, where the tree holds all
incoming events, and the "win" condition means the smallest scheduled
time. When an event schedules other events for execution, they are
scheduled into the future, so they can easily go into the heap. So, a
heap is a good structure for implementing schedulers (this is what I
used for my MIDI sequencer :-).
Various structures for implementing schedulers have been extensively
studied, and heaps are good for this, as they are reasonably speedy,
the speed is almost constant, and the worst case is not much different
than the average case. However, there are other representations which
are more efficient overall, yet the worst cases might be terrible.
Heaps are also very useful in big disk sorts. You most probably all
know that a big sort implies producing "runs" (which are pre-sorted
sequences, whose size is usually related to the amount of CPU memory),
followed by merging passes for these runs, and the merging is often
very cleverly organised[1]. It is very important that the initial
sort produces the longest runs possible. Tournaments are a good way
to achieve that. If, using all the memory available to hold a
tournament, you replace and percolate items that happen to fit the
current run, you'll produce runs which are twice the size of the
memory for random input, and much better for fuzzily ordered input.
Moreover, if you output the 0'th item on disk and get an input which
may not fit in the current tournament (because the value "wins" over
the last output value), it cannot fit in the heap, so the size of the
heap decreases. The freed memory could be cleverly reused immediately
for progressively building a second heap, which grows at exactly the
same rate the first heap is melting. When the first heap completely
vanishes, you switch heaps and start a new run. Clever and quite
effective!
In a word, heaps are useful memory structures to know. I use them in
a few applications, and I think it is good to keep a `heap' module
around. :-)
--------------------
[1] The disk balancing algorithms which are current, nowadays, are
more annoying than clever, and this is a consequence of the seeking
capabilities of the disks. On devices which cannot seek, like big
tape drives, the story was quite different, and one had to be very
clever to ensure (far in advance) that each tape movement will be the
most effective possible (that is, will best participate at
"progressing" the merge). Some tapes were even able to read
backwards, and this was also used to avoid the rewinding time.
Believe me, real good tape sorts were quite spectacular to watch!
From all times, sorting has always been a Great Art! :-)
"""
__all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'merge',
'nlargest', 'nsmallest', 'heappushpop']
from itertools import islice, count, tee, chain
def heappush(heap, item):
"""Push item onto heap, maintaining the heap invariant."""
heap.append(item)
_siftdown(heap, 0, len(heap)-1)
def heappop(heap):
"""Pop the smallest item off the heap, maintaining the heap invariant."""
lastelt = heap.pop() # raises appropriate IndexError if heap is empty
if heap:
returnitem = heap[0]
heap[0] = lastelt
_siftup(heap, 0)
else:
returnitem = lastelt
return returnitem
def heapreplace(heap, item):
"""Pop and return the current smallest value, and add the new item.
This is more efficient than heappop() followed by heappush(), and can be
more appropriate when using a fixed-size heap. Note that the value
returned may be larger than item! That constrains reasonable uses of
this routine unless written as part of a conditional replacement:
if item > heap[0]:
item = heapreplace(heap, item)
"""
returnitem = heap[0] # raises appropriate IndexError if heap is empty
heap[0] = item
_siftup(heap, 0)
return returnitem
def heappushpop(heap, item):
"""Fast version of a heappush followed by a heappop."""
if heap and heap[0] < item:
item, heap[0] = heap[0], item
_siftup(heap, 0)
return item
def heapify(x):
"""Transform list into a heap, in-place, in O(len(x)) time."""
n = len(x)
# Transform bottom-up. The largest index there's any point to looking at
# is the largest with a child index in-range, so must have 2*i + 1 < n,
# or i < (n-1)/2. If n is even = 2*j, this is (2*j-1)/2 = j-1/2 so
# j-1 is the largest, which is n//2 - 1. If n is odd = 2*j+1, this is
# (2*j+1-1)/2 = j so j-1 is the largest, and that's again n//2-1.
for i in reversed(range(n//2)):
_siftup(x, i)
def _heappushpop_max(heap, item):
"""Maxheap version of a heappush followed by a heappop."""
if heap and item < heap[0]:
item, heap[0] = heap[0], item
_siftup_max(heap, 0)
return item
def _heapify_max(x):
"""Transform list into a maxheap, in-place, in O(len(x)) time."""
n = len(x)
for i in reversed(range(n//2)):
_siftup_max(x, i)
def nlargest(n, iterable):
"""Find the n largest elements in a dataset.
Equivalent to: sorted(iterable, reverse=True)[:n]
"""
if n < 0:
return []
it = iter(iterable)
result = list(islice(it, n))
if not result:
return result
heapify(result)
_heappushpop = heappushpop
for elem in it:
_heappushpop(result, elem)
result.sort(reverse=True)
return result
def nsmallest(n, iterable):
"""Find the n smallest elements in a dataset.
Equivalent to: sorted(iterable)[:n]
"""
if n < 0:
return []
it = iter(iterable)
result = list(islice(it, n))
if not result:
return result
_heapify_max(result)
_heappushpop = _heappushpop_max
for elem in it:
_heappushpop(result, elem)
result.sort()
return result
# 'heap' is a heap at all indices >= startpos, except possibly for pos. pos
# is the index of a leaf with a possibly out-of-order value. Restore the
# heap invariant.
def _siftdown(heap, startpos, pos):
newitem = heap[pos]
# Follow the path to the root, moving parents down until finding a place
# newitem fits.
while pos > startpos:
parentpos = (pos - 1) >> 1
parent = heap[parentpos]
if newitem < parent:
heap[pos] = parent
pos = parentpos
continue
break
heap[pos] = newitem
# The child indices of heap index pos are already heaps, and we want to make
# a heap at index pos too. We do this by bubbling the smaller child of
# pos up (and so on with that child's children, etc) until hitting a leaf,
# then using _siftdown to move the oddball originally at index pos into place.
#
# We *could* break out of the loop as soon as we find a pos where newitem <=
# both its children, but it turns out that's not a good idea, even though
# many books write the algorithm that way. During a heap pop, the last array
# element is sifted in, and that tends to be large, so that comparing it
# against values starting from the root usually doesn't pay (= usually doesn't
# get us out of the loop early). See Knuth, Volume 3, where this is
# explained and quantified in an exercise.
#
# Cutting the # of comparisons is important, since these routines have no
# way to extract "the priority" from an array element, so that intelligence
# is likely to be hiding in custom comparison methods, or in array elements
# storing (priority, record) tuples. Comparisons are thus potentially
# expensive.
#
# On random arrays of length 1000, making this change cut the number of
# comparisons made by heapify() a little, and those made by exhaustive
# heappop() a lot, in accord with theory. Here are typical results from 3
# runs (3 just to demonstrate how small the variance is):
#
# Compares needed by heapify Compares needed by 1000 heappops
# -------------------------- --------------------------------
# 1837 cut to 1663 14996 cut to 8680
# 1855 cut to 1659 14966 cut to 8678
# 1847 cut to 1660 15024 cut to 8703
#
# Building the heap by using heappush() 1000 times instead required
# 2198, 2148, and 2219 compares: heapify() is more efficient, when
# you can use it.
#
# The total compares needed by list.sort() on the same lists were 8627,
# 8627, and 8632 (this should be compared to the sum of heapify() and
# heappop() compares): list.sort() is (unsurprisingly!) more efficient
# for sorting.
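# A rough, illustrative way to reproduce such comparison counts (the Counted
# class is hypothetical, not part of this module):
#
#     import random
#
#     class Counted(int):
#         count = 0
#         def __lt__(self, other):
#             Counted.count += 1
#             return int.__lt__(self, other)
#
#     data = [Counted(random.randrange(10**6)) for _ in range(1000)]
#     heapify(data)
#     print(Counted.count)   # roughly 1660, in line with the numbers above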
def _siftup(heap, pos):
endpos = len(heap)
startpos = pos
newitem = heap[pos]
# Bubble up the smaller child until hitting a leaf.
childpos = 2*pos + 1 # leftmost child position
while childpos < endpos:
# Set childpos to index of smaller child.
rightpos = childpos + 1
if rightpos < endpos and not heap[childpos] < heap[rightpos]:
childpos = rightpos
# Move the smaller child up.
heap[pos] = heap[childpos]
pos = childpos
childpos = 2*pos + 1
# The leaf at pos is empty now. Put newitem there, and bubble it up
# to its final resting place (by sifting its parents down).
heap[pos] = newitem
_siftdown(heap, startpos, pos)
def _siftdown_max(heap, startpos, pos):
'Maxheap variant of _siftdown'
newitem = heap[pos]
# Follow the path to the root, moving parents down until finding a place
# newitem fits.
while pos > startpos:
parentpos = (pos - 1) >> 1
parent = heap[parentpos]
if parent < newitem:
heap[pos] = parent
pos = parentpos
continue
break
heap[pos] = newitem
def _siftup_max(heap, pos):
'Maxheap variant of _siftup'
endpos = len(heap)
startpos = pos
newitem = heap[pos]
# Bubble up the larger child until hitting a leaf.
childpos = 2*pos + 1 # leftmost child position
while childpos < endpos:
# Set childpos to index of larger child.
rightpos = childpos + 1
if rightpos < endpos and not heap[rightpos] < heap[childpos]:
childpos = rightpos
# Move the larger child up.
heap[pos] = heap[childpos]
pos = childpos
childpos = 2*pos + 1
# The leaf at pos is empty now. Put newitem there, and bubble it up
# to its final resting place (by sifting its parents down).
heap[pos] = newitem
_siftdown_max(heap, startpos, pos)
# If available, use C implementation
#_heapq does not exist in brython, so lets just comment it out.
#try:
# from _heapq import *
#except ImportError:
# pass
def merge(*iterables):
'''Merge multiple sorted inputs into a single sorted output.
Similar to sorted(itertools.chain(*iterables)) but returns a generator,
does not pull the data into memory all at once, and assumes that each of
the input streams is already sorted (smallest to largest).
>>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25]))
[0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25]
'''
_heappop, _heapreplace, _StopIteration = heappop, heapreplace, StopIteration
_len = len
h = []
h_append = h.append
for itnum, it in enumerate(map(iter, iterables)):
try:
next = it.__next__
h_append([next(), itnum, next])
except _StopIteration:
pass
heapify(h)
while _len(h) > 1:
try:
while True:
v, itnum, next = s = h[0]
yield v
s[0] = next() # raises StopIteration when exhausted
_heapreplace(h, s) # restore heap condition
except _StopIteration:
_heappop(h) # remove empty iterator
if h:
# fast case when only a single iterator remains
v, itnum, next = h[0]
yield v
yield from next.__self__
# Extend the implementations of nsmallest and nlargest to use a key= argument
_nsmallest = nsmallest
def nsmallest(n, iterable, key=None):
"""Find the n smallest elements in a dataset.
Equivalent to: sorted(iterable, key=key)[:n]
"""
# Short-cut for n==1 is to use min() when len(iterable)>0
if n == 1:
it = iter(iterable)
head = list(islice(it, 1))
if not head:
return []
if key is None:
return [min(chain(head, it))]
return [min(chain(head, it), key=key)]
# When n>=size, it's faster to use sorted()
try:
size = len(iterable)
except (TypeError, AttributeError):
pass
else:
if n >= size:
return sorted(iterable, key=key)[:n]
# When key is None, use simpler decoration
if key is None:
it = zip(iterable, count()) # decorate
result = _nsmallest(n, it)
return [r[0] for r in result] # undecorate
# General case, slowest method
in1, in2 = tee(iterable)
it = zip(map(key, in1), count(), in2) # decorate
result = _nsmallest(n, it)
return [r[2] for r in result] # undecorate
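# An illustrative use of the key= argument (doctest-style sketch):
#
#     >>> nsmallest(2, ['pear', 'fig', 'apple'], key=len)
#     ['fig', 'pear']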
_nlargest = nlargest
def nlargest(n, iterable, key=None):
"""Find the n largest elements in a dataset.
Equivalent to: sorted(iterable, key=key, reverse=True)[:n]
"""
# Short-cut for n==1 is to use max() when len(iterable)>0
if n == 1:
it = iter(iterable)
head = list(islice(it, 1))
if not head:
return []
if key is None:
return [max(chain(head, it))]
return [max(chain(head, it), key=key)]
# When n>=size, it's faster to use sorted()
try:
size = len(iterable)
except (TypeError, AttributeError):
pass
else:
if n >= size:
return sorted(iterable, key=key, reverse=True)[:n]
# When key is None, use simpler decoration
if key is None:
it = zip(iterable, count(0,-1)) # decorate
result = _nlargest(n, it)
return [r[0] for r in result] # undecorate
# General case, slowest method
in1, in2 = tee(iterable)
it = zip(map(key, in1), count(0,-1), in2) # decorate
result = _nlargest(n, it)
return [r[2] for r in result] # undecorate
if __name__ == "__main__":
# Simple sanity test
heap = []
data = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0]
for item in data:
heappush(heap, item)
sort = []
while heap:
sort.append(heappop(heap))
print(sort)
import doctest
doctest.testmod()
|
Yong-Lee/decode-Django | refs/heads/master | Django-1.5.1/django/views/generic/detail.py | 4 | from __future__ import unicode_literals
from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist
from django.db import models
from django.http import Http404
from django.utils.translation import ugettext as _
from django.views.generic.base import TemplateResponseMixin, ContextMixin, View
# Displays the data of self.object; users can override get_context_data to
# customize the data to display. By default DetailView only returns the data
# of a single record of a model; override get_context_data to return the data you need.
class SingleObjectMixin(ContextMixin):
"""
Provides the ability to retrieve a single object for further manipulation.
"""
model = None
queryset = None
slug_field = 'slug'
context_object_name = None
slug_url_kwarg = 'slug'
pk_url_kwarg = 'pk'
def get_object(self, queryset=None):
"""
Returns the object the view is displaying.
By default this requires `self.queryset` and a `pk` or `slug` argument
in the URLconf, but subclasses can override this to return any object.
"""
# Use a custom queryset if provided; this is required for subclasses
# like DateDetailView
if queryset is None:
queryset = self.get_queryset()
# Next, try looking up by primary key.
pk = self.kwargs.get(self.pk_url_kwarg, None)
slug = self.kwargs.get(self.slug_url_kwarg, None)
if pk is not None:
queryset = queryset.filter(pk=pk)
# Next, try looking up by slug.
elif slug is not None:
slug_field = self.get_slug_field()
queryset = queryset.filter(**{slug_field: slug})
# If none of those are defined, it's an error.
else:
raise AttributeError("Generic detail view %s must be called with "
"either an object pk or a slug."
% self.__class__.__name__)
try:
# Get the single item from the filtered queryset
obj = queryset.get()  # we want just a single object
except ObjectDoesNotExist:
raise Http404(_("No %(verbose_name)s found matching the query") %
{'verbose_name': queryset.model._meta.verbose_name})
return obj
def get_queryset(self):
"""
Get the queryset to look an object up against. May not be called if
`get_object` is overridden.
"""
if self.queryset is None:
if self.model:
return self.model._default_manager.all()
else:
raise ImproperlyConfigured("%(cls)s is missing a queryset. Define "
"%(cls)s.model, %(cls)s.queryset, or override "
"%(cls)s.get_queryset()." % {
'cls': self.__class__.__name__
})
return self.queryset._clone()
def get_slug_field(self):
"""
Get the name of a slug field to be used to look up by slug.
"""
return self.slug_field
def get_context_object_name(self, obj):
"""
Get the name to use for the object.
"""
if self.context_object_name:
return self.context_object_name
elif isinstance(obj, models.Model):
return obj._meta.object_name.lower()
else:
return None
def get_context_data(self, **kwargs):
"""
Insert the single object into the context dict.
"""
context = {}
# if the template name is not specified, it will be inferred automatically
context_object_name = self.get_context_object_name(self.object)
if context_object_name:
context[context_object_name] = self.object
context.update(kwargs)
return super(SingleObjectMixin, self).get_context_data(**context)
class BaseDetailView(SingleObjectMixin, View):
"""
A base view for displaying a single object
"""
def get(self, request, *args, **kwargs):
self.object = self.get_object()
context = self.get_context_data(object=self.object)
return self.render_to_response(context)
class SingleObjectTemplateResponseMixin(TemplateResponseMixin):
template_name_field = None
template_name_suffix = '_detail'
def get_template_names(self):  # override get_template_names to infer the template name
"""
Return a list of template names to be used for the request. May not be
called if render_to_response is overridden. Returns the following list:
* the value of ``template_name`` on the view (if provided)
* the contents of the ``template_name_field`` field on the
object instance that the view is operating upon (if available)
* ``<app_label>/<object_name><template_name_suffix>.html``
"""
try:
names = super(SingleObjectTemplateResponseMixin, self).get_template_names()
except ImproperlyConfigured:
# If template_name isn't specified, it's not a problem --
# we just start with an empty list.
names = []
# If self.template_name_field is set, grab the value of the field
# of that name from the object; this is the most specific template
# name, if given.
if self.object and self.template_name_field:
name = getattr(self.object, self.template_name_field, None)
if name:
names.insert(0, name)
# The least-specific option is the default <app>/<model>_detail.html;
# only use this if the object in question is a model.
if isinstance(self.object, models.Model):
names.append("%s/%s%s.html" % (
self.object._meta.app_label,
self.object._meta.object_name.lower(),
self.template_name_suffix
))
elif hasattr(self, 'model') and self.model is not None and issubclass(self.model, models.Model):
names.append("%s/%s%s.html" % (
self.model._meta.app_label,
self.model._meta.object_name.lower(),
self.template_name_suffix
))
return names
class DetailView(SingleObjectTemplateResponseMixin, BaseDetailView):
"""
Render a "detail" view of an object.
By default this is a model instance looked up from `self.queryset`, but the
view will support display of *any* object by overriding `self.get_object()`.
"""
|
EliotBerriot/django | refs/heads/master | tests/template_tests/test_callables.py | 347 | from __future__ import unicode_literals
from unittest import TestCase
from django.template import Context, Engine
class CallableVariablesTests(TestCase):
@classmethod
def setUpClass(cls):
cls.engine = Engine()
super(CallableVariablesTests, cls).setUpClass()
def test_callable(self):
class Doodad(object):
def __init__(self, value):
self.num_calls = 0
self.value = value
def __call__(self):
self.num_calls += 1
return {"the_value": self.value}
my_doodad = Doodad(42)
c = Context({"my_doodad": my_doodad})
# We can't access ``my_doodad.value`` in the template, because
# ``my_doodad.__call__`` will be invoked first, yielding a dictionary
# without a key ``value``.
t = self.engine.from_string('{{ my_doodad.value }}')
self.assertEqual(t.render(c), '')
# We can confirm that the doodad has been called
self.assertEqual(my_doodad.num_calls, 1)
# But we can access keys on the dict that's returned
# by ``__call__``, instead.
t = self.engine.from_string('{{ my_doodad.the_value }}')
self.assertEqual(t.render(c), '42')
self.assertEqual(my_doodad.num_calls, 2)
def test_alters_data(self):
class Doodad(object):
alters_data = True
def __init__(self, value):
self.num_calls = 0
self.value = value
def __call__(self):
self.num_calls += 1
return {"the_value": self.value}
my_doodad = Doodad(42)
c = Context({"my_doodad": my_doodad})
# Since ``my_doodad.alters_data`` is True, the template system will not
# try to call our doodad but will use string_if_invalid
t = self.engine.from_string('{{ my_doodad.value }}')
self.assertEqual(t.render(c), '')
t = self.engine.from_string('{{ my_doodad.the_value }}')
self.assertEqual(t.render(c), '')
# Double-check that the object was really never called during the
# template rendering.
self.assertEqual(my_doodad.num_calls, 0)
def test_do_not_call(self):
class Doodad(object):
do_not_call_in_templates = True
def __init__(self, value):
self.num_calls = 0
self.value = value
def __call__(self):
self.num_calls += 1
return {"the_value": self.value}
my_doodad = Doodad(42)
c = Context({"my_doodad": my_doodad})
# Since ``my_doodad.do_not_call_in_templates`` is True, the template
# system will not try to call our doodad. We can access its attributes
# as normal, and we don't have access to the dict that it returns when
# called.
t = self.engine.from_string('{{ my_doodad.value }}')
self.assertEqual(t.render(c), '42')
t = self.engine.from_string('{{ my_doodad.the_value }}')
self.assertEqual(t.render(c), '')
# Double-check that the object was really never called during the
# template rendering.
self.assertEqual(my_doodad.num_calls, 0)
def test_do_not_call_and_alters_data(self):
# If we combine ``alters_data`` and ``do_not_call_in_templates``, the
# ``alters_data`` attribute will not make any difference in the
# template system's behavior.
class Doodad(object):
do_not_call_in_templates = True
alters_data = True
def __init__(self, value):
self.num_calls = 0
self.value = value
def __call__(self):
self.num_calls += 1
return {"the_value": self.value}
my_doodad = Doodad(42)
c = Context({"my_doodad": my_doodad})
t = self.engine.from_string('{{ my_doodad.value }}')
self.assertEqual(t.render(c), '42')
t = self.engine.from_string('{{ my_doodad.the_value }}')
self.assertEqual(t.render(c), '')
# Double-check that the object was really never called during the
# template rendering.
self.assertEqual(my_doodad.num_calls, 0)
|
elkingtonmcb/django | refs/heads/master | tests/auth_tests/test_signals.py | 312 | import datetime
from django.contrib.auth import signals
from django.contrib.auth.models import User
from django.test import TestCase, override_settings
from django.test.client import RequestFactory
@override_settings(USE_TZ=False,
PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='auth_tests.urls')
class SignalTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create(
password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='testclient',
first_name='Test', last_name='Client', email='testclient@example.com', is_staff=False, is_active=True,
date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
)
cls.u3 = User.objects.create(
password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='staff',
first_name='Staff', last_name='Member', email='staffmember@example.com', is_staff=True, is_active=True,
date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
)
def listener_login(self, user, **kwargs):
self.logged_in.append(user)
def listener_logout(self, user, **kwargs):
self.logged_out.append(user)
def listener_login_failed(self, sender, credentials, **kwargs):
self.login_failed.append(credentials)
def setUp(self):
"""Set up the listeners and reset the logged in/logged out counters"""
self.logged_in = []
self.logged_out = []
self.login_failed = []
signals.user_logged_in.connect(self.listener_login)
signals.user_logged_out.connect(self.listener_logout)
signals.user_login_failed.connect(self.listener_login_failed)
def tearDown(self):
"""Disconnect the listeners"""
signals.user_logged_in.disconnect(self.listener_login)
signals.user_logged_out.disconnect(self.listener_logout)
signals.user_login_failed.disconnect(self.listener_login_failed)
def test_login(self):
# Only a successful login will trigger the success signal.
self.client.login(username='testclient', password='bad')
self.assertEqual(len(self.logged_in), 0)
self.assertEqual(len(self.login_failed), 1)
self.assertEqual(self.login_failed[0]['username'], 'testclient')
# verify the password is cleansed
self.assertIn('***', self.login_failed[0]['password'])
# Like this:
self.client.login(username='testclient', password='password')
self.assertEqual(len(self.logged_in), 1)
self.assertEqual(self.logged_in[0].username, 'testclient')
# Ensure there were no more failures.
self.assertEqual(len(self.login_failed), 1)
def test_logout_anonymous(self):
# The log_out function will still trigger the signal for anonymous
# users.
self.client.get('/logout/next_page/')
self.assertEqual(len(self.logged_out), 1)
self.assertEqual(self.logged_out[0], None)
def test_logout(self):
self.client.login(username='testclient', password='password')
self.client.get('/logout/next_page/')
self.assertEqual(len(self.logged_out), 1)
self.assertEqual(self.logged_out[0].username, 'testclient')
def test_update_last_login(self):
"""Ensure that only `last_login` is updated in `update_last_login`"""
user = self.u3
old_last_login = user.last_login
user.username = "This username shouldn't get saved"
request = RequestFactory().get('/login')
signals.user_logged_in.send(sender=user.__class__, request=request,
user=user)
user = User.objects.get(pk=self.u3.pk)
self.assertEqual(user.username, 'staff')
self.assertNotEqual(user.last_login, old_last_login)
|
dmerejkowsky/qibuild | refs/heads/master | python/qibuild/test/test_project_config.py | 1 | ## Copyright (c) 2012-2015 Aldebaran Robotics. All rights reserved.
## Use of this source code is governed by a BSD-style license that can be
## found in the COPYING file.
""" Automatic testing for qibuild.project.ProjectConfig
"""
from qibuild.config import ProjectConfig
import unittest
from StringIO import StringIO
def cfg_from_string(xml_string, user_config=None):
cfg_loc = StringIO(xml_string)
project_cfg = ProjectConfig()
project_cfg.read(cfg_loc)
return project_cfg
class ProjectConfigTestClass(unittest.TestCase):
def test_simple_read(self):
xml = """
<project name="foo" />
"""
project_cfg = cfg_from_string(xml)
self.assertEqual(project_cfg.name, "foo")
def test_read_depends(self):
xml = """
<project name="foo">
<depends runtime="true" buildtime="true"
names="bar baz"
/>
<depends runtime="true"
names="spam" />
/>
<depends buildtime="true"
names="eggs"
/>
<depends testtime="true"
names="gtest"
/>
</project>
"""
project_cfg = cfg_from_string(xml)
self.assertEqual(project_cfg.build_depends, set(["bar", "baz", "eggs"]))
self.assertEqual(project_cfg.run_depends, set(["bar", "baz", "spam"]))
self.assertEqual(project_cfg.test_depends, set(["gtest"]))
def test_write(tmpdir):
cfg = ProjectConfig()
cfg.name = "foobar"
cfg.build_depends = set(["foo", "bar"])
cfg.run_depends = set(["foo"])
cfg.test_depends = set(["foo", "bar", "gtest"])
xml = tmpdir.join("project.xml")
cfg.write(xml.strpath)
cfg2 = ProjectConfig()
cfg2.read(xml.strpath)
assert cfg2 == cfg
if __name__ == "__main__":
unittest.main()
|
iho/wagtail | refs/heads/master | wagtail/wagtailusers/tests.py | 7 | from __future__ import unicode_literals
import unittest
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group, Permission
from django.utils import six
from wagtail.tests.utils import WagtailTestUtils
from wagtail.wagtailcore import hooks
from wagtail.wagtailusers.models import UserProfile
from wagtail.wagtailcore.models import Page, GroupPagePermission
class TestUserIndexView(TestCase, WagtailTestUtils):
def setUp(self):
# create a user that should be visible in the listing
self.test_user = get_user_model().objects.create_user(username='testuser', email='testuser@email.com', password='password')
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailusers_users:index'), params)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/index.html')
self.assertContains(response, 'testuser')
def test_allows_negative_ids(self):
# see https://github.com/torchbox/wagtail/issues/565
get_user_model().objects.create_user('guardian', 'guardian@example.com', 'gu@rd14n', id=-1)
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'testuser')
self.assertContains(response, 'guardian')
def test_search(self):
response = self.get({'q': "Hello"})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['query_string'], "Hello")
def test_pagination(self):
pages = ['0', '1', '-1', '9999', 'Not a page']
for page in pages:
response = self.get({'p': page})
self.assertEqual(response.status_code, 200)
class TestUserCreateView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailusers_users:add'), params)
def post(self, post_data={}):
return self.client.post(reverse('wagtailusers_users:add'), post_data)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/create.html')
def test_create(self):
response = self.post({
'username': "testuser",
'email': "test@user.com",
'first_name': "Test",
'last_name': "User",
'password1': "password",
'password2': "password",
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_users:index'))
# Check that the user was created
users = get_user_model().objects.filter(username='testuser')
self.assertEqual(users.count(), 1)
self.assertEqual(users.first().email, 'test@user.com')
class TestUserEditView(TestCase, WagtailTestUtils):
def setUp(self):
# Create a user to edit
self.test_user = get_user_model().objects.create_user(username='testuser', email='testuser@email.com', password='password')
# Login
self.login()
def get(self, params={}, user_id=None):
return self.client.get(reverse('wagtailusers_users:edit', args=(user_id or self.test_user.id, )), params)
def post(self, post_data={}, user_id=None):
return self.client.post(reverse('wagtailusers_users:edit', args=(user_id or self.test_user.id, )), post_data)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/edit.html')
def test_nonexistant_redirect(self):
self.assertEqual(self.get(user_id=100000).status_code, 404)
def test_edit(self):
response = self.post({
'username': "testuser",
'email': "test@user.com",
'first_name': "Edited",
'last_name': "User",
'password1': "password",
'password2': "password",
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_users:index'))
# Check that the user was edited
user = get_user_model().objects.get(id=self.test_user.id)
self.assertEqual(user.first_name, 'Edited')
def test_edit_validation_error(self):
# Leave "username" field blank. This should give a validation error
response = self.post({
'username': "",
'email': "test@user.com",
'first_name': "Teset",
'last_name': "User",
'password1': "password",
'password2': "password",
})
# Should not redirect to index
self.assertEqual(response.status_code, 200)
class TestUserProfileCreation(TestCase, WagtailTestUtils):
def setUp(self):
# Create a user
self.test_user = get_user_model().objects.create_user(username='testuser', email='testuser@email.com', password='password')
def test_user_created_without_profile(self):
self.assertEqual(UserProfile.objects.filter(user=self.test_user).count(), 0)
with self.assertRaises(UserProfile.DoesNotExist):
self.test_user.userprofile
def test_user_profile_created_when_method_called(self):
self.assertIsInstance(UserProfile.get_for_user(self.test_user), UserProfile)
# and get it from the db too
self.assertEqual(UserProfile.objects.filter(user=self.test_user).count(), 1)
class TestGroupIndexView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailusers_groups:index'), params)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/groups/index.html')
def test_search(self):
response = self.get({'q': "Hello"})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['query_string'], "Hello")
def test_pagination(self):
pages = ['0', '1', '-1', '9999', 'Not a page']
for page in pages:
response = self.get({'p': page})
self.assertEqual(response.status_code, 200)
class TestGroupCreateView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailusers_groups:add'), params)
def post(self, post_data={}):
post_defaults = {
'page_permissions-TOTAL_FORMS': ['0'],
'page_permissions-MAX_NUM_FORMS': ['1000'],
'page_permissions-INITIAL_FORMS': ['0'],
}
for k, v in six.iteritems(post_defaults):
post_data[k] = post_data.get(k, v)
return self.client.post(reverse('wagtailusers_groups:add'), post_data)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/groups/create.html')
def test_create_group(self):
response = self.post({'name': "test group"})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# Check that the user was created
groups = Group.objects.filter(name='test group')
self.assertEqual(groups.count(), 1)
def test_group_create_adding_permissions(self):
response = self.post({
'name': "test group",
'page_permissions-0-id': [''],
'page_permissions-0-page': ['1'],
'page_permissions-0-permission_type': ['publish'],
'page_permissions-1-id': [''],
'page_permissions-1-page': ['1'],
'page_permissions-1-permission_type': ['edit'],
'page_permissions-TOTAL_FORMS': ['2'],
})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# The test group now exists, with two page permissions
new_group = Group.objects.get(name='test group')
self.assertEqual(new_group.page_permissions.all().count(), 2)
@unittest.expectedFailure
def test_duplicate_page_permissions_error(self):
# Try to submit duplicate page permission entries
response = self.post({
'name': "test group",
'page_permissions-0-id': [''],
'page_permissions-0-page': ['1'],
'page_permissions-0-permission_type': ['publish'],
'page_permissions-1-id': [''],
'page_permissions-1-page': ['1'],
'page_permissions-1-permission_type': ['publish'],
'page_permissions-TOTAL_FORMS': ['2'],
})
self.assertEqual(response.status_code, 200)
# the second form should have errors
self.assertEqual(bool(response.context['formset'].errors[0]), False)
self.assertEqual(bool(response.context['formset'].errors[1]), True)
class TestGroupEditView(TestCase, WagtailTestUtils):
def setUp(self):
# Create a group to edit
self.test_group = Group.objects.create(name='test group')
self.root_page = Page.objects.get(id=1)
self.root_add_permission = GroupPagePermission.objects.create(page=self.root_page,
permission_type='add',
group=self.test_group)
# Get the hook-registered permissions, and add one to this group
self.registered_permissions = Permission.objects.none()
for fn in hooks.get_hooks('register_permissions'):
self.registered_permissions = self.registered_permissions | fn()
self.existing_permission = self.registered_permissions.order_by('pk')[0]
self.another_permission = self.registered_permissions.order_by('pk')[1]
self.test_group.permissions.add(self.existing_permission)
# Login
self.login()
def get(self, params={}, group_id=None):
return self.client.get(reverse('wagtailusers_groups:edit', args=(group_id or self.test_group.id, )), params)
def post(self, post_data={}, group_id=None):
post_defaults = {
'name': 'test group',
'permissions': [self.existing_permission.id],
'page_permissions-TOTAL_FORMS': ['1'],
'page_permissions-MAX_NUM_FORMS': ['1000'],
'page_permissions-INITIAL_FORMS': ['1'], # as we have one page permission already
'page_permissions-0-id': [self.root_add_permission.id],
'page_permissions-0-page': [self.root_add_permission.page.id],
'page_permissions-0-permission_type': [self.root_add_permission.permission_type]
}
for k, v in six.iteritems(post_defaults):
post_data[k] = post_data.get(k, v)
return self.client.post(reverse('wagtailusers_groups:edit', args=(group_id or self.test_group.id, )), post_data)
def add_non_registered_perm(self):
# Some groups may have django permissions assigned that are not
# hook-registered as part of the wagtail interface. We need to ensure
# that these permissions are not overwritten by our views.
# Tests that use this method are testing the aforementioned
# functionality.
self.non_registered_perms = Permission.objects.exclude(id__in=self.registered_permissions)
self.non_registered_perm = self.non_registered_perms[0]
self.test_group.permissions.add(self.non_registered_perm)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/groups/edit.html')
def test_nonexistant_group_redirect(self):
self.assertEqual(self.get(group_id=100000).status_code, 404)
def test_group_edit(self):
response = self.post({'name': "test group edited"})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# Check that the group was edited
group = Group.objects.get(id=self.test_group.id)
self.assertEqual(group.name, 'test group edited')
def test_group_edit_validation_error(self):
# Leave "name" field blank. This should give a validation error
response = self.post({'name': ""})
# Should not redirect to index
self.assertEqual(response.status_code, 200)
def test_group_edit_adding_page_permissions(self):
# The test group has one page permission to begin with
self.assertEqual(self.test_group.page_permissions.count(), 1)
response = self.post({
'page_permissions-1-id': [''],
'page_permissions-1-page': ['1'],
'page_permissions-1-permission_type': ['publish'],
'page_permissions-2-id': [''],
'page_permissions-2-page': ['1'],
'page_permissions-2-permission_type': ['edit'],
'page_permissions-TOTAL_FORMS': ['3'],
})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# The test group now has three page permissions
self.assertEqual(self.test_group.page_permissions.count(), 3)
def test_group_edit_deleting_page_permissions(self):
# The test group has one page permissions to begin with
self.assertEqual(self.test_group.page_permissions.count(), 1)
response = self.post({
'page_permissions-0-DELETE': ['1'],
})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# The test group now has zero page permissions
self.assertEqual(self.test_group.page_permissions.count(), 0)
def test_group_edit_loads_with_page_permissions_shown(self):
# The test group has one page permission to begin with
self.assertEqual(self.test_group.page_permissions.count(), 1)
response = self.get()
self.assertEqual(response.context['formset'].management_form['INITIAL_FORMS'].value(), 1)
self.assertEqual(response.context['formset'].forms[0].instance, self.root_add_permission)
root_edit_perm = GroupPagePermission.objects.create(page=self.root_page,
permission_type='edit',
group=self.test_group)
# The test group now has two page permissions
self.assertEqual(self.test_group.page_permissions.count(), 2)
# Reload the page and check the form instances
response = self.get()
self.assertEqual(response.context['formset'].management_form['INITIAL_FORMS'].value(), 2)
self.assertEqual(response.context['formset'].forms[0].instance, self.root_add_permission)
self.assertEqual(response.context['formset'].forms[1].instance, root_edit_perm)
def test_duplicate_page_permissions_error(self):
# Try to submit duplicate page permission entries
response = self.post({
'page_permissions-1-id': [''],
'page_permissions-1-page': [self.root_add_permission.page.id],
'page_permissions-1-permission_type': [self.root_add_permission.permission_type],
'page_permissions-TOTAL_FORMS': ['2'],
})
self.assertEqual(response.status_code, 200)
# the second form should have errors
self.assertEqual(bool(response.context['formset'].errors[0]), False)
self.assertEqual(bool(response.context['formset'].errors[1]), True)
def test_group_add_registered_django_permissions(self):
# The test group has one django permission to begin with
self.assertEqual(self.test_group.permissions.count(), 1)
response = self.post({
'permissions': [self.existing_permission.id, self.another_permission.id]
})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
self.assertEqual(self.test_group.permissions.count(), 2)
def test_group_form_includes_non_registered_permissions_in_initial_data(self):
self.add_non_registered_perm()
original_permissions = self.test_group.permissions.all()
self.assertEqual(original_permissions.count(), 2)
response = self.get()
# See that the form is set up with the correct initial data
self.assertEqual(response.context['form'].initial.get('permissions'), list(original_permissions.values_list('id', flat=True)))
def test_group_retains_non_registered_permissions_when_editing(self):
self.add_non_registered_perm()
original_permissions = list(self.test_group.permissions.all()) # list() to force evaluation
# submit the form with no changes (only submitting the existing
# permission, as in the self.post function definition)
self.post()
# See that the group has the same permissions as before
self.assertEqual(list(self.test_group.permissions.all()), original_permissions)
self.assertEqual(self.test_group.permissions.count(), 2)
def test_group_retains_non_registered_permissions_when_adding(self):
self.add_non_registered_perm()
# Add a second registered permission
self.post({
'permissions': [self.existing_permission.id, self.another_permission.id]
})
# See that there are now three permissions in total
self.assertEqual(self.test_group.permissions.count(), 3)
# ...including the non-registered one
self.assertIn(self.non_registered_perm, self.test_group.permissions.all())
def test_group_retains_non_registered_permissions_when_deleting(self):
self.add_non_registered_perm()
# Delete all registered permissions
self.post({'permissions': []})
# See that the non-registered permission is still there
self.assertEqual(self.test_group.permissions.count(), 1)
self.assertEqual(self.test_group.permissions.all()[0], self.non_registered_perm)
|
benschmaus/catapult | refs/heads/master | third_party/google-endpoints/pyasn1_modules/rfc3414.py | 127 | #
# SNMPv3 message syntax
#
# ASN.1 source from:
# http://www.ietf.org/rfc/rfc3414.txt
#
from pyasn1.type import univ, namedtype, namedval, tag, constraint
class UsmSecurityParameters(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('msgAuthoritativeEngineID', univ.OctetString()),
namedtype.NamedType('msgAuthoritativeEngineBoots', univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, 2147483647))),
namedtype.NamedType('msgAuthoritativeEngineTime', univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, 2147483647))),
namedtype.NamedType('msgUserName', univ.OctetString().subtype(subtypeSpec=constraint.ValueSizeConstraint(0, 32))),
namedtype.NamedType('msgAuthenticationParameters', univ.OctetString()),
namedtype.NamedType('msgPrivacyParameters', univ.OctetString())
)
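# An illustrative encoding sketch (assumes pyasn1's BER codec; the field
# values are dummies):
#
#     from pyasn1.codec.ber import encoder
#     usm = UsmSecurityParameters()
#     usm.setComponentByName('msgAuthoritativeEngineID', univ.OctetString('engine'))
#     usm.setComponentByName('msgAuthoritativeEngineBoots', 1)
#     usm.setComponentByName('msgAuthoritativeEngineTime', 0)
#     usm.setComponentByName('msgUserName', 'user')
#     usm.setComponentByName('msgAuthenticationParameters', '')
#     usm.setComponentByName('msgPrivacyParameters', '')
#     substrate = encoder.encode(usm)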
|
UK992/servo | refs/heads/master | tests/wpt/web-platform-tests/clear-site-data/support/echo-clear-site-data.py | 46 | import json
RESPONSE = """
<!DOCTYPE html>
<html>
<head>
<title>Clear-Site-Data</title>
<script src="test_utils.sub.js"></script>
</head>
<body>
<script>
/**
* A map between a datatype name and whether it is empty.
* @property Object.<string, boolean>
*/
var report = {};
Promise.all(TestUtils.DATATYPES.map(function(datatype) {
return datatype.isEmpty().then(function(isEmpty) {
report[datatype.name] = isEmpty;
});
})).then(function() {
window.top.postMessage(report, "*");
});
</script>
</body>
</html>
"""
# A support server that receives a list of datatypes in the GET query
# and returns a Clear-Site-Data header with those datatypes. The content
# of the response is a html site using postMessage to report the status
# of the datatypes, so that if used in an iframe, it can inform the
# embedder whether the data deletion succeeded.
def main(request, response):
types = [key for key in request.GET.keys()]
header = ",".join("\"" + type + "\"" for type in types)
return ([("Clear-Site-Data", header),
("Content-Type", "text/html")],
RESPONSE)
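# An illustrative sketch of the header construction above (the GET keys are
# hypothetical):
#
#     >>> types = ["cookies", "storage"]
#     >>> ",".join("\"" + type + "\"" for type in types)
#     '"cookies","storage"'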
|
brianlsharp/MissionPlanner | refs/heads/master | Lib/encodings/quopri_codec.py | 87 | """Codec for quoted-printable encoding.
Like base64 and rot13, this returns Python strings, not Unicode.
"""
import codecs, quopri
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
def quopri_encode(input, errors='strict'):
"""Encode the input, returning a tuple (output object, length consumed).
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
# using str() because of cStringIO's undesired Unicode behavior.
f = StringIO(str(input))
g = StringIO()
quopri.encode(f, g, 1)
output = g.getvalue()
return (output, len(input))
def quopri_decode(input, errors='strict'):
"""Decode the input, returning a tuple (output object, length consumed).
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
f = StringIO(str(input))
g = StringIO()
quopri.decode(f, g)
output = g.getvalue()
return (output, len(input))
class Codec(codecs.Codec):
def encode(self, input,errors='strict'):
return quopri_encode(input,errors)
def decode(self, input,errors='strict'):
return quopri_decode(input,errors)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return quopri_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return quopri_decode(input, self.errors)[0]
class StreamWriter(Codec, codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
# encodings module API
def getregentry():
return codecs.CodecInfo(
name='quopri',
encode=quopri_encode,
decode=quopri_decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
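# An illustrative round-trip, using the Python 2 str-based codec API this
# module targets:
#
#     data = 'a=b'
#     encoded = data.encode('quopri')      # '=' is quoted as '=3D' -> 'a=3Db'
#     assert encoded.decode('quopri') == data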
|
Komodo/dbexplorer_pgsql | refs/heads/master | platform/Linux_x86_64-gcc3/pylib/psycopg2/__init__.py | 10 | """A Python driver for PostgreSQL
psycopg is a PostgreSQL_ database adapter for the Python_ programming
language. This is version 2, a complete rewrite of the original code to
provide new-style classes for connection and cursor objects and other sweet
candies. Like the original, psycopg 2 was written with the aim of being very
small and fast, and stable as a rock.
Homepage: http://initd.org/projects/psycopg2
.. _PostgreSQL: http://www.postgresql.org/
.. _Python: http://www.python.org/
:Groups:
* `Connections creation`: connect
* `Value objects constructors`: Binary, Date, DateFromTicks, Time,
TimeFromTicks, Timestamp, TimestampFromTicks
"""
# psycopg/__init__.py - initialization of the psycopg module
#
# Copyright (C) 2003-2010 Federico Di Gregorio <fog@debian.org>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
# Import modules needed by _psycopg to allow tools like py2exe to do
# their work without bothering about the module dependencies.
#
# TODO: we should probably use the Warnings framework to signal a missing
# module instead of raising an exception (in case we're running a thin
# embedded Python or something even more devious.)
import sys, warnings
if sys.version_info[0] >= 2 and sys.version_info[1] >= 3:
try:
import datetime as _psycopg_needs_datetime
except:
warnings.warn(
"can't import datetime module probably needed by _psycopg",
RuntimeWarning)
if sys.version_info[0] >= 2 and sys.version_info[1] >= 4:
try:
import decimal as _psycopg_needs_decimal
except:
warnings.warn(
"can't import decimal module probably needed by _psycopg",
RuntimeWarning)
del sys, warnings
from psycopg2 import tz
# Import the DBAPI-2.0 stuff into top-level module.
from _psycopg import BINARY, NUMBER, STRING, DATETIME, ROWID
from _psycopg import Binary, Date, Time, Timestamp
from _psycopg import DateFromTicks, TimeFromTicks, TimestampFromTicks
from _psycopg import Error, Warning, DataError, DatabaseError, ProgrammingError
from _psycopg import IntegrityError, InterfaceError, InternalError
from _psycopg import NotSupportedError, OperationalError
from _psycopg import connect, apilevel, threadsafety, paramstyle
from _psycopg import __version__
# Register default adapters.
import psycopg2.extensions as _ext
_ext.register_adapter(tuple, _ext.SQL_IN)
__all__ = filter(lambda k: not k.startswith('_'), locals().keys())
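# An illustrative DBAPI-2.0 usage sketch (the connection parameters are
# hypothetical):
#
#     conn = connect("dbname=test user=postgres")
#     cur = conn.cursor()
#     cur.execute("select * from test where id in %s", ((1, 2, 3),))
#     # the tuple adapter registered above renders the tuple as an SQL IN list
#     rows = cur.fetchall()
#     conn.close()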
|
daenamkim/ansible | refs/heads/devel | lib/ansible/modules/network/avi/avi_poolgroupdeploymentpolicy.py | 3 | #!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_poolgroupdeploymentpolicy
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of PoolGroupDeploymentPolicy Avi RESTful Object
description:
- This module is used to configure PoolGroupDeploymentPolicy object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
auto_disable_old_prod_pools:
description:
- It will automatically disable old production pools once there is a new production candidate.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
cloud_ref:
description:
- It is a reference to an object of type cloud.
description:
description:
- User defined description for the object.
evaluation_duration:
description:
- Duration of evaluation period for automatic deployment.
- Allowed values are 60-86400.
- Default value when not specified in API or module is interpreted by Avi Controller as 300.
name:
description:
- The name of the pool group deployment policy.
required: true
rules:
description:
- List of pgdeploymentrule.
scheme:
description:
- Deployment scheme.
- Enum options - BLUE_GREEN, CANARY.
- Default value when not specified in API or module is interpreted by Avi Controller as BLUE_GREEN.
target_test_traffic_ratio:
description:
- Target traffic ratio before pool is made production.
- Allowed values are 1-100.
- Default value when not specified in API or module is interpreted by Avi Controller as 100.
tenant_ref:
description:
- It is a reference to an object of type tenant.
test_traffic_ratio_rampup:
description:
- Ratio of the traffic that is sent to the pool under test.
- Test ratio of 100 means blue green.
- Allowed values are 1-100.
- Default value when not specified in API or module is interpreted by Avi Controller as 100.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the pool group deployment policy.
webhook_ref:
description:
- Webhook configured with a url that the avi controller will call back with information about the pool group, the old and new pools, and the current deployment rule results.
- It is a reference to an object of type webhook.
- Field introduced in 17.1.1.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create PoolGroupDeploymentPolicy object
avi_poolgroupdeploymentpolicy:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_poolgroupdeploymentpolicy
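# A fuller sketch; every value below is illustrative, not an Avi default:
- name: Example tuning a canary rollout policy
avi_poolgroupdeploymentpolicy:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_canary_policy
scheme: CANARY
evaluation_duration: 600
test_traffic_ratio_rampup: 10
target_test_traffic_ratio: 50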
"""
RETURN = '''
obj:
description: PoolGroupDeploymentPolicy (api/poolgroupdeploymentpolicy) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
auto_disable_old_prod_pools=dict(type='bool',),
cloud_ref=dict(type='str',),
description=dict(type='str',),
evaluation_duration=dict(type='int',),
name=dict(type='str', required=True),
rules=dict(type='list',),
scheme=dict(type='str',),
target_test_traffic_ratio=dict(type='int',),
tenant_ref=dict(type='str',),
test_traffic_ratio_rampup=dict(type='int',),
url=dict(type='str',),
uuid=dict(type='str',),
webhook_ref=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'poolgroupdeploymentpolicy',
set([]))
if __name__ == '__main__':
main()
|
bwhicks/PlinyProject | refs/heads/master | backend/letters/migrations/0003_add_citations.py | 1 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-03-16 20:46
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('common', '0002_add_citation'),
('letters', '0002_set_ordering_letter'),
]
operations = [
migrations.AddField(
model_name='letter',
name='citations',
field=models.ManyToManyField(blank=True, to='common.Citation'),
),
]
|
cherokee/webserver | refs/heads/master | qa/142-SCGI-ExtraVars.py | 8 | import os
from base import *
from util import *
DIR = "/SCGI4/"
PORT = get_free_port()
PYTHON = look_for_python()
HDR1 = "X-Whatever"
VAL1 = "Value1"
HDR2 = "Something"
VAL2 = "Second"
SCRIPT = """
from pyscgi import *
class TestHandler (SCGIHandler):
def handle_request (self):
self.handle_post()
self.send('Content-Type: text/plain\\r\\n\\r\\n')
for v in self.env:
self.send('%%s: %%s\\n' %% (v, self.env[v]))
SCGIServer(TestHandler, port=%d).serve_forever()
""" % (PORT)
source = get_next_source()
CONF = """
vserver!1!rule!1420!match = directory
vserver!1!rule!1420!match!directory = %(DIR)s
vserver!1!rule!1420!handler = scgi
vserver!1!rule!1420!handler!pass_req_headers = 1
vserver!1!rule!1420!handler!balancer = round_robin
vserver!1!rule!1420!handler!balancer!source!1 = %(source)d
source!%(source)d!type = interpreter
source!%(source)d!host = localhost:%(PORT)d
source!%(source)d!interpreter = %(PYTHON)s %(scgi_file)s
"""
class Test (TestBase):
def __init__ (self):
TestBase.__init__ (self, __file__)
self.name = "SCGI IV: Extra variables"
self.request = "GET %s HTTP/1.0\r\n" %(DIR) + \
"%s: %s\r\n" % (HDR1, VAL1) + \
"%s: %s\r\n" % (HDR2, VAL2)
self.expected_error = 200
self.expected_content = ['%s: %s'%(get_forwarded_http_header(HDR1), VAL1),
'%s: %s'%(get_forwarded_http_header(HDR2), VAL2)]
self.forbidden_content = ['pyscgi', 'SCGIServer', 'write']
def Prepare (self, www):
scgi_file = self.WriteFile (www, "scgi_test4.scgi", 0444, SCRIPT)
pyscgi = os.path.join (www, 'pyscgi.py')
if not os.path.exists (pyscgi):
self.CopyFile ('pyscgi.py', pyscgi)
vars = globals()
vars['scgi_file'] = scgi_file
self.conf = CONF % (vars)
|
boooka/GeoPowerOff | refs/heads/master | venv/lib/python2.7/site-packages/django/db/models/fields/__init__.py | 15 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import collections
import copy
import datetime
import decimal
import math
import warnings
from base64 import b64decode, b64encode
from itertools import tee
from django.apps import apps
from django.db import connection
from django.db.models.lookups import default_lookups, RegisterLookupMixin
from django.db.models.query_utils import QueryWrapper
from django.conf import settings
from django import forms
from django.core import exceptions, validators, checks
from django.utils.datastructures import DictWrapper
from django.utils.dateparse import parse_date, parse_datetime, parse_time
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils.functional import cached_property, curry, total_ordering, Promise
from django.utils.text import capfirst
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import (smart_text, force_text, force_bytes,
python_2_unicode_compatible)
from django.utils.ipv6 import clean_ipv6_address
from django.utils import six
from django.utils.itercompat import is_iterable
# Avoid "TypeError: Item in ``from list'' not a string" -- unicode_literals
# makes these strings unicode
__all__ = [str(x) for x in (
'AutoField', 'BLANK_CHOICE_DASH', 'BigIntegerField', 'BinaryField',
'BooleanField', 'CharField', 'CommaSeparatedIntegerField', 'DateField',
'DateTimeField', 'DecimalField', 'EmailField', 'Empty', 'Field',
'FieldDoesNotExist', 'FilePathField', 'FloatField',
'GenericIPAddressField', 'IPAddressField', 'IntegerField', 'NOT_PROVIDED',
'NullBooleanField', 'PositiveIntegerField', 'PositiveSmallIntegerField',
'SlugField', 'SmallIntegerField', 'TextField', 'TimeField', 'URLField',
)]
class Empty(object):
pass
class NOT_PROVIDED:
pass
# The values to use for "blank" in SelectFields. Will be appended to the start
# of most "choices" lists.
BLANK_CHOICE_DASH = [("", "---------")]
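# For example, a field declared with choices=[('a', 'A')] normally comes back
# from get_choices() as [("", "---------"), ('a', 'A')], since no blank option
# is present in the original list.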
def _load_field(app_label, model_name, field_name):
return apps.get_model(app_label, model_name)._meta.get_field_by_name(field_name)[0]
class FieldDoesNotExist(Exception):
pass
# A guide to Field parameters:
#
# * name: The name of the field specified in the model.
# * attname: The attribute to use on the model object. This is the same as
# "name", except in the case of ForeignKeys, where "_id" is
# appended.
# * db_column: The db_column specified in the model (or None).
# * column: The database column for this field. This is the same as
# "attname", except if db_column is specified.
#
# Code that introspects values, or does other dynamic things, should use
# attname. For example, this gets the primary key value of object "obj":
#
# getattr(obj, opts.pk.attname)
def _empty(of_cls):
new = Empty()
new.__class__ = of_cls
return new
@total_ordering
@python_2_unicode_compatible
class Field(RegisterLookupMixin):
"""Base class for all field types"""
# Designates whether empty strings fundamentally are allowed at the
# database level.
empty_strings_allowed = True
empty_values = list(validators.EMPTY_VALUES)
# These track each time a Field instance is created. Used to retain order.
# The auto_creation_counter is used for fields that Django implicitly
# creates, creation_counter is used for all user-specified fields.
creation_counter = 0
auto_creation_counter = -1
default_validators = [] # Default set of validators
default_error_messages = {
'invalid_choice': _('Value %(value)r is not a valid choice.'),
'null': _('This field cannot be null.'),
'blank': _('This field cannot be blank.'),
'unique': _('%(model_name)s with this %(field_label)s '
'already exists.'),
# Translators: The 'lookup_type' is one of 'date', 'year' or 'month'.
# Eg: "Title must be unique for pub_date year"
'unique_for_date': _("%(field_label)s must be unique for "
"%(date_field_label)s %(lookup_type)s."),
}
class_lookups = default_lookups.copy()
# Generic field type description, usually overridden by subclasses
def _description(self):
return _('Field of type: %(field_type)s') % {
'field_type': self.__class__.__name__
}
description = property(_description)
def __init__(self, verbose_name=None, name=None, primary_key=False,
max_length=None, unique=False, blank=False, null=False,
db_index=False, rel=None, default=NOT_PROVIDED, editable=True,
serialize=True, unique_for_date=None, unique_for_month=None,
unique_for_year=None, choices=None, help_text='', db_column=None,
db_tablespace=None, auto_created=False, validators=[],
error_messages=None):
self.name = name
self.verbose_name = verbose_name # May be set by set_attributes_from_name
self._verbose_name = verbose_name # Store original for deconstruction
self.primary_key = primary_key
self.max_length, self._unique = max_length, unique
self.blank, self.null = blank, null
self.rel = rel
self.default = default
self.editable = editable
self.serialize = serialize
self.unique_for_date = unique_for_date
self.unique_for_month = unique_for_month
self.unique_for_year = unique_for_year
self._choices = choices or []
self.help_text = help_text
self.db_column = db_column
self.db_tablespace = db_tablespace or settings.DEFAULT_INDEX_TABLESPACE
self.auto_created = auto_created
# Set db_index to True if the field has a relationship and doesn't
# explicitly set db_index.
self.db_index = db_index
# Adjust the appropriate creation counter, and save our local copy.
if auto_created:
self.creation_counter = Field.auto_creation_counter
Field.auto_creation_counter -= 1
else:
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
self._validators = validators # Store for deconstruction later
messages = {}
for c in reversed(self.__class__.__mro__):
messages.update(getattr(c, 'default_error_messages', {}))
messages.update(error_messages or {})
self._error_messages = error_messages # Store for deconstruction later
self.error_messages = messages
def __str__(self):
""" Return "app_label.model_label.field_name". """
model = self.model
app = model._meta.app_label
return '%s.%s.%s' % (app, model._meta.object_name, self.name)
def __repr__(self):
"""
Displays the module, class and name of the field.
"""
path = '%s.%s' % (self.__class__.__module__, self.__class__.__name__)
name = getattr(self, 'name', None)
if name is not None:
return '<%s: %s>' % (path, name)
return '<%s>' % path
def check(self, **kwargs):
errors = []
errors.extend(self._check_field_name())
errors.extend(self._check_choices())
errors.extend(self._check_db_index())
errors.extend(self._check_null_allowed_for_primary_keys())
errors.extend(self._check_backend_specific_checks(**kwargs))
return errors
def _check_field_name(self):
""" Check if field name is valid, i.e. 1) does not end with an
underscore, 2) does not contain "__" and 3) is not "pk". """
if self.name.endswith('_'):
return [
checks.Error(
'Field names must not end with an underscore.',
hint=None,
obj=self,
id='fields.E001',
)
]
elif '__' in self.name:
return [
checks.Error(
'Field names must not contain "__".',
hint=None,
obj=self,
id='fields.E002',
)
]
elif self.name == 'pk':
return [
checks.Error(
"'pk' is a reserved word that cannot be used as a field name.",
hint=None,
obj=self,
id='fields.E003',
)
]
else:
return []
def _check_choices(self):
if self.choices:
if (isinstance(self.choices, six.string_types) or
not is_iterable(self.choices)):
return [
checks.Error(
"'choices' must be an iterable (e.g., a list or tuple).",
hint=None,
obj=self,
id='fields.E004',
)
]
elif any(isinstance(choice, six.string_types) or
not is_iterable(choice) or len(choice) != 2
for choice in self.choices):
return [
checks.Error(
("'choices' must be an iterable containing "
"(actual value, human readable name) tuples."),
hint=None,
obj=self,
id='fields.E005',
)
]
else:
return []
else:
return []
def _check_db_index(self):
if self.db_index not in (None, True, False):
return [
checks.Error(
"'db_index' must be None, True or False.",
hint=None,
obj=self,
id='fields.E006',
)
]
else:
return []
def _check_null_allowed_for_primary_keys(self):
if (self.primary_key and self.null and
not connection.features.interprets_empty_strings_as_nulls):
# We cannot reliably check this for backends like Oracle which
# consider NULL and '' to be equal (and thus set up
# character-based fields a little differently).
return [
checks.Error(
'Primary keys must not have null=True.',
hint=('Set null=False on the field, or '
'remove primary_key=True argument.'),
obj=self,
id='fields.E007',
)
]
else:
return []
def _check_backend_specific_checks(self, **kwargs):
return connection.validation.check_field(self, **kwargs)
def deconstruct(self):
"""
Returns enough information to recreate the field as a 4-tuple:
* The name of the field on the model, if contribute_to_class has been run
* The import path of the field, including the class: django.db.models.IntegerField
This should be the most portable version, so less specific may be better.
* A list of positional arguments
* A dict of keyword arguments
Note that the positional or keyword arguments must contain values of the
following types (including inner values of collection types):
* None, bool, str, unicode, int, long, float, complex, set, frozenset, list, tuple, dict
* UUID
* datetime.datetime (naive), datetime.date
* top-level classes, top-level functions - will be referenced by their full import path
* Storage instances - these have their own deconstruct() method
This is because the values here must be serialized into a text format
(possibly new Python code, possibly JSON) and these are the only types
with encoding handlers defined.
There's no need to return the exact way the field was instantiated this time,
just ensure that the resulting field is the same - prefer keyword arguments
over positional ones, and omit parameters with their default values.
"""
# Short-form way of fetching all the default parameters
keywords = {}
possibles = {
"verbose_name": None,
"primary_key": False,
"max_length": None,
"unique": False,
"blank": False,
"null": False,
"db_index": False,
"default": NOT_PROVIDED,
"editable": True,
"serialize": True,
"unique_for_date": None,
"unique_for_month": None,
"unique_for_year": None,
"choices": [],
"help_text": '',
"db_column": None,
"db_tablespace": settings.DEFAULT_INDEX_TABLESPACE,
"auto_created": False,
"validators": [],
"error_messages": None,
}
attr_overrides = {
"unique": "_unique",
"choices": "_choices",
"error_messages": "_error_messages",
"validators": "_validators",
"verbose_name": "_verbose_name",
}
equals_comparison = set(["choices", "validators", "db_tablespace"])
for name, default in possibles.items():
value = getattr(self, attr_overrides.get(name, name))
# Unroll anything iterable for choices into a concrete list
if name == "choices" and isinstance(value, collections.Iterable):
value = list(value)
# Do correct kind of comparison
if name in equals_comparison:
if value != default:
keywords[name] = value
else:
if value is not default:
keywords[name] = value
# Work out path - we shorten it for known Django core fields
path = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
if path.startswith("django.db.models.fields.related"):
path = path.replace("django.db.models.fields.related", "django.db.models")
if path.startswith("django.db.models.fields.files"):
path = path.replace("django.db.models.fields.files", "django.db.models")
if path.startswith("django.db.models.fields.proxy"):
path = path.replace("django.db.models.fields.proxy", "django.db.models")
if path.startswith("django.db.models.fields"):
path = path.replace("django.db.models.fields", "django.db.models")
# Return basic info - other fields should override this.
return (
force_text(self.name, strings_only=True),
path,
[],
keywords,
)
def clone(self):
"""
Uses deconstruct() to clone a new copy of this Field.
Will not preserve any class attachments/attribute names.
"""
name, path, args, kwargs = self.deconstruct()
return self.__class__(*args, **kwargs)
def __eq__(self, other):
# Needed for @total_ordering
if isinstance(other, Field):
return self.creation_counter == other.creation_counter
return NotImplemented
def __lt__(self, other):
# This is needed because bisect does not take a comparison function.
if isinstance(other, Field):
return self.creation_counter < other.creation_counter
return NotImplemented
def __hash__(self):
return hash(self.creation_counter)
def __deepcopy__(self, memodict):
# We don't have to deepcopy very much here, since most things are not
# intended to be altered after initial creation.
obj = copy.copy(self)
if self.rel:
obj.rel = copy.copy(self.rel)
if hasattr(self.rel, 'field') and self.rel.field is self:
obj.rel.field = obj
memodict[id(self)] = obj
return obj
def __copy__(self):
# We need to avoid hitting __reduce__, so define this
# slightly weird copy construct.
obj = Empty()
obj.__class__ = self.__class__
obj.__dict__ = self.__dict__.copy()
return obj
def __reduce__(self):
"""
Pickling should return the model._meta.fields instance of the field,
not a new copy of that field. So, we use the app registry to load the
model and then the field back.
"""
if not hasattr(self, 'model'):
# Fields are sometimes used without attaching them to models (for
# example in aggregation). In this case give back a plain field
# instance. The code below will create a new empty instance of
# class self.__class__, then update its dict with self.__dict__
# values - so, this is very close to normal pickle.
return _empty, (self.__class__,), self.__dict__
if self.model._deferred:
# Deferred model will not be found from the app registry. This
# could be fixed by reconstructing the deferred model on unpickle.
raise RuntimeError("Fields of deferred models can't be reduced")
return _load_field, (self.model._meta.app_label, self.model._meta.object_name,
self.name)
def to_python(self, value):
"""
Converts the input value into the expected Python data type, raising
django.core.exceptions.ValidationError if the data can't be converted.
Returns the converted value. Subclasses should override this.
"""
return value
@cached_property
def validators(self):
# Some validators can't be created at field initialization time.
# This method provides a way to delay their creation until required.
return self.default_validators + self._validators
def run_validators(self, value):
if value in self.empty_values:
return
errors = []
for v in self.validators:
try:
v(value)
except exceptions.ValidationError as e:
if hasattr(e, 'code') and e.code in self.error_messages:
e.message = self.error_messages[e.code]
errors.extend(e.error_list)
if errors:
raise exceptions.ValidationError(errors)
def validate(self, value, model_instance):
"""
Validates value and throws ValidationError. Subclasses should override
this to provide validation logic.
"""
if not self.editable:
# Skip validation for non-editable fields.
return
if self._choices and value not in self.empty_values:
for option_key, option_value in self.choices:
if isinstance(option_value, (list, tuple)):
# This is an optgroup, so look inside the group for
# options.
for optgroup_key, optgroup_value in option_value:
if value == optgroup_key:
return
elif value == option_key:
return
raise exceptions.ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': value},
)
if value is None and not self.null:
raise exceptions.ValidationError(self.error_messages['null'], code='null')
if not self.blank and value in self.empty_values:
raise exceptions.ValidationError(self.error_messages['blank'], code='blank')
def clean(self, value, model_instance):
"""
Convert the value's type and run validation. Validation errors
from to_python and validate are propagated. The correct value is
returned if no error is raised.
"""
value = self.to_python(value)
self.validate(value, model_instance)
self.run_validators(value)
return value
def db_type(self, connection):
"""
Returns the database column data type for this field, for the provided
connection.
"""
# The default implementation of this method looks at the
# backend-specific data_types dictionary, looking up the field by its
# "internal type".
#
# A Field class can implement the get_internal_type() method to specify
# which *preexisting* Django Field class it's most similar to -- i.e.,
# a custom field might be represented by a TEXT column type, which is
# the same as the TextField Django field type, which means the custom
# field's get_internal_type() returns 'TextField'.
#
# But the limitation of the get_internal_type() / data_types approach
# is that it cannot handle database column types that aren't already
# mapped to one of the built-in Django field types. In this case, you
# can implement db_type() instead of get_internal_type() to specify
# exactly which wacky database column type you want to use.
data = DictWrapper(self.__dict__, connection.ops.quote_name, "qn_")
try:
return connection.creation.data_types[self.get_internal_type()] % data
except KeyError:
return None
def db_parameters(self, connection):
"""
Extension of db_type(), providing a range of different return
values (type, checks).
This will look at db_type(), allowing custom model fields to override it.
"""
data = DictWrapper(self.__dict__, connection.ops.quote_name, "qn_")
type_string = self.db_type(connection)
try:
check_string = connection.creation.data_type_check_constraints[self.get_internal_type()] % data
except KeyError:
check_string = None
return {
"type": type_string,
"check": check_string,
}
def db_type_suffix(self, connection):
return connection.creation.data_types_suffix.get(self.get_internal_type())
@property
def unique(self):
return self._unique or self.primary_key
def set_attributes_from_name(self, name):
if not self.name:
self.name = name
self.attname, self.column = self.get_attname_column()
if self.verbose_name is None and self.name:
self.verbose_name = self.name.replace('_', ' ')
def contribute_to_class(self, cls, name, virtual_only=False):
self.set_attributes_from_name(name)
self.model = cls
if virtual_only:
cls._meta.add_virtual_field(self)
else:
cls._meta.add_field(self)
if self.choices:
setattr(cls, 'get_%s_display' % self.name,
curry(cls._get_FIELD_display, field=self))
def get_attname(self):
return self.name
def get_attname_column(self):
attname = self.get_attname()
column = self.db_column or attname
return attname, column
def get_cache_name(self):
return '_%s_cache' % self.name
def get_internal_type(self):
return self.__class__.__name__
def pre_save(self, model_instance, add):
"""
Returns field's value just before saving.
"""
return getattr(model_instance, self.attname)
def get_prep_value(self, value):
"""
Perform preliminary non-db specific value checks and conversions.
"""
if isinstance(value, Promise):
value = value._proxy____cast()
return value
def get_db_prep_value(self, value, connection, prepared=False):
"""Returns field's value prepared for interacting with the database
backend.
Used by the default implementations of ``get_db_prep_save`` and
``get_db_prep_lookup``.
"""
if not prepared:
value = self.get_prep_value(value)
return value
def get_db_prep_save(self, value, connection):
"""
Returns field's value prepared for saving into a database.
"""
return self.get_db_prep_value(value, connection=connection,
prepared=False)
def get_prep_lookup(self, lookup_type, value):
"""
Perform preliminary non-db specific lookup checks and conversions
"""
if hasattr(value, 'prepare'):
return value.prepare()
if hasattr(value, '_prepare'):
return value._prepare()
if lookup_type in {
'iexact', 'contains', 'icontains',
'startswith', 'istartswith', 'endswith', 'iendswith',
'month', 'day', 'week_day', 'hour', 'minute', 'second',
'isnull', 'search', 'regex', 'iregex',
}:
return value
elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
return self.get_prep_value(value)
elif lookup_type in ('range', 'in'):
return [self.get_prep_value(v) for v in value]
elif lookup_type == 'year':
try:
return int(value)
except ValueError:
raise ValueError("The __year lookup type requires an integer "
"argument")
return self.get_prep_value(value)
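# Illustrative sketch, assuming an IntegerField (whose get_prep_value is int()):
# field.get_prep_lookup('in', ['1', '2']) -> [1, 2]
# field.get_prep_lookup('year', '2014') -> 2014
# field.get_prep_lookup('isnull', True) -> True (passed through unchanged)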
def get_db_prep_lookup(self, lookup_type, value, connection,
prepared=False):
"""
Returns field's value prepared for database lookup.
"""
if not prepared:
value = self.get_prep_lookup(lookup_type, value)
prepared = True
if hasattr(value, 'get_compiler'):
value = value.get_compiler(connection=connection)
if hasattr(value, 'as_sql') or hasattr(value, '_as_sql'):
# If the value has a relabeled_clone method it means the
# value will be handled later on.
if hasattr(value, 'relabeled_clone'):
return value
if hasattr(value, 'as_sql'):
sql, params = value.as_sql()
else:
sql, params = value._as_sql(connection=connection)
return QueryWrapper(('(%s)' % sql), params)
if lookup_type in ('month', 'day', 'week_day', 'hour', 'minute',
'second', 'search', 'regex', 'iregex'):
return [value]
elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
return [self.get_db_prep_value(value, connection=connection,
prepared=prepared)]
elif lookup_type in ('range', 'in'):
return [self.get_db_prep_value(v, connection=connection,
prepared=prepared) for v in value]
elif lookup_type in ('contains', 'icontains'):
return ["%%%s%%" % connection.ops.prep_for_like_query(value)]
elif lookup_type == 'iexact':
return [connection.ops.prep_for_iexact_query(value)]
elif lookup_type in ('startswith', 'istartswith'):
return ["%s%%" % connection.ops.prep_for_like_query(value)]
elif lookup_type in ('endswith', 'iendswith'):
return ["%%%s" % connection.ops.prep_for_like_query(value)]
elif lookup_type == 'isnull':
return []
elif lookup_type == 'year':
if isinstance(self, DateTimeField):
return connection.ops.year_lookup_bounds_for_datetime_field(value)
elif isinstance(self, DateField):
return connection.ops.year_lookup_bounds_for_date_field(value)
else:
return [value] # this isn't supposed to happen
else:
return [value]
def has_default(self):
"""
Returns a boolean of whether this field has a default value.
"""
return self.default is not NOT_PROVIDED
def get_default(self):
"""
Returns the default value for this field.
"""
if self.has_default():
if callable(self.default):
return self.default()
return force_text(self.default, strings_only=True)
if (not self.empty_strings_allowed or (self.null and
not connection.features.interprets_empty_strings_as_nulls)):
return None
return ""
def get_validator_unique_lookup_type(self):
return '%s__exact' % self.name
def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH):
"""Returns choices with a default blank choices included, for use
as SelectField choices for this field."""
blank_defined = False
choices = list(self.choices) if self.choices else []
named_groups = choices and isinstance(choices[0][1], (list, tuple))
if not named_groups:
for choice, __ in choices:
if choice in ('', None):
blank_defined = True
break
first_choice = (blank_choice if include_blank and
not blank_defined else [])
if self.choices:
return first_choice + choices
rel_model = self.rel.to
if hasattr(self.rel, 'get_related_field'):
lst = [(getattr(x, self.rel.get_related_field().attname),
smart_text(x))
for x in rel_model._default_manager.complex_filter(
self.get_limit_choices_to())]
else:
lst = [(x._get_pk_val(), smart_text(x))
for x in rel_model._default_manager.complex_filter(
self.get_limit_choices_to())]
return first_choice + lst
def get_choices_default(self):
return self.get_choices()
def get_flatchoices(self, include_blank=True,
blank_choice=BLANK_CHOICE_DASH):
"""
Returns flattened choices with a default blank choice included.
"""
first_choice = blank_choice if include_blank else []
return first_choice + list(self.flatchoices)
def _get_val_from_obj(self, obj):
if obj is not None:
return getattr(obj, self.attname)
else:
return self.get_default()
def value_to_string(self, obj):
"""
Returns a string value of this field from the passed obj.
This is used by the serialization framework.
"""
return smart_text(self._get_val_from_obj(obj))
def bind(self, fieldmapping, original, bound_field_class):
return bound_field_class(self, fieldmapping, original)
def _get_choices(self):
if isinstance(self._choices, collections.Iterator):
choices, self._choices = tee(self._choices)
return choices
else:
return self._choices
choices = property(_get_choices)
def _get_flatchoices(self):
"""Flattened version of choices tuple."""
flat = []
for choice, value in self.choices:
if isinstance(value, (list, tuple)):
flat.extend(value)
else:
flat.append((choice, value))
return flat
flatchoices = property(_get_flatchoices)
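# For example, choices = [('Audio', [('vinyl', 'Vinyl'), ('cd', 'CD')]), ('unknown', 'Unknown')]
# flattens to [('vinyl', 'Vinyl'), ('cd', 'CD'), ('unknown', 'Unknown')].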
def save_form_data(self, instance, data):
setattr(instance, self.name, data)
def formfield(self, form_class=None, choices_form_class=None, **kwargs):
"""
Returns a django.forms.Field instance for this database Field.
"""
defaults = {'required': not self.blank,
'label': capfirst(self.verbose_name),
'help_text': self.help_text}
if self.has_default():
if callable(self.default):
defaults['initial'] = self.default
defaults['show_hidden_initial'] = True
else:
defaults['initial'] = self.get_default()
if self.choices:
# Fields with choices get special treatment.
include_blank = (self.blank or
not (self.has_default() or 'initial' in kwargs))
defaults['choices'] = self.get_choices(include_blank=include_blank)
defaults['coerce'] = self.to_python
if self.null:
defaults['empty_value'] = None
if choices_form_class is not None:
form_class = choices_form_class
else:
form_class = forms.TypedChoiceField
# Many of the subclass-specific formfield arguments (min_value,
# max_value) don't apply for choice fields, so be sure to only pass
# the values that TypedChoiceField will understand.
for k in list(kwargs):
if k not in ('coerce', 'empty_value', 'choices', 'required',
'widget', 'label', 'initial', 'help_text',
'error_messages', 'show_hidden_initial'):
del kwargs[k]
defaults.update(kwargs)
if form_class is None:
form_class = forms.CharField
return form_class(**defaults)
def value_from_object(self, obj):
"""
Returns the value of this field in the given model instance.
"""
return getattr(obj, self.attname)
class AutoField(Field):
description = _("Integer")
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be an integer."),
}
def __init__(self, *args, **kwargs):
kwargs['blank'] = True
super(AutoField, self).__init__(*args, **kwargs)
def check(self, **kwargs):
errors = super(AutoField, self).check(**kwargs)
errors.extend(self._check_primary_key())
return errors
def _check_primary_key(self):
if not self.primary_key:
return [
checks.Error(
'AutoFields must set primary_key=True.',
hint=None,
obj=self,
id='fields.E100',
),
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super(AutoField, self).deconstruct()
del kwargs['blank']
kwargs['primary_key'] = True
return name, path, args, kwargs
def get_internal_type(self):
return "AutoField"
def to_python(self, value):
if value is None:
return value
try:
return int(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def validate(self, value, model_instance):
pass
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
value = connection.ops.validate_autopk_value(value)
return value
def get_prep_value(self, value):
value = super(AutoField, self).get_prep_value(value)
if value is None:
return None
return int(value)
def contribute_to_class(self, cls, name):
assert not cls._meta.has_auto_field, \
"A model can't have more than one AutoField."
super(AutoField, self).contribute_to_class(cls, name)
cls._meta.has_auto_field = True
cls._meta.auto_field = self
def formfield(self, **kwargs):
return None
class BooleanField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be either True or False."),
}
description = _("Boolean (Either True or False)")
def __init__(self, *args, **kwargs):
kwargs['blank'] = True
super(BooleanField, self).__init__(*args, **kwargs)
def check(self, **kwargs):
errors = super(BooleanField, self).check(**kwargs)
errors.extend(self._check_null(**kwargs))
return errors
def _check_null(self, **kwargs):
if getattr(self, 'null', False):
return [
checks.Error(
'BooleanFields do not accept null values.',
hint='Use a NullBooleanField instead.',
obj=self,
id='fields.E110',
)
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super(BooleanField, self).deconstruct()
del kwargs['blank']
return name, path, args, kwargs
def get_internal_type(self):
return "BooleanField"
def to_python(self, value):
if value in (True, False):
# if value is 1 or 0 then it's equal to True or False, but we want
# to return a true bool for semantic reasons.
return bool(value)
if value in ('t', 'True', '1'):
return True
if value in ('f', 'False', '0'):
return False
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
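# In short: True/False pass through as bools, 't'/'True'/'1' map to True,
# 'f'/'False'/'0' map to False, and anything else raises ValidationError.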
def get_prep_lookup(self, lookup_type, value):
# Special-case handling for filters coming from a Web request (e.g. the
# admin interface). Only works for scalar values (not lists). If you're
# passing in a list, you might as well make things the right type when
# constructing the list.
if value in ('1', '0'):
value = bool(int(value))
return super(BooleanField, self).get_prep_lookup(lookup_type, value)
def get_prep_value(self, value):
value = super(BooleanField, self).get_prep_value(value)
if value is None:
return None
return bool(value)
def formfield(self, **kwargs):
# Unlike most fields, BooleanField figures out include_blank from
# self.null instead of self.blank.
if self.choices:
include_blank = (self.null or
not (self.has_default() or 'initial' in kwargs))
defaults = {'choices': self.get_choices(include_blank=include_blank)}
else:
defaults = {'form_class': forms.BooleanField}
defaults.update(kwargs)
return super(BooleanField, self).formfield(**defaults)
class CharField(Field):
description = _("String (up to %(max_length)s)")
def __init__(self, *args, **kwargs):
super(CharField, self).__init__(*args, **kwargs)
self.validators.append(validators.MaxLengthValidator(self.max_length))
def check(self, **kwargs):
errors = super(CharField, self).check(**kwargs)
errors.extend(self._check_max_length_attribute(**kwargs))
return errors
def _check_max_length_attribute(self, **kwargs):
try:
max_length = int(self.max_length)
if max_length <= 0:
raise ValueError()
except TypeError:
return [
checks.Error(
"CharFields must define a 'max_length' attribute.",
hint=None,
obj=self,
id='fields.E120',
)
]
except ValueError:
return [
checks.Error(
"'max_length' must be a positive integer.",
hint=None,
obj=self,
id='fields.E121',
)
]
else:
return []
def get_internal_type(self):
return "CharField"
def to_python(self, value):
if isinstance(value, six.string_types) or value is None:
return value
return smart_text(value)
def get_prep_value(self, value):
value = super(CharField, self).get_prep_value(value)
return self.to_python(value)
def formfield(self, **kwargs):
# Passing max_length to forms.CharField means that the value's length
# will be validated twice. This is considered acceptable since we want
# the value in the form field (to pass into widget for example).
defaults = {'max_length': self.max_length}
defaults.update(kwargs)
return super(CharField, self).formfield(**defaults)
# TODO: Maybe move this into contrib, because it's specialized.
class CommaSeparatedIntegerField(CharField):
default_validators = [validators.validate_comma_separated_integer_list]
description = _("Comma-separated integers")
def formfield(self, **kwargs):
defaults = {
'error_messages': {
'invalid': _('Enter only digits separated by commas.'),
}
}
defaults.update(kwargs)
return super(CommaSeparatedIntegerField, self).formfield(**defaults)
class DateField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value has an invalid date format. It must be "
"in YYYY-MM-DD format."),
'invalid_date': _("'%(value)s' value has the correct format (YYYY-MM-DD) "
"but it is an invalid date."),
}
description = _("Date (without time)")
def __init__(self, verbose_name=None, name=None, auto_now=False,
auto_now_add=False, **kwargs):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
if auto_now or auto_now_add:
kwargs['editable'] = False
kwargs['blank'] = True
super(DateField, self).__init__(verbose_name, name, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(DateField, self).deconstruct()
if self.auto_now:
kwargs['auto_now'] = True
if self.auto_now_add:
kwargs['auto_now_add'] = True
if self.auto_now or self.auto_now_add:
del kwargs['editable']
del kwargs['blank']
return name, path, args, kwargs
def get_internal_type(self):
return "DateField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
if settings.USE_TZ and timezone.is_aware(value):
# Convert aware datetimes to the default time zone
# before casting them to dates (#17742).
default_timezone = timezone.get_default_timezone()
value = timezone.make_naive(value, default_timezone)
return value.date()
if isinstance(value, datetime.date):
return value
try:
parsed = parse_date(value)
if parsed is not None:
return parsed
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_date'],
code='invalid_date',
params={'value': value},
)
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.date.today()
setattr(model_instance, self.attname, value)
return value
else:
return super(DateField, self).pre_save(model_instance, add)
def contribute_to_class(self, cls, name):
super(DateField, self).contribute_to_class(cls, name)
if not self.null:
setattr(cls, 'get_next_by_%s' % self.name,
curry(cls._get_next_or_previous_by_FIELD, field=self,
is_next=True))
setattr(cls, 'get_previous_by_%s' % self.name,
curry(cls._get_next_or_previous_by_FIELD, field=self,
is_next=False))
def get_prep_lookup(self, lookup_type, value):
# For dates lookups, convert the value to an int
# so the database backend always sees a consistent type.
if lookup_type in ('month', 'day', 'week_day', 'hour', 'minute', 'second'):
return int(value)
return super(DateField, self).get_prep_lookup(lookup_type, value)
def get_prep_value(self, value):
value = super(DateField, self).get_prep_value(value)
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts dates into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.value_to_db_date(value)
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
defaults = {'form_class': forms.DateField}
defaults.update(kwargs)
return super(DateField, self).formfield(**defaults)
class DateTimeField(DateField):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value has an invalid format. It must be in "
"YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ] format."),
'invalid_date': _("'%(value)s' value has the correct format "
"(YYYY-MM-DD) but it is an invalid date."),
'invalid_datetime': _("'%(value)s' value has the correct format "
"(YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ]) "
"but it is an invalid date/time."),
}
description = _("Date (with time)")
# __init__ is inherited from DateField
def get_internal_type(self):
return "DateTimeField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
return value
if isinstance(value, datetime.date):
value = datetime.datetime(value.year, value.month, value.day)
if settings.USE_TZ:
# For backwards compatibility, interpret naive datetimes in
# local time. This won't work during DST change, but we can't
# do much about it, so we let the exceptions percolate up the
# call stack.
warnings.warn("DateTimeField %s.%s received a naive datetime "
"(%s) while time zone support is active." %
(self.model.__name__, self.name, value),
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
return value
try:
parsed = parse_datetime(value)
if parsed is not None:
return parsed
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_datetime'],
code='invalid_datetime',
params={'value': value},
)
try:
parsed = parse_date(value)
if parsed is not None:
return datetime.datetime(parsed.year, parsed.month, parsed.day)
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_date'],
code='invalid_date',
params={'value': value},
)
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = timezone.now()
setattr(model_instance, self.attname, value)
return value
else:
return super(DateTimeField, self).pre_save(model_instance, add)
# contribute_to_class is inherited from DateField, it registers
# get_next_by_FOO and get_prev_by_FOO
# get_prep_lookup is inherited from DateField
def get_prep_value(self, value):
value = super(DateTimeField, self).get_prep_value(value)
value = self.to_python(value)
if value is not None and settings.USE_TZ and timezone.is_naive(value):
# For backwards compatibility, interpret naive datetimes in local
# time. This won't work during DST change, but we can't do much
# about it, so we let the exceptions percolate up the call stack.
warnings.warn("DateTimeField %s.%s received a naive datetime (%s)"
" while time zone support is active." %
(self.model.__name__, self.name, value),
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
return value
def get_db_prep_value(self, value, connection, prepared=False):
# Casts datetimes into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.value_to_db_datetime(value)
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
defaults = {'form_class': forms.DateTimeField}
defaults.update(kwargs)
return super(DateTimeField, self).formfield(**defaults)
class DecimalField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be a decimal number."),
}
description = _("Decimal number")
def __init__(self, verbose_name=None, name=None, max_digits=None,
decimal_places=None, **kwargs):
self.max_digits, self.decimal_places = max_digits, decimal_places
super(DecimalField, self).__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
errors = super(DecimalField, self).check(**kwargs)
digits_errors = self._check_decimal_places()
digits_errors.extend(self._check_max_digits())
if not digits_errors:
errors.extend(self._check_decimal_places_and_max_digits(**kwargs))
else:
errors.extend(digits_errors)
return errors
def _check_decimal_places(self):
try:
decimal_places = int(self.decimal_places)
if decimal_places < 0:
raise ValueError()
except TypeError:
return [
checks.Error(
"DecimalFields must define a 'decimal_places' attribute.",
hint=None,
obj=self,
id='fields.E130',
)
]
except ValueError:
return [
checks.Error(
"'decimal_places' must be a non-negative integer.",
hint=None,
obj=self,
id='fields.E131',
)
]
else:
return []
def _check_max_digits(self):
try:
max_digits = int(self.max_digits)
if max_digits <= 0:
raise ValueError()
except TypeError:
return [
checks.Error(
"DecimalFields must define a 'max_digits' attribute.",
hint=None,
obj=self,
id='fields.E132',
)
]
except ValueError:
return [
checks.Error(
"'max_digits' must be a positive integer.",
hint=None,
obj=self,
id='fields.E133',
)
]
else:
return []
def _check_decimal_places_and_max_digits(self, **kwargs):
if int(self.decimal_places) > int(self.max_digits):
return [
checks.Error(
"'max_digits' must be greater or equal to 'decimal_places'.",
hint=None,
obj=self,
id='fields.E134',
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super(DecimalField, self).deconstruct()
if self.max_digits is not None:
kwargs['max_digits'] = self.max_digits
if self.decimal_places is not None:
kwargs['decimal_places'] = self.decimal_places
return name, path, args, kwargs
def get_internal_type(self):
return "DecimalField"
def to_python(self, value):
if value is None:
return value
try:
return decimal.Decimal(value)
except decimal.InvalidOperation:
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def _format(self, value):
if isinstance(value, six.string_types) or value is None:
return value
else:
return self.format_number(value)
def format_number(self, value):
"""
Formats a number into a string with the requisite number of digits and
decimal places.
"""
# Method moved to django.db.backends.utils.
#
# It is preserved because it is used by the oracle backend
# (django.db.backends.oracle.query), and also for
# backwards-compatibility with any external code which may have used
# this method.
from django.db.backends import utils
return utils.format_number(value, self.max_digits, self.decimal_places)
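# Rough sketch of the delegation (values illustrative): with max_digits=5 and
# decimal_places=2, format_number(Decimal('1.2345')) should come back as '1.23'.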
def get_db_prep_save(self, value, connection):
return connection.ops.value_to_db_decimal(self.to_python(value),
self.max_digits, self.decimal_places)
def get_prep_value(self, value):
value = super(DecimalField, self).get_prep_value(value)
return self.to_python(value)
def formfield(self, **kwargs):
defaults = {
'max_digits': self.max_digits,
'decimal_places': self.decimal_places,
'form_class': forms.DecimalField,
}
defaults.update(kwargs)
return super(DecimalField, self).formfield(**defaults)
class EmailField(CharField):
default_validators = [validators.validate_email]
description = _("Email address")
def __init__(self, *args, **kwargs):
# max_length should be overridden to 254 characters to be fully
# compliant with RFCs 3696 and 5321
kwargs['max_length'] = kwargs.get('max_length', 75)
super(EmailField, self).__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(EmailField, self).deconstruct()
# We do not exclude max_length if it matches default as we want to change
# the default in future.
return name, path, args, kwargs
def formfield(self, **kwargs):
# As with CharField, this will cause email validation to be performed
# twice.
defaults = {
'form_class': forms.EmailField,
}
defaults.update(kwargs)
return super(EmailField, self).formfield(**defaults)
class FilePathField(Field):
description = _("File path")
def __init__(self, verbose_name=None, name=None, path='', match=None,
recursive=False, allow_files=True, allow_folders=False, **kwargs):
self.path, self.match, self.recursive = path, match, recursive
self.allow_files, self.allow_folders = allow_files, allow_folders
kwargs['max_length'] = kwargs.get('max_length', 100)
super(FilePathField, self).__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
errors = super(FilePathField, self).check(**kwargs)
errors.extend(self._check_allowing_files_or_folders(**kwargs))
return errors
def _check_allowing_files_or_folders(self, **kwargs):
if not self.allow_files and not self.allow_folders:
return [
checks.Error(
"FilePathFields must have either 'allow_files' or 'allow_folders' set to True.",
hint=None,
obj=self,
id='fields.E140',
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super(FilePathField, self).deconstruct()
if self.path != '':
kwargs['path'] = self.path
if self.match is not None:
kwargs['match'] = self.match
if self.recursive is not False:
kwargs['recursive'] = self.recursive
if self.allow_files is not True:
kwargs['allow_files'] = self.allow_files
if self.allow_folders is not False:
kwargs['allow_folders'] = self.allow_folders
if kwargs.get("max_length", None) == 100:
del kwargs["max_length"]
return name, path, args, kwargs
def get_prep_value(self, value):
value = super(FilePathField, self).get_prep_value(value)
if value is None:
return None
return six.text_type(value)
def formfield(self, **kwargs):
defaults = {
'path': self.path,
'match': self.match,
'recursive': self.recursive,
'form_class': forms.FilePathField,
'allow_files': self.allow_files,
'allow_folders': self.allow_folders,
}
defaults.update(kwargs)
return super(FilePathField, self).formfield(**defaults)
def get_internal_type(self):
return "FilePathField"
class FloatField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be a float."),
}
description = _("Floating point number")
def get_prep_value(self, value):
value = super(FloatField, self).get_prep_value(value)
if value is None:
return None
return float(value)
def get_internal_type(self):
return "FloatField"
def to_python(self, value):
if value is None:
return value
try:
return float(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def formfield(self, **kwargs):
defaults = {'form_class': forms.FloatField}
defaults.update(kwargs)
return super(FloatField, self).formfield(**defaults)
class IntegerField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be an integer."),
}
description = _("Integer")
@cached_property
def validators(self):
# These validators can't be added at field initialization time since
# they're based on values retrieved from `connection`.
range_validators = []
internal_type = self.get_internal_type()
min_value, max_value = connection.ops.integer_field_range(internal_type)
if min_value is not None:
range_validators.append(validators.MinValueValidator(min_value))
if max_value is not None:
range_validators.append(validators.MaxValueValidator(max_value))
return super(IntegerField, self).validators + range_validators
def get_prep_value(self, value):
value = super(IntegerField, self).get_prep_value(value)
if value is None:
return None
return int(value)
def get_prep_lookup(self, lookup_type, value):
if ((lookup_type == 'gte' or lookup_type == 'lt')
and isinstance(value, float)):
value = math.ceil(value)
return super(IntegerField, self).get_prep_lookup(lookup_type, value)
def get_internal_type(self):
return "IntegerField"
def to_python(self, value):
if value is None:
return value
try:
return int(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def formfield(self, **kwargs):
defaults = {'form_class': forms.IntegerField}
defaults.update(kwargs)
return super(IntegerField, self).formfield(**defaults)
class BigIntegerField(IntegerField):
empty_strings_allowed = False
description = _("Big (8 byte) integer")
MAX_BIGINT = 9223372036854775807
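# i.e. 2**63 - 1, the upper bound of a signed 64-bit integer column.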
def get_internal_type(self):
return "BigIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': -BigIntegerField.MAX_BIGINT - 1,
'max_value': BigIntegerField.MAX_BIGINT}
defaults.update(kwargs)
return super(BigIntegerField, self).formfield(**defaults)
class IPAddressField(Field):
empty_strings_allowed = False
description = _("IPv4 address")
def __init__(self, *args, **kwargs):
warnings.warn("IPAddressField has been deprecated. Use GenericIPAddressField instead.",
RemovedInDjango19Warning)
kwargs['max_length'] = 15
super(IPAddressField, self).__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(IPAddressField, self).deconstruct()
del kwargs['max_length']
return name, path, args, kwargs
def get_prep_value(self, value):
value = super(IPAddressField, self).get_prep_value(value)
if value is None:
return None
return six.text_type(value)
def get_internal_type(self):
return "IPAddressField"
def formfield(self, **kwargs):
defaults = {'form_class': forms.IPAddressField}
defaults.update(kwargs)
return super(IPAddressField, self).formfield(**defaults)
class GenericIPAddressField(Field):
empty_strings_allowed = True
description = _("IP address")
default_error_messages = {}
def __init__(self, verbose_name=None, name=None, protocol='both',
unpack_ipv4=False, *args, **kwargs):
self.unpack_ipv4 = unpack_ipv4
self.protocol = protocol
self.default_validators, invalid_error_message = \
validators.ip_address_validators(protocol, unpack_ipv4)
self.default_error_messages['invalid'] = invalid_error_message
kwargs['max_length'] = 39
super(GenericIPAddressField, self).__init__(verbose_name, name, *args,
**kwargs)
def check(self, **kwargs):
errors = super(GenericIPAddressField, self).check(**kwargs)
errors.extend(self._check_blank_and_null_values(**kwargs))
return errors
def _check_blank_and_null_values(self, **kwargs):
if not getattr(self, 'null', False) and getattr(self, 'blank', False):
return [
checks.Error(
('GenericIPAddressFields cannot have blank=True if null=False, '
'as blank values are stored as nulls.'),
hint=None,
obj=self,
id='fields.E150',
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super(GenericIPAddressField, self).deconstruct()
if self.unpack_ipv4 is not False:
kwargs['unpack_ipv4'] = self.unpack_ipv4
if self.protocol != "both":
kwargs['protocol'] = self.protocol
if kwargs.get("max_length", None) == 39:
del kwargs['max_length']
return name, path, args, kwargs
def get_internal_type(self):
return "GenericIPAddressField"
def to_python(self, value):
if value and ':' in value:
return clean_ipv6_address(value,
self.unpack_ipv4, self.error_messages['invalid'])
return value
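# Illustrative: with unpack_ipv4=True an IPv4-mapped address such as
# '::ffff:192.0.2.1' is normalized to '192.0.2.1'; plain IPv4 strings
# (no ':') pass through untouched.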
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
return value or None
def get_prep_value(self, value):
value = super(GenericIPAddressField, self).get_prep_value(value)
if value is None:
return None
if value and ':' in value:
try:
return clean_ipv6_address(value, self.unpack_ipv4)
except exceptions.ValidationError:
pass
return six.text_type(value)
def formfield(self, **kwargs):
defaults = {
'protocol': self.protocol,
'form_class': forms.GenericIPAddressField,
}
defaults.update(kwargs)
return super(GenericIPAddressField, self).formfield(**defaults)
class NullBooleanField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be either None, True or False."),
}
description = _("Boolean (Either True, False or None)")
def __init__(self, *args, **kwargs):
kwargs['null'] = True
kwargs['blank'] = True
super(NullBooleanField, self).__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(NullBooleanField, self).deconstruct()
del kwargs['null']
del kwargs['blank']
return name, path, args, kwargs
def get_internal_type(self):
return "NullBooleanField"
def to_python(self, value):
if value is None:
return None
if value in (True, False):
return bool(value)
if value in ('None',):
return None
if value in ('t', 'True', '1'):
return True
if value in ('f', 'False', '0'):
return False
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def get_prep_lookup(self, lookup_type, value):
# Special-case handling for filters coming from a Web request (e.g. the
# admin interface). Only works for scalar values (not lists). If you're
# passing in a list, you might as well make things the right type when
# constructing the list.
if value in ('1', '0'):
value = bool(int(value))
return super(NullBooleanField, self).get_prep_lookup(lookup_type,
value)
def get_prep_value(self, value):
value = super(NullBooleanField, self).get_prep_value(value)
if value is None:
return None
return bool(value)
def formfield(self, **kwargs):
defaults = {
'form_class': forms.NullBooleanField,
'required': not self.blank,
'label': capfirst(self.verbose_name),
'help_text': self.help_text}
defaults.update(kwargs)
return super(NullBooleanField, self).formfield(**defaults)
class PositiveIntegerField(IntegerField):
description = _("Positive integer")
def get_internal_type(self):
return "PositiveIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': 0}
defaults.update(kwargs)
return super(PositiveIntegerField, self).formfield(**defaults)
class PositiveSmallIntegerField(IntegerField):
description = _("Positive small integer")
def get_internal_type(self):
return "PositiveSmallIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': 0}
defaults.update(kwargs)
return super(PositiveSmallIntegerField, self).formfield(**defaults)
class SlugField(CharField):
default_validators = [validators.validate_slug]
description = _("Slug (up to %(max_length)s)")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = kwargs.get('max_length', 50)
# Set db_index=True unless it's been set manually.
if 'db_index' not in kwargs:
kwargs['db_index'] = True
super(SlugField, self).__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(SlugField, self).deconstruct()
if kwargs.get("max_length", None) == 50:
del kwargs['max_length']
if self.db_index is False:
kwargs['db_index'] = False
else:
del kwargs['db_index']
return name, path, args, kwargs
def get_internal_type(self):
return "SlugField"
def formfield(self, **kwargs):
defaults = {'form_class': forms.SlugField}
defaults.update(kwargs)
return super(SlugField, self).formfield(**defaults)
class SmallIntegerField(IntegerField):
description = _("Small integer")
def get_internal_type(self):
return "SmallIntegerField"
class TextField(Field):
description = _("Text")
def get_internal_type(self):
return "TextField"
def get_prep_value(self, value):
value = super(TextField, self).get_prep_value(value)
if isinstance(value, six.string_types) or value is None:
return value
return smart_text(value)
def formfield(self, **kwargs):
# Passing max_length to forms.CharField means that the value's length
# will be validated twice. This is considered acceptable since we want
# the value in the form field (to pass into widget for example).
defaults = {'max_length': self.max_length, 'widget': forms.Textarea}
defaults.update(kwargs)
return super(TextField, self).formfield(**defaults)
class TimeField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value has an invalid format. It must be in "
"HH:MM[:ss[.uuuuuu]] format."),
'invalid_time': _("'%(value)s' value has the correct format "
"(HH:MM[:ss[.uuuuuu]]) but it is an invalid time."),
}
description = _("Time")
def __init__(self, verbose_name=None, name=None, auto_now=False,
auto_now_add=False, **kwargs):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
if auto_now or auto_now_add:
kwargs['editable'] = False
kwargs['blank'] = True
super(TimeField, self).__init__(verbose_name, name, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(TimeField, self).deconstruct()
if self.auto_now is not False:
kwargs["auto_now"] = self.auto_now
if self.auto_now_add is not False:
kwargs["auto_now_add"] = self.auto_now_add
if self.auto_now or self.auto_now_add:
del kwargs['blank']
del kwargs['editable']
return name, path, args, kwargs
def get_internal_type(self):
return "TimeField"
def to_python(self, value):
if value is None:
return None
if isinstance(value, datetime.time):
return value
if isinstance(value, datetime.datetime):
# Not usually a good idea to pass in a datetime here (it loses
# information), but this can be a side-effect of interacting with a
# database backend (e.g. Oracle), so we'll be accommodating.
return value.time()
try:
parsed = parse_time(value)
if parsed is not None:
return parsed
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_time'],
code='invalid_time',
params={'value': value},
)
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.datetime.now().time()
setattr(model_instance, self.attname, value)
return value
else:
return super(TimeField, self).pre_save(model_instance, add)
def get_prep_value(self, value):
value = super(TimeField, self).get_prep_value(value)
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts times into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.value_to_db_time(value)
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
defaults = {'form_class': forms.TimeField}
defaults.update(kwargs)
return super(TimeField, self).formfield(**defaults)
class URLField(CharField):
default_validators = [validators.URLValidator()]
description = _("URL")
def __init__(self, verbose_name=None, name=None, **kwargs):
kwargs['max_length'] = kwargs.get('max_length', 200)
super(URLField, self).__init__(verbose_name, name, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(URLField, self).deconstruct()
if kwargs.get("max_length", None) == 200:
del kwargs['max_length']
return name, path, args, kwargs
def formfield(self, **kwargs):
# As with CharField, this will cause URL validation to be performed
# twice.
defaults = {
'form_class': forms.URLField,
}
defaults.update(kwargs)
return super(URLField, self).formfield(**defaults)
class BinaryField(Field):
description = _("Raw binary data")
empty_values = [None, b'']
def __init__(self, *args, **kwargs):
kwargs['editable'] = False
super(BinaryField, self).__init__(*args, **kwargs)
if self.max_length is not None:
self.validators.append(validators.MaxLengthValidator(self.max_length))
def deconstruct(self):
name, path, args, kwargs = super(BinaryField, self).deconstruct()
del kwargs['editable']
return name, path, args, kwargs
def get_internal_type(self):
return "BinaryField"
def get_default(self):
if self.has_default() and not callable(self.default):
return self.default
default = super(BinaryField, self).get_default()
if default == '':
return b''
return default
def get_db_prep_value(self, value, connection, prepared=False):
value = super(BinaryField, self).get_db_prep_value(value, connection, prepared)
if value is not None:
return connection.Database.Binary(value)
return value
def value_to_string(self, obj):
"""Binary data is serialized as base64"""
return b64encode(force_bytes(self._get_val_from_obj(obj))).decode('ascii')
def to_python(self, value):
# If it's a string, it should be base64-encoded data
if isinstance(value, six.text_type):
return six.memoryview(b64decode(force_bytes(value)))
return value
|
pism/pism | refs/heads/master | examples/bedmap2/readgeom.py | 1 | #!/usr/bin/env python3
# readgeom reads bed elevation and ice thickness from BEDMAP2 files
# and either shows as figures or writes into NetCDF file
# edit these:
BM2PATH = '/home/ed/Desktop/bedmap2_bin/'
showgeom = False
import numpy as np
import numpy.ma as ma
import matplotlib.pyplot as plt
import sys
try:
from netCDF4 import Dataset as NC
except ImportError:
print("netCDF4 is not installed!")
sys.exit(1)
from PISMNC import PISMDataset as PNC
N = 6667
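# BEDMAP2 grids are 6667 x 6667 cells on a 1 km polar-stereographic grid;
# cells with no data are flagged as -9999.0 and masked below.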
print("reading bedmap2 binary files from %s ..." % (BM2PATH))
fname = BM2PATH + 'bedmap2_bed.flt'
bed = ma.masked_equal(np.reshape(np.fromfile(fname, dtype=np.float32), (N, N)), -9999.0)
fname = BM2PATH + 'bedmap2_thickness.flt'
thk = ma.masked_equal(np.reshape(np.fromfile(fname, dtype=np.float32), (N, N)), -9999.0)
fname = BM2PATH + 'bedmap2_icemask_grounded_and_shelves.flt'
msk = ma.masked_equal(np.reshape(np.fromfile(fname, dtype=np.float32), (N, N)), -9999.0)
print(" range of bed = [%.2f, %.2f]" % (bed.min(), bed.max()))
print(" range of thk = [%.2f, %.2f]" % (thk.min(), thk.max()))
print(" range of msk = [%.2f, %.2f]" % (msk.min(), msk.max()))
if showgeom:
print("showing fields ...")
fig = plt.figure(1)
ax = plt.imshow(msk)
fig.colorbar(ax)
fig = plt.figure(2)
ax = plt.imshow(bed)
fig.colorbar(ax)
fig = plt.figure(3)
ax = plt.imshow(thk)
fig.colorbar(ax)
plt.show()
sys.exit(0)
#err = abs(bed[msk<0.5] + thk[msk<0.5] - srf[msk<0.5])
# print err.max()
outname = 'ant1kmgeom.nc'
print("writing NetCDF file '%s' ..." % outname)
try:
nc = PNC(outname, 'w', format='NETCDF3_CLASSIC')
except Exception:
    print("can't open file %s for writing" % outname)
    sys.exit(1)
print(" writing x,y ...")
dx = 1000.0
dy = 1000.0
x = np.linspace(0.0, (N - 1) * dx, N)
y = np.linspace(0.0, (N - 1) * dy, N)
nc.create_dimensions(x, y, time_dependent=False)
print(" writing topg ...")
nc.define_2d_field("topg", time_dependent=False,
attrs={"long_name": "elevation of bedrock",
"valid_range": (-9000.0, 9000.0),
"standard_name": "bedrock_altitude",
"units": "meters"})
nc.write_2d_field("topg", bed)
print(" writing thk ...")
nc.define_2d_field("thk", time_dependent=False,
attrs={"long_name": "thickness of ice sheet or ice shelf",
"valid_range": (0.0, 9000.0),
"standard_name": "land_ice_thickness",
"units": "meters"})
nc.write_2d_field("thk", thk)
nc.close()
print("done")
|
JetStarBlues/Nand-2-Tetris | refs/heads/master | Assembler/asm2bin_TECS_legacy.py | 1 | # ========================================================================================
#
# Description:
#
# Compiles Hack ASM (assembly) code to Hack BIN (binary) code
# Uses the vanilla TECS specification
#
# Attribution:
#
# Code by www.jk-quantized.com
#
# Redistribution and use of this code in source and binary forms must retain
# the above attribution notice and this condition.
#
# ========================================================================================
'''
--- Notes ---
> Instruction
Instruction - 0123456789ABCDEF
0 -> opCode
if opCode == 0, address instruction
123456789ABCDEF -> address
else, computation instruction
1 -> unused
2 -> unused
3 -> y = A (0) | M (1)
4 -> comp, zero_x
5 -> comp, not_x
6 -> comp, zero_y
7 -> comp, not_y
8 -> comp, and (0) | add (1)
9 -> comp, not_out
ABC -> destination
DEF -> jump
comp(utation) bits are sent to ALU
'''
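# Worked encoding examples (illustrative, derived from the note above and the
# lookup tables below):
#   '@21'   -> opCode 0 + 15-bit address                         -> 0000000000010101
#   'D=D+A' -> '1' + comp '110000010' + dest '010' + jump '000'  -> 1110000010010000
#   'D;JGT' -> '1' + comp '110001100' + dest '000' + jump '001'  -> 1110001100000001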
# == Imports =================================================
# Built ins
import re
import os
# Hack computer
import Components._0__globalConstants as GC
from commonHelpers import *
# == Lookup Tables ===========================================
LT = {
'globalAddresses' : {
'@R0' : '@0',
'@R1' : '@1',
'@R2' : '@2',
'@R3' : '@3',
'@R4' : '@4',
'@R5' : '@5',
'@R6' : '@6',
'@R7' : '@7',
'@R8' : '@8',
'@R9' : '@9',
'@R10' : '@10',
'@R11' : '@11',
'@R12' : '@12',
'@R13' : '@13',
'@R14' : '@14',
'@R15' : '@15',
# VM structure
'@SP' : '@0',
'@LCL' : '@1',
'@ARG' : '@2',
'@THIS' : '@3',
'@THAT' : '@4',
'@TEMP0' : '@5',
'@TEMP1' : '@6',
'@TEMP2' : '@7',
'@TEMP3' : '@8',
'@TEMP4' : '@9',
'@TEMP5' : '@10',
'@TEMP6' : '@11',
'@TEMP7' : '@12',
'@GP0' : '@13',
'@GP1' : '@14',
'@GP2' : '@15',
'@SCREEN' : '@' + str( GC.SCREEN_MEMORY_MAP ),
'@KEYBOARD' : '@' + str( GC.KEYBOARD_MEMORY_MAP ),
'@MOUSE' : '@' + str( GC.MOUSE_MEMORY_MAP ),
},
'comp' : {
# ub1, ub0, ySel, zx, nx, zy, ny, f, no
'0' : '110101010',
'1' : '110111111',
'-1' : '110111010',
'D' : '110001100',
'A' : '110110000',
'!D' : '110001101',
'!A' : '110110001',
'-D' : '110001111',
'-A' : '110110011',
'D+1' : '110011111',
'A+1' : '110110111',
'D-1' : '110001110',
'A-1' : '110110010',
'D+A' : '110000010',
'A+D' : '110000010', # order doesn't matter
'D-A' : '110010011',
'A-D' : '110000111',
'D&A' : '110000000',
'A&D' : '110000000', # order doesn't matter
'D|A' : '110010101',
'A|D' : '110010101', # order doesn't matter
'M' : '111110000',
'!M' : '111110001',
'-M' : '111110011',
'M+1' : '111110111',
'M-1' : '111110010',
'D+M' : '111000010',
'M+D' : '111000010', # order doesn't matter
'D-M' : '111010011',
'M-D' : '111000111',
'D&M' : '111000000',
'M&D' : '111000000', # order doesn't matter
'D|M' : '111010101',
'M|D' : '111010101', # order doesn't matter
},
'dest' : {
# d3, d2, d1
'NULL' : '000',
'M' : '001',
'D' : '010',
'A' : '100',
'DM' : '011',
'MD' : '011', # order doesn't matter
'AM' : '101',
'MA' : '101', # order doesn't matter
'AD' : '110',
'DA' : '110', # order doesn't matter
'MDA' : '111',
'MAD' : '111', # order doesn't matter
'AMD' : '111', # order doesn't matter
'ADM' : '111', # order doesn't matter
'DMA' : '111', # order doesn't matter
'DAM' : '111' # order doesn't matter
},
'jump' : {
# j3, j2, j1
'NULL' : '000',
'JGT' : '001',
'JEQ' : '010',
'JLT' : '100',
'JGE' : '011',
'JLE' : '110',
'JNE' : '101',
'JMP' : '111'
}
}
# == Helpers =================================================
namePattern = '''
\w+ # letter|number|underscore sequence
(\.\w+)* # dot followed by w+
'''
namePattern = re.compile( namePattern, re.X )
def isValidName( name ):
return re.fullmatch( namePattern, name )
# == Debug ===================================================
def debugStuff( cmdList ):
# Print final assembly
for c in cmdList:
print( c )
print( '\n--\n' )
# Print labels
for kv in sorted(
knownAddresses_ProgramMemory.items(),
key = lambda x : int( x[ 1 ][ 1 : ] )
):
print( '{:<6} {}'.format( kv[ 1 ], kv[ 0 ] ) )
print( '\n--\n' )
# Print variables
for kv in sorted(
knownAddresses_DataMemory.items(),
key = lambda x : int( x[ 1 ][ 1 : ] )
):
print( '{:<6} {}'.format( kv[ 1 ], kv[ 0 ] ) )
# == Main ====================================================
# -- Constants -------------------------------------
# nBits = GC.N_BITS
nBits = 16
static_segment_start = GC.STATIC_START
static_segment_end = GC.STATIC_END
static_segment_size = static_segment_end - static_segment_start + 1
largest_immediate = 2 ** ( nBits - 1 ) - 1
largest_address = 2 ** 15 - 1
# -- Extraction -------------------------------------
# Select everything that is not a comment
cmdPattern = '''
^ # from beginning of string
.*? # select all characters until
(?=\/\/|[\r\n]) # reach start of a comment or the string's end
'''
cmdPattern = re.compile( cmdPattern, re.X )
def extractCmd( line ):
line = line.replace( ' ', '' ) # remove spaces
line = line.replace( '\t', '' ) # remove tabs
found = re.search( cmdPattern, line ) # select everything that is not a comment
if found:
return found.group( 0 )
else:
return None
def extractCmds( inputFile ):
commands = []
with open( inputFile, 'r' ) as file:
for line in file:
cmd = extractCmd( line )
if cmd:
commands.append( cmd )
return commands
# -- Translation -------------------------------------
knownAddresses_ProgramMemory = {}
knownAddresses_DataMemory = {}
knownAddresses_DataMemory.update( LT[ 'globalAddresses' ] ) # fill with global addresses
def handleLabels( cmdList ):
''' Remove labels and store their integer addresses '''
trimmedCmdList = []
for i in range( len( cmdList ) ):
cmd = cmdList[ i ]
if cmd[ 0 ] == '(':
label = cmd[ 1 : - 1 ] # get the label
if not isValidName( label ):
raise Exception( 'Invalid name - {}'.format( cmd ) )
addr = i - len( knownAddresses_ProgramMemory ) # and the corresponding address
# Note, subtraction is for '(label)' statements which will later be removed
knownAddresses_ProgramMemory[ '@{}'.format( label ) ] = '@{}'.format( addr ) # add it to dict of knownAddresses_ProgramMemory
else:
trimmedCmdList.append( cmd ) # not a label so include it
return trimmedCmdList
def handleVariables( cmdList ):
''' Replace label and variable names with integer addresses '''
freeAddress = static_segment_start
for i in range( len( cmdList ) ):
cmd = cmdList[ i ]
if cmd[ 0 ] == '@':
# Refers to an integer
if cmd[ 1 : ].isdigit():
# Check that valid immediate
if int( cmd[ 1 : ] ) > largest_immediate:
raise Exception( 'Invalid integer - {}'.format( cmd ) )
continue # skip
# Refers to a known label
elif cmd in knownAddresses_ProgramMemory:
cmdList[ i ] = knownAddresses_ProgramMemory[ cmd ]
# Refers to a known variable
elif cmd in knownAddresses_DataMemory:
cmdList[ i ] = knownAddresses_DataMemory[ cmd ]
# Allocate it
else:
name = cmd[ 1 : ]
if not isValidName( name ):
raise Exception( 'Invalid name - {}'.format( cmd ) )
if freeAddress > static_segment_end:
raise Exception( 'Ran out of static memory' )
newAddr = '@{}'.format( freeAddress ) # create new address
knownAddresses_DataMemory[ cmd ] = newAddr # add it to dict of knownAddresses_DataMemory
cmdList[ i ] = newAddr # and set it
freeAddress += 1 # register is no longer unallocated
print( 'Assembled program has {} global static variables. Maximum is {}.'.format(
freeAddress - static_segment_start,
static_segment_size
) )
# if freeAddress > static_segment_end:
# print( 'Assembled program exceeds maximum number of global variables by {}.'.format( freeAddress - static_segment_end ) )
return cmdList
def translateInstructions( cmdList ):
''' Translate assembly instructions to binary '''
binCmdList = []
for i in range( len( cmdList ) ):
#
cmd_s = cmdList[ i ]
cmd_b = None
# print( cmd_s )
# A instruction
if cmd_s[0] == '@':
opcode = '0'
addr = int( cmd_s[ 1 : ] )
addr = toBinary( addr, nBits - 1 )
cmd_b = opcode + addr
# HALT instruction (not vanilla TECS but useful)
elif cmd_s.upper() == 'HALT':
cmd_b = '1111110000000000'
# C instruction
else:
opcode = '1'
nUnusedBits = ( nBits - 16 ) # 16 bits used to encode opcode(1), dest(3), comp( 2 + 1 + 6 ), jump(3)
header = opcode + '1' * nUnusedBits
dest, comp, jump = [ None ] * 3
if '=' in cmd_s and ';' in cmd_s:
dest, comp, jump = re.split( '=|;', cmd_s )
elif '=' in cmd_s:
dest, comp = re.split( '=', cmd_s )
elif ';' in cmd_s:
comp, jump = re.split( ';', cmd_s )
# print( dest, comp, jump )
dest = LT[ 'dest' ][ dest.upper() ] if dest else LT[ 'dest' ][ 'NULL' ]
jump = LT[ 'jump' ][ jump.upper() ] if jump else LT[ 'jump' ][ 'NULL' ]
comp = LT[ 'comp' ][ comp.upper() ]
cmd_b = header + comp + dest + jump
#
binCmdList.append( cmd_b )
return binCmdList
def translateCmds( cmdList, debug ):
''' Translate assembly to binary '''
cmdList = handleLabels( cmdList )
cmdList = handleVariables( cmdList )
binCmdList = translateInstructions( cmdList )
if debug: debugStuff( cmdList )
return binCmdList
# -- Output --------------------------------------
def writeToOutputFile( binCmdList, outputFile ):
''' Generate an output file containing the binary commands '''
with open( outputFile, 'w' ) as file:
for cmd_binary in binCmdList:
file.write( cmd_binary )
file.write( '\n' )
# -- Run ------------------------------------------
def asm_to_bin( inputFile, outputFile, debug = False ):
# Read
cmds_assembly = extractCmds( inputFile )
# Translate
cmds_binary = translateCmds( cmds_assembly, debug )
print( 'Assembled program has {} lines. Maximum is {}.'.format( len( cmds_binary ), largest_address ) )
# Check size
if len( cmds_binary ) > largest_address:
print( 'Assembled program exceeds maximum length by {} lines.'.format( len( cmds_binary ) - largest_address ) )
# Write
writeToOutputFile( cmds_binary, outputFile )
# print( 'Done' )
def genBINFile( inputDirPath, debug = False ):
fileNames = os.listdir( inputDirPath )
for fileName in fileNames:
if fileName[ - 3 : ] == 'asm':
inputFilePath = inputDirPath + '/' + fileName
break # Translate only first encountered in directory
outputFilePath = inputDirPath + '/Main.bin'
asm_to_bin( inputFilePath, outputFilePath, debug )
|
Carrotsmile/CS428 | refs/heads/master | steerstats/steersuitedb/SFConfig.py | 8 | import psycopg2
# from Sequence import ConfigSequence
class SFConfig(object):
_id_name = "sf_ai_config_id"
_table_name = "sf_ai_config"
_insert_order = """
(sf_ai_config_id,
hidac_acceleration,
hidac_personal_space_threshold,
hidac_agent_repulsion_importance,
hidac_query_radius,
hidac_body_force,
hidac_agent_body_force,
hidac_sliding_friction_force,
hidac_agent_b,
hidac_agent_a,
hidac_wall_b,
hidac_wall_a,
hidac_max_speed
)"""
"""
sf_ai_config
(
sf_ai_config_id integer NOT NULL references config(config_id),
hidac_acceleration double precision not null,
hidac_personal_space_threshold double precision not null,
hidac_agent_repulsion_importance double precision not null,
hidac_query_radius double precision not null,
hidac_body_force double precision not null,
    hidac_agent_body_force double precision not null,
    hidac_sliding_friction_force double precision not null,
hidac_agent_b double precision not null,
hidac_agent_a double precision not null,
hidac_wall_b double precision not null,
hidac_wall_a double precision not null,
hidac_max_speed double precision not null,
CONSTRAINT sf_ai_config_pkey PRIMARY KEY (sf_ai_config_id)
) ;"""
# data should be a properly formed dictionary
def __init__(self, data):
self._data=data
def getConfigData(self, cur, n):
cur.execute("SELECT * FROM " + self._table_name + " where " + self._id_name + " = "+ str(n))
row = cur.fetchone()
return SFConfig(row)
def setValuesFromDict(self, valuesDict):
self._data = valuesDict
def insertConfig(self, cur, data):
try:
cur.execute("INSERT INTO " + self._table_name + " " +
self._insert_order + " " +
"VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)", # 12 of them
(
data['sf_ai_config_id'],
data['hidac_acceleration'],
data['hidac_personal_space_threshold'],
data['hidac_agent_repulsion_importance'],
data['hidac_query_radius'],
data['hidac_body_force'],
data['hidac_agent_body_force'],
data['hidac_sliding_friction_force'],
data['hidac_agent_b'],
data['hidac_agent_a'],
data['hidac_wall_b'],
data['hidac_wall_a'],
data['hidac_max_speed']
)
)
return data['sf_ai_config_id']
except psycopg2.DatabaseError, e:
print 'Error SFConfig: %s' % e
def insertConfig2(self, cur):
return self.insertConfig(cur, self._data)
# sys.exit(1)
|
kennedyshead/home-assistant | refs/heads/dev | homeassistant/components/atag/water_heater.py | 5 | """ATAG water heater component."""
from homeassistant.components.water_heater import (
ATTR_TEMPERATURE,
STATE_ECO,
STATE_PERFORMANCE,
WaterHeaterEntity,
)
from homeassistant.const import STATE_OFF, TEMP_CELSIUS
from . import DOMAIN, WATER_HEATER, AtagEntity
SUPPORT_FLAGS_HEATER = 0
OPERATION_LIST = [STATE_OFF, STATE_ECO, STATE_PERFORMANCE]
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Initialize DHW device from config entry."""
coordinator = hass.data[DOMAIN][config_entry.entry_id]
async_add_entities([AtagWaterHeater(coordinator, WATER_HEATER)])
class AtagWaterHeater(AtagEntity, WaterHeaterEntity):
"""Representation of an ATAG water heater."""
@property
def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_FLAGS_HEATER
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def current_temperature(self):
"""Return the current temperature."""
return self.coordinator.data.dhw.temperature
@property
def current_operation(self):
"""Return current operation."""
operation = self.coordinator.data.dhw.current_operation
return operation if operation in self.operation_list else STATE_OFF
@property
def operation_list(self):
"""List of available operation modes."""
return OPERATION_LIST
async def async_set_temperature(self, **kwargs):
"""Set new target temperature."""
if await self.coordinator.data.dhw.set_temp(kwargs.get(ATTR_TEMPERATURE)):
self.async_write_ha_state()
@property
def target_temperature(self):
"""Return the setpoint if water demand, otherwise return base temp (comfort level)."""
return self.coordinator.data.dhw.target_temperature
@property
def max_temp(self):
"""Return the maximum temperature."""
return self.coordinator.data.dhw.max_temp
@property
def min_temp(self):
"""Return the minimum temperature."""
return self.coordinator.data.dhw.min_temp
|
espadrine/opera | refs/heads/master | chromium/src/third_party/python_26/Lib/sched.py | 59 | """A generally useful event scheduler class.
Each instance of this class manages its own queue.
No multi-threading is implied; you are supposed to hack that
yourself, or use a single instance per application.
Each instance is parametrized with two functions, one that is
supposed to return the current time, one that is supposed to
implement a delay. You can implement real-time scheduling by
substituting time and sleep from built-in module time, or you can
implement simulated time by writing your own functions. This can
also be used to integrate scheduling with STDWIN events; the delay
function is allowed to modify the queue. Time can be expressed as
integers or floating point numbers, as long as it is consistent.
Events are specified by tuples (time, priority, action, argument).
As in UNIX, lower priority numbers mean higher priority; in this
way the queue can be maintained as a priority queue. Execution of the
event means calling the action function, passing it the argument
sequence in "argument" (remember that in Python, multiple function
arguments can be packed in a sequence).
The action function may be an instance method so it
has another way to reference private data (besides global variables).
"""
# XXX The timefunc and delayfunc should have been defined as methods
# XXX so you can define new kinds of schedulers using subclassing
# XXX instead of having to define a module or class just to hold
# XXX the global state of your particular time and delay functions.
import heapq
from collections import namedtuple
__all__ = ["scheduler"]
Event = namedtuple('Event', 'time, priority, action, argument')
class scheduler:
def __init__(self, timefunc, delayfunc):
"""Initialize a new instance, passing the time and delay
functions"""
self._queue = []
self.timefunc = timefunc
self.delayfunc = delayfunc
def enterabs(self, time, priority, action, argument):
"""Enter a new event in the queue at an absolute time.
Returns an ID for the event which can be used to remove it,
if necessary.
"""
event = Event(time, priority, action, argument)
heapq.heappush(self._queue, event)
return event # The ID
def enter(self, delay, priority, action, argument):
"""A variant that specifies the time as a relative time.
This is actually the more commonly used interface.
"""
time = self.timefunc() + delay
return self.enterabs(time, priority, action, argument)
def cancel(self, event):
"""Remove an event from the queue.
This must be presented the ID as returned by enter().
If the event is not in the queue, this raises RuntimeError.
"""
self._queue.remove(event)
heapq.heapify(self._queue)
def empty(self):
"""Check whether the queue is empty."""
return not self._queue
def run(self):
"""Execute events until the queue is empty.
When there is a positive delay until the first event, the
delay function is called and the event is left in the queue;
otherwise, the event is removed from the queue and executed
(its action function is called, passing it the argument). If
the delay function returns prematurely, it is simply
restarted.
It is legal for both the delay function and the action
        function to modify the queue or to raise an exception;
exceptions are not caught but the scheduler's state remains
well-defined so run() may be called again.
A questionable hack is added to allow other threads to run:
just after an event is executed, a delay of 0 is executed, to
avoid monopolizing the CPU when other threads are also
runnable.
"""
# localize variable access to minimize overhead
# and to improve thread safety
q = self._queue
delayfunc = self.delayfunc
timefunc = self.timefunc
pop = heapq.heappop
while q:
time, priority, action, argument = checked_event = q[0]
now = timefunc()
if now < time:
delayfunc(time - now)
else:
event = pop(q)
# Verify that the event was not removed or altered
# by another thread after we last looked at q[0].
if event is checked_event:
action(*argument)
delayfunc(0) # Let other threads run
else:
heapq.heappush(q, event)
@property
def queue(self):
"""An ordered list of upcoming events.
Events are named tuples with fields for:
time, priority, action, arguments
"""
# Use heapq to sort the queue rather than using 'sorted(self._queue)'.
# With heapq, two events scheduled at the same time will show in
# the actual order they would be retrieved.
events = self._queue[:]
return map(heapq.heappop, [events]*len(events))
|
vaas-krish/openthread | refs/heads/master | tools/harness-automation/cases/ed_6_3_1.py | 16 | #!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from autothreadharness.harness_case import HarnessCase
class ED_6_3_1(HarnessCase):
role = HarnessCase.ROLE_ED
case = '6 3 1'
golden_devices_required = 2
def on_dialog(self, dialog, title):
pass
if __name__ == '__main__':
unittest.main()
|
ChihChengLiang/python-rtmbot | refs/heads/master | rtmbot/utils/module_loading.py | 6 | from importlib import import_module
def import_string(dotted_path):
"""
Source: django.utils.module_loading
Import a dotted module path and return the attribute/class designated by the
last name in the path. Raise ImportError if the import failed.
"""
try:
module_path, class_name = dotted_path.rsplit('.', 1)
except ValueError:
msg = "%s doesn't look like a module path" % dotted_path
raise ImportError(msg)
module = import_module(module_path)
try:
return getattr(module, class_name)
except AttributeError:
msg = 'Module "%s" does not define a "%s" attribute/class' % (
module_path, class_name)
raise ImportError(msg)
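# Illustrative usage (a sketch, not part of the original module):
#   OrderedDict = import_string('collections.OrderedDict')
#   import_string('collections') raises ImportError, because a bare module
#   name has no trailing attribute/class to resolve.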
|
zhengjue/mytornado | refs/heads/master | mydjiango/echo_site/manage.py | 1 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "echo_site.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
sjspence/epicPCR | refs/heads/master | filterBar.py | 1 | #!/usr/bin/env python
# filterBar.py
# 10/20/2014 Sarah J. Spencer, Alm Lab, MIT
# Filter Illumina sequence reads for matches to synthetic barcode primer
# structure. Export 16S sequences trimmed to common length.
from optparse import OptionParser
# Import data from fasta file and save in header and sequence variables
def importData(inputFileName):
    rawHeaders = []
    rawSeqs = []
    inputFile = open(inputFileName, 'r')
track = 'header'
for line in inputFile:
if track == 'header':
rawHeaders.append(line)
track = 'seq'
else:
rawSeqs.append(line)
track = 'header'
inputFile.close()
return rawHeaders, rawSeqs
# Parse the joined fasta read for designed primer sequence structure
# Output: 16S sequences from fusions that match the designed structure
def fwdparse(rawHeaders, rawSeqs, outputLength):
usableHeaders = []
usableSeqs = []
barcodes = []
for i,rawSeq in enumerate(rawSeqs):
if len(rawSeq) < (60+outputLength):
continue
if rawSeq[59+outputLength] == '\n':
continue
        if ((rawSeq[20:49] == 'GATCATGACCCATTTGGAGAAGATGCAGC') and
            (rawSeq[49] in ('A', 'C')) and
            (rawSeq[50:59] == 'GCCGCGGTA')):
usableHeaders.append(rawHeaders[i].strip() +
' droplet_bc=' + rawSeq[0:20] + '\n')
usableSeqs.append(rawSeq[60:60+outputLength] + '\n')
return usableHeaders, usableSeqs
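# Read layout assumed by fwdparse (illustrative summary of the indices above):
#   [0:20]               droplet barcode (20 nt), recorded in the header
#   [20:49]              fusion primer GATCATGACCCATTTGGAGAAGATGCAGC
#   [49]                 'A' or 'C'
#   [50:59]              16S anchor GCCGCGGTA
#   [60:60+outputLength] trimmed 16S sequence written to the output file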
# This wrapper function takes user-defined input and output files as well as
# the common length to trim 16S sequences. These variables are used to parse
# and export 16S fasta sequences from fusions that match the barcode fusion
# structure.
def main():
parser = OptionParser()
parser.add_option("-i", "--input", dest="i",
help="Input fasta file of non-chimeric sequences.",
metavar="INPUT")
parser.add_option("-l", "--length", dest="l",
help="Length to trim output sequences.",
metavar="LENGTH")
parser.add_option("-o", "--output", dest="o",
help="Output fasta file of structure-filtered sequences.",
metavar="OUTPUT")
(options, args) = parser.parse_args()
#Filter for sequence structure and save filtered sequences
rawHeaders, rawSeqs = importData(options.i)
usableHeaders, usableSeqs = fwdparse(rawHeaders, rawSeqs, int(options.l))
outFile = open(options.o, 'w')
for i, header in enumerate(usableHeaders):
outFile.write(usableHeaders[i])
outFile.write(usableSeqs[i])
outFile.close()
if __name__ == "__main__":
main()
|
beiko-lab/gengis | refs/heads/master | bin/Lib/site-packages/numpy/oldnumeric/fft.py | 1 |
__all__ = ['fft', 'fft2d', 'fftnd', 'hermite_fft', 'inverse_fft',
'inverse_fft2d', 'inverse_fftnd',
'inverse_hermite_fft', 'inverse_real_fft',
'inverse_real_fft2d', 'inverse_real_fftnd',
'real_fft', 'real_fft2d', 'real_fftnd']
from numpy.fft import fft
from numpy.fft import fft2 as fft2d
from numpy.fft import fftn as fftnd
from numpy.fft import hfft as hermite_fft
from numpy.fft import ifft as inverse_fft
from numpy.fft import ifft2 as inverse_fft2d
from numpy.fft import ifftn as inverse_fftnd
from numpy.fft import ihfft as inverse_hermite_fft
from numpy.fft import irfft as inverse_real_fft
from numpy.fft import irfft2 as inverse_real_fft2d
from numpy.fft import irfftn as inverse_real_fftnd
from numpy.fft import rfft as real_fft
from numpy.fft import rfft2 as real_fft2d
from numpy.fft import rfftn as real_fftnd
|
eltonkevani/tempest_el_env | refs/heads/master | tempest/api/compute/admin/test_availability_zone.py | 3 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import exceptions
from tempest.test import attr
class AvailabilityZoneAdminTestJSON(base.BaseV2ComputeAdminTest):
"""
Tests Availability Zone API List that require admin privileges
"""
_interface = 'json'
@classmethod
def setUpClass(cls):
super(AvailabilityZoneAdminTestJSON, cls).setUpClass()
cls.client = cls.os_adm.availability_zone_client
cls.non_adm_client = cls.availability_zone_client
@attr(type='gate')
def test_get_availability_zone_list(self):
# List of availability zone
resp, availability_zone = self.client.get_availability_zone_list()
self.assertEqual(200, resp.status)
self.assertTrue(len(availability_zone) > 0)
@attr(type='gate')
def test_get_availability_zone_list_detail(self):
# List of availability zones and available services
resp, availability_zone = \
self.client.get_availability_zone_list_detail()
self.assertEqual(200, resp.status)
self.assertTrue(len(availability_zone) > 0)
@attr(type='gate')
def test_get_availability_zone_list_with_non_admin_user(self):
# List of availability zone with non-administrator user
resp, availability_zone = \
self.non_adm_client.get_availability_zone_list()
self.assertEqual(200, resp.status)
self.assertTrue(len(availability_zone) > 0)
@attr(type=['negative', 'gate'])
def test_get_availability_zone_list_detail_with_non_admin_user(self):
# List of availability zones and available services with
# non-administrator user
self.assertRaises(
exceptions.Unauthorized,
self.non_adm_client.get_availability_zone_list_detail)
class AvailabilityZoneAdminTestXML(AvailabilityZoneAdminTestJSON):
_interface = 'xml'
|
40223220/cd0504 | refs/heads/master | static/Brython3.1.1-20150328-091302/Lib/_markupbase.py | 891 | """Shared support for scanning document type declarations in HTML and XHTML.
This module is used as a foundation for the html.parser module. It has no
documented public API and should not be used directly.
"""
import re
_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9]*\s*').match
_declstringlit_match = re.compile(r'(\'[^\']*\'|"[^"]*")\s*').match
_commentclose = re.compile(r'--\s*>')
_markedsectionclose = re.compile(r']\s*]\s*>')
# An analysis of the MS-Word extensions is available at
# http://www.planetpublish.com/xmlarena/xap/Thursday/WordtoXML.pdf
_msmarkedsectionclose = re.compile(r']\s*>')
del re
class ParserBase:
"""Parser base class which provides some common support methods used
by the SGML/HTML and XHTML parsers."""
def __init__(self):
if self.__class__ is ParserBase:
raise RuntimeError(
"_markupbase.ParserBase must be subclassed")
def error(self, message):
raise NotImplementedError(
"subclasses of ParserBase must override error()")
def reset(self):
self.lineno = 1
self.offset = 0
def getpos(self):
"""Return current line number and offset."""
return self.lineno, self.offset
# Internal -- update line number and offset. This should be
# called for each piece of data exactly once, in order -- in other
# words the concatenation of all the input strings to this
# function should be exactly the entire input.
def updatepos(self, i, j):
if i >= j:
return j
rawdata = self.rawdata
nlines = rawdata.count("\n", i, j)
if nlines:
self.lineno = self.lineno + nlines
pos = rawdata.rindex("\n", i, j) # Should not fail
self.offset = j-(pos+1)
else:
self.offset = self.offset + j-i
return j
_decl_otherchars = ''
# Internal -- parse declaration (for use by subclasses).
def parse_declaration(self, i):
# This is some sort of declaration; in "HTML as
# deployed," this should only be the document type
# declaration ("<!DOCTYPE html...>").
# ISO 8879:1986, however, has more complex
# declaration syntax for elements in <!...>, including:
# --comment--
# [marked section]
# name in the following list: ENTITY, DOCTYPE, ELEMENT,
# ATTLIST, NOTATION, SHORTREF, USEMAP,
# LINKTYPE, LINK, IDLINK, USELINK, SYSTEM
rawdata = self.rawdata
j = i + 2
assert rawdata[i:j] == "<!", "unexpected call to parse_declaration"
if rawdata[j:j+1] == ">":
# the empty comment <!>
return j + 1
if rawdata[j:j+1] in ("-", ""):
# Start of comment followed by buffer boundary,
# or just a buffer boundary.
return -1
# A simple, practical version could look like: ((name|stringlit) S*) + '>'
n = len(rawdata)
if rawdata[j:j+2] == '--': #comment
# Locate --.*-- as the body of the comment
return self.parse_comment(i)
elif rawdata[j] == '[': #marked section
# Locate [statusWord [...arbitrary SGML...]] as the body of the marked section
# Where statusWord is one of TEMP, CDATA, IGNORE, INCLUDE, RCDATA
# Note that this is extended by Microsoft Office "Save as Web" function
# to include [if...] and [endif].
return self.parse_marked_section(i)
else: #all other declaration elements
decltype, j = self._scan_name(j, i)
if j < 0:
return j
if decltype == "doctype":
self._decl_otherchars = ''
while j < n:
c = rawdata[j]
if c == ">":
# end of declaration syntax
data = rawdata[i+2:j]
if decltype == "doctype":
self.handle_decl(data)
else:
# According to the HTML5 specs sections "8.2.4.44 Bogus
# comment state" and "8.2.4.45 Markup declaration open
# state", a comment token should be emitted.
# Calling unknown_decl provides more flexibility though.
self.unknown_decl(data)
return j + 1
if c in "\"'":
m = _declstringlit_match(rawdata, j)
if not m:
return -1 # incomplete
j = m.end()
elif c in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ":
name, j = self._scan_name(j, i)
elif c in self._decl_otherchars:
j = j + 1
elif c == "[":
# this could be handled in a separate doctype parser
if decltype == "doctype":
j = self._parse_doctype_subset(j + 1, i)
elif decltype in {"attlist", "linktype", "link", "element"}:
# must tolerate []'d groups in a content model in an element declaration
# also in data attribute specifications of attlist declaration
# also link type declaration subsets in linktype declarations
# also link attribute specification lists in link declarations
self.error("unsupported '[' char in %s declaration" % decltype)
else:
self.error("unexpected '[' char in declaration")
else:
self.error(
"unexpected %r char in declaration" % rawdata[j])
if j < 0:
return j
return -1 # incomplete
# Internal -- parse a marked section
# Override this to handle MS-word extension syntax <![if word]>content<![endif]>
def parse_marked_section(self, i, report=1):
rawdata= self.rawdata
assert rawdata[i:i+3] == '<![', "unexpected call to parse_marked_section()"
sectName, j = self._scan_name( i+3, i )
if j < 0:
return j
if sectName in {"temp", "cdata", "ignore", "include", "rcdata"}:
# look for standard ]]> ending
match= _markedsectionclose.search(rawdata, i+3)
elif sectName in {"if", "else", "endif"}:
# look for MS Office ]> ending
match= _msmarkedsectionclose.search(rawdata, i+3)
else:
self.error('unknown status keyword %r in marked section' % rawdata[i+3:j])
if not match:
return -1
if report:
j = match.start(0)
self.unknown_decl(rawdata[i+3: j])
return match.end(0)
# Internal -- parse comment, return length or -1 if not terminated
def parse_comment(self, i, report=1):
rawdata = self.rawdata
if rawdata[i:i+4] != '<!--':
self.error('unexpected call to parse_comment()')
match = _commentclose.search(rawdata, i+4)
if not match:
return -1
if report:
j = match.start(0)
self.handle_comment(rawdata[i+4: j])
return match.end(0)
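    # e.g. with rawdata == '<!-- hi -->' and i == 0, handle_comment(' hi ')
    # is called and the index just past '-->' is returned (illustrative).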
# Internal -- scan past the internal subset in a <!DOCTYPE declaration,
# returning the index just past any whitespace following the trailing ']'.
def _parse_doctype_subset(self, i, declstartpos):
rawdata = self.rawdata
n = len(rawdata)
j = i
while j < n:
c = rawdata[j]
if c == "<":
s = rawdata[j:j+2]
if s == "<":
# end of buffer; incomplete
return -1
if s != "<!":
self.updatepos(declstartpos, j + 1)
self.error("unexpected char in internal subset (in %r)" % s)
if (j + 2) == n:
# end of buffer; incomplete
return -1
if (j + 4) > n:
# end of buffer; incomplete
return -1
if rawdata[j:j+4] == "<!--":
j = self.parse_comment(j, report=0)
if j < 0:
return j
continue
name, j = self._scan_name(j + 2, declstartpos)
if j == -1:
return -1
if name not in {"attlist", "element", "entity", "notation"}:
self.updatepos(declstartpos, j + 2)
self.error(
"unknown declaration %r in internal subset" % name)
# handle the individual names
meth = getattr(self, "_parse_doctype_" + name)
j = meth(j, declstartpos)
if j < 0:
return j
elif c == "%":
# parameter entity reference
if (j + 1) == n:
# end of buffer; incomplete
return -1
s, j = self._scan_name(j + 1, declstartpos)
if j < 0:
return j
if rawdata[j] == ";":
j = j + 1
elif c == "]":
j = j + 1
while j < n and rawdata[j].isspace():
j = j + 1
if j < n:
if rawdata[j] == ">":
return j
self.updatepos(declstartpos, j)
self.error("unexpected char after internal subset")
else:
return -1
elif c.isspace():
j = j + 1
else:
self.updatepos(declstartpos, j)
self.error("unexpected char %r in internal subset" % c)
# end of buffer reached
return -1
# Internal -- scan past <!ELEMENT declarations
def _parse_doctype_element(self, i, declstartpos):
name, j = self._scan_name(i, declstartpos)
if j == -1:
return -1
# style content model; just skip until '>'
rawdata = self.rawdata
if '>' in rawdata[j:]:
return rawdata.find(">", j) + 1
return -1
# Internal -- scan past <!ATTLIST declarations
def _parse_doctype_attlist(self, i, declstartpos):
rawdata = self.rawdata
name, j = self._scan_name(i, declstartpos)
c = rawdata[j:j+1]
if c == "":
return -1
if c == ">":
return j + 1
while 1:
# scan a series of attribute descriptions; simplified:
# name type [value] [#constraint]
name, j = self._scan_name(j, declstartpos)
if j < 0:
return j
c = rawdata[j:j+1]
if c == "":
return -1
if c == "(":
# an enumerated type; look for ')'
if ")" in rawdata[j:]:
j = rawdata.find(")", j) + 1
else:
return -1
while rawdata[j:j+1].isspace():
j = j + 1
if not rawdata[j:]:
# end of buffer, incomplete
return -1
else:
name, j = self._scan_name(j, declstartpos)
c = rawdata[j:j+1]
if not c:
return -1
if c in "'\"":
m = _declstringlit_match(rawdata, j)
if m:
j = m.end()
else:
return -1
c = rawdata[j:j+1]
if not c:
return -1
if c == "#":
if rawdata[j:] == "#":
# end of buffer
return -1
name, j = self._scan_name(j + 1, declstartpos)
if j < 0:
return j
c = rawdata[j:j+1]
if not c:
return -1
if c == '>':
# all done
return j + 1
# Internal -- scan past <!NOTATION declarations
def _parse_doctype_notation(self, i, declstartpos):
name, j = self._scan_name(i, declstartpos)
if j < 0:
return j
rawdata = self.rawdata
while 1:
c = rawdata[j:j+1]
if not c:
# end of buffer; incomplete
return -1
if c == '>':
return j + 1
if c in "'\"":
m = _declstringlit_match(rawdata, j)
if not m:
return -1
j = m.end()
else:
name, j = self._scan_name(j, declstartpos)
if j < 0:
return j
# Internal -- scan past <!ENTITY declarations
def _parse_doctype_entity(self, i, declstartpos):
rawdata = self.rawdata
if rawdata[i:i+1] == "%":
j = i + 1
while 1:
c = rawdata[j:j+1]
if not c:
return -1
if c.isspace():
j = j + 1
else:
break
else:
j = i
name, j = self._scan_name(j, declstartpos)
if j < 0:
return j
while 1:
c = self.rawdata[j:j+1]
if not c:
return -1
if c in "'\"":
m = _declstringlit_match(rawdata, j)
if m:
j = m.end()
else:
return -1 # incomplete
elif c == ">":
return j + 1
else:
name, j = self._scan_name(j, declstartpos)
if j < 0:
return j
    # Internal -- scan a name token; return the name and the new position,
    # or None and -1 if we've reached the end of the buffer.
def _scan_name(self, i, declstartpos):
rawdata = self.rawdata
n = len(rawdata)
if i == n:
return None, -1
m = _declname_match(rawdata, i)
if m:
s = m.group()
name = s.strip()
if (i + len(s)) == n:
return None, -1 # end of buffer
return name.lower(), m.end()
else:
self.updatepos(declstartpos, i)
self.error("expected name token at %r"
% rawdata[declstartpos:declstartpos+20])
# To be overridden -- handlers for unknown objects
def unknown_decl(self, data):
pass
|
cpausmit/IntelROCCS | refs/heads/master | Api/src/crabApi.py | 3 | #!/usr/local/bin/python
#---------------------------------------------------------------------------------------------------
# Access CRAB3 data via HTCondor
#---------------------------------------------------------------------------------------------------
import os, htcondor, ConfigParser
from email.MIMEText import MIMEText
from email.MIMEMultipart import MIMEMultipart
from email.Utils import formataddr
from subprocess import Popen, PIPE
class crabApi():
def __init__(self):
config = ConfigParser.RawConfigParser()
config.read(os.path.join(os.path.dirname(__file__), 'api.cfg'))
collectorName = config.get('crab', 'collector')
self.fromEmail = config.items('from_email')[0]
self.toEmails = config.items('error_emails')
        # Will try to reach the collector up to 3 times before reporting an error
for attempt in range(3):
try:
self.collector = htcondor.Collector(collectorName)
except IOError, e:
continue
else:
break
else:
self.error(e)
def error(self, e):
# report error
title = "FATAL IntelROCCS Error -- CRAB"
text = "FATAL -- %s" % (str(e),)
msg = MIMEMultipart()
msg['Subject'] = title
msg['From'] = formataddr(self.fromEmail)
msg['To'] = self._toStr(self.toEmails)
msg1 = MIMEMultipart("alternative")
msgText1 = MIMEText("<pre>%s</pre>" % text, "html")
msgText2 = MIMEText(text)
msg1.attach(msgText2)
msg1.attach(msgText1)
msg.attach(msg1)
msg = msg.as_string()
p = Popen(["/usr/sbin/sendmail", "-toi"], stdin=PIPE)
p.communicate(msg)
print "FATAL -- %s" % (str(e))
def _toStr(self, toEmails):
names = [formataddr(email) for email in toEmails]
return ', '.join(names)
#===================================================================================================
# M A I N F U N C T I O N
#===================================================================================================
def locateAll(self, types):
returnObjects = self.collector.locateAll(types)
return returnObjects
def query(self, types, query, attributes=[]):
returnObjects = self.collector.query(types, query, attributes)
return returnObjects
def getJobs(self, timestamp):
data = []
query = 'TaskType =?= "ROOT" && JobStatus =?= 2 && QDate < %d' % (timestamp)
attributes = ["CRAB_InputData", "QDate", "CRAB_UserHN", "CRAB_JobCount", "DAG_NodesQueued"]
schedulers = self.locateAll(htcondor.DaemonTypes.Schedd)
for scheduler in schedulers:
# query all schedulers, if error retry up to 3 times
for attempt in range(3):
try:
schedd = htcondor.Schedd(scheduler)
jobs = schedd.query(query, attributes)
for job in jobs:
data.append(job)
except IOError, e:
continue
else:
break
else:
self.error(e)
return data
def getCpus(self, site):
cpus = 0
query = 'GLIDEIN_CMSSite =?= "%s" && CPUs > 0' % (site)
attributes = ["GLIDEIN_CMSSite", "CPUs"]
ads = self.query(htcondor.AdTypes.Startd, query, attributes)
for ad in ads:
cpus += ad.get("CPUs")
return cpus
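# Illustrative usage sketch (site name and timestamp are hypothetical):
#   crab = crabApi()
#   jobs = crab.getJobs(1400000000)  # ROOT-type jobs queued before this epoch
#   cpus = crab.getCpus("T2_US_MIT") # CPUs currently advertised for the site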
|
sriprasanna/django-1.3.1 | refs/heads/master | django/contrib/gis/db/backend/__init__.py | 307 | from django.db import connection
if hasattr(connection.ops, 'spatial_version'):
from warnings import warn
warn('The `django.contrib.gis.db.backend` module was refactored and '
'renamed to `django.contrib.gis.db.backends` in 1.2. '
'All functionality of `SpatialBackend` '
'has been moved to the `ops` attribute of the spatial database '
'backend. A `SpatialBackend` alias is provided here for '
'backwards-compatibility, but will be removed in 1.3.')
SpatialBackend = connection.ops
|
blueboxgroup/nova | refs/heads/master | nova/compute/resources/vcpu.py | 58 | # Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from nova.compute.resources import base
LOG = logging.getLogger(__name__)
class VCPU(base.Resource):
"""VCPU compute resource plugin.
This is effectively a simple counter based on the vcpu requirement of each
instance.
"""
def __init__(self):
# initialize to a 'zero' resource.
# reset will be called to set real resource values
self._total = 0
self._used = 0
def reset(self, resources, driver):
# total vcpu is reset to the value taken from resources.
self._total = int(resources['vcpus'])
self._used = 0
def _get_requested(self, usage):
return int(usage.get('vcpus', 0))
def _get_limit(self, limits):
if limits and 'vcpu' in limits:
return int(limits.get('vcpu'))
def test(self, usage, limits):
requested = self._get_requested(usage)
limit = self._get_limit(limits)
LOG.debug('Total CPUs: %(total)d VCPUs, used: %(used).02f VCPUs' %
{'total': self._total, 'used': self._used})
if limit is None:
# treat resource as unlimited:
LOG.debug('CPUs limit not specified, defaulting to unlimited')
return
free = limit - self._used
# Oversubscribed resource policy info:
LOG.debug('CPUs limit: %(limit).02f VCPUs, free: %(free).02f VCPUs' %
{'limit': limit, 'free': free})
if requested > free:
return ('Free CPUs %(free).02f VCPUs < '
'requested %(requested)d VCPUs' %
{'free': free, 'requested': requested})
def add_instance(self, usage):
requested = int(usage.get('vcpus', 0))
self._used += requested
def remove_instance(self, usage):
requested = int(usage.get('vcpus', 0))
self._used -= requested
def write(self, resources):
resources['vcpus'] = self._total
resources['vcpus_used'] = self._used
def report_free(self):
free_vcpus = self._total - self._used
LOG.debug('Free VCPUs: %s' % free_vcpus)
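# Illustrative usage sketch (not part of nova; values are made up):
#   r = VCPU()
#   r.reset({'vcpus': 8}, driver=None)
#   r.add_instance({'vcpus': 2})
#   r.test({'vcpus': 4}, {'vcpu': 8})  # returns None: 4 <= 8 - 2 free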
|
DannySapz/is210-week-03-warmup | refs/heads/master | tests/test_task_13.py | 28 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests lesson 03 task 13."""
# Import Python libs
import unittest
# Import student file
import task_13
class L03T13TestCase(unittest.TestCase):
"""
Tests for lesson 03 task 13.
"""
def test_frac_dec_equal(self):
"""
Tests that the FRACVAL and DECVAL are equal.
"""
self.assertFalse(task_13.FRAC_DEC_EQUAL)
def test_dec_float_inequal(self):
"""
Tests that the DECVAL and FLOATVAL are inequal.
"""
self.assertTrue(task_13.DEC_FLOAT_INEQUAL)
if __name__ == '__main__':
unittest.main()
|
songyi199111/sentry | refs/heads/master | src/sentry/models/eventmapping.py | 23 | """
sentry.models.groupmeta
~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from django.db import models
from django.utils import timezone
from sentry.db.models import FlexibleForeignKey, Model, sane_repr
class EventMapping(Model):
__core__ = False
project = FlexibleForeignKey('sentry.Project')
group = FlexibleForeignKey('sentry.Group')
event_id = models.CharField(max_length=32)
date_added = models.DateTimeField(default=timezone.now)
class Meta:
app_label = 'sentry'
db_table = 'sentry_eventmapping'
unique_together = (('project', 'event_id'),)
__repr__ = sane_repr('project_id', 'group_id', 'event_id')
@property
def team(self):
return self.project.team
|
niosus/tapiriik | refs/heads/master | tapiriik/services/fit.py | 8 | from datetime import datetime, timedelta
from .interchange import WaypointType, ActivityStatisticUnit, ActivityType, LapIntensity, LapTriggerMethod
from .devices import DeviceIdentifier, DeviceIdentifierType
import struct
import sys
import pytz
class FITFileType:
Activity = 4 # The only one we care about now.
class FITManufacturer:
DEVELOPMENT = 255 # $1500/year for one of these numbers.
class FITEvent:
Timer = 0
Lap = 9
Activity = 26
class FITEventType:
Start = 0
Stop = 1
# It's not a coincidence that these enums match the ones in interchange perfectly
class FITLapIntensity:
Active = 0
Rest = 1
Warmup = 2
Cooldown = 3
class FITLapTriggerMethod:
Manual = 0
Time = 1
Distance = 2
PositionStart = 3
PositionLap = 4
PositionWaypoint = 5
PositionMarked = 6
SessionEnd = 7
FitnessEquipment = 8
class FITActivityType:
GENERIC = 0
RUNNING = 1
CYCLING = 2
TRANSITION = 3
FITNESS_EQUIPMENT = 4
SWIMMING = 5
WALKING = 6
ALL = 254
class FITMessageDataType:
def __init__(self, name, typeField, size, packFormat, invalid, formatter=None):
self.Name = name
self.TypeField = typeField
self.Size = size
self.PackFormat = packFormat
self.Formatter = formatter
self.InvalidValue = invalid
class FITMessageTemplate:
def __init__(self, name, number, *args, fields=None):
self.Name = name
self.Number = number
self.Fields = {}
self.FieldNameSet = set()
self.FieldNameList = []
if len(args) == 1 and type(args[0]) is dict:
fields = args[0]
self.Fields = fields
self.FieldNameSet = set(fields.keys()) # It strikes me that keys might already be a set?
else:
# Supply fields in order NUM, NAME, TYPE
for x in range(0, int(len(args)/3)):
n = x * 3
self.Fields[args[n+1]] = {"Name": args[n+1], "Number": args[n], "Type": args[n+2]}
self.FieldNameSet.add(args[n+1])
sortedFields = list(self.Fields.values())
sortedFields.sort(key = lambda x: x["Number"])
self.FieldNameList = [x["Name"] for x in sortedFields] # *ordered*
class FITMessageGenerator:
def __init__(self):
self._types = {}
self._messageTemplates = {}
self._definitions = {}
self._result = []
# All our convenience functions for preparing the field types to be packed.
def stringFormatter(input):
raise Exception("Not implemented")
def dateTimeFormatter(input):
# UINT32
# Seconds since UTC 00:00 Dec 31 1989. If <0x10000000 = system time
if input is None:
return struct.pack("<I", 0xFFFFFFFF)
delta = round((input - datetime(hour=0, minute=0, month=12, day=31, year=1989)).total_seconds())
return struct.pack("<I", delta)
def msecFormatter(input):
# UINT32
if input is None:
return struct.pack("<I", 0xFFFFFFFF)
return struct.pack("<I", round((input if type(input) is not timedelta else input.total_seconds()) * 1000))
def mmPerSecFormatter(input):
# UINT16
if input is None:
return struct.pack("<H", 0xFFFF)
return struct.pack("<H", round(input * 1000))
def cmFormatter(input):
# UINT32
if input is None:
return struct.pack("<I", 0xFFFFFFFF)
return struct.pack("<I", round(input * 100))
def altitudeFormatter(input):
# UINT16
if input is None:
return struct.pack("<H", 0xFFFF)
return struct.pack("<H", round((input + 500) * 5)) # Increments of 1/5, offset from -500m :S
def semicirclesFormatter(input):
# SINT32
if input is None:
return struct.pack("<i", 0x7FFFFFFF) # FIT-defined invalid value
return struct.pack("<i", round(input * (2 ** 31 / 180)))
def versionFormatter(input):
# UINT16
if input is None:
return struct.pack("<H", 0xFFFF)
return struct.pack("<H", round(input * 100))
def defType(name, *args, **kwargs):
aliases = [name] if type(name) is not list else name
# Cheap cheap cheap
for alias in aliases:
self._types[alias] = FITMessageDataType(alias, *args, **kwargs)
defType(["enum", "file"], 0x00, 1, "B", 0xFF)
defType("sint8", 0x01, 1, "b", 0x7F)
defType("uint8", 0x02, 1, "B", 0xFF)
defType("sint16", 0x83, 2, "h", 0x7FFF)
defType(["uint16", "manufacturer"], 0x84, 2, "H", 0xFFFF)
defType("sint32", 0x85, 4, "i", 0x7FFFFFFF)
defType("uint32", 0x86, 4, "I", 0xFFFFFFFF)
defType("string", 0x07, None, None, 0x0, formatter=stringFormatter)
defType("float32", 0x88, 4, "f", 0xFFFFFFFF)
defType("float64", 0x89, 8, "d", 0xFFFFFFFFFFFFFFFF)
defType("uint8z", 0x0A, 1, "B", 0x00)
defType("uint16z", 0x0B, 2, "H", 0x00)
defType("uint32z", 0x0C, 4, "I", 0x00)
defType("byte", 0x0D, 1, "B", 0xFF) # This isn't totally correct, docs say "an array of bytes"
# Not strictly FIT fields, but convenient.
defType("date_time", 0x86, 4, None, 0xFFFFFFFF, formatter=dateTimeFormatter)
defType("duration_msec", 0x86, 4, None, 0xFFFFFFFF, formatter=msecFormatter)
defType("distance_cm", 0x86, 4, None, 0xFFFFFFFF, formatter=cmFormatter)
defType("mmPerSec", 0x84, 2, None, 0xFFFF, formatter=mmPerSecFormatter)
defType("semicircles", 0x85, 4, None, 0x7FFFFFFF, formatter=semicirclesFormatter)
defType("altitude", 0x84, 2, None, 0xFFFF, formatter=altitudeFormatter)
defType("version", 0x84, 2, None, 0xFFFF, formatter=versionFormatter)
def defMsg(name, *args):
self._messageTemplates[name] = FITMessageTemplate(name, *args)
defMsg("file_id", 0,
0, "type", "file",
1, "manufacturer", "manufacturer",
2, "product", "uint16",
3, "serial_number", "uint32z",
4, "time_created", "date_time",
5, "number", "uint16")
defMsg("file_creator", 49,
0, "software_version", "uint16",
1, "hardware_version", "uint8")
defMsg("activity", 34,
253, "timestamp", "date_time",
1, "num_sessions", "uint16",
2, "type", "enum",
3, "event", "enum", # Required
4, "event_type", "enum",
5, "local_timestamp", "date_time")
defMsg("session", 18,
253, "timestamp", "date_time",
2, "start_time", "date_time", # Vs timestamp, which was whenever the record was "written"/end of the session
7, "total_elapsed_time", "duration_msec", # Including pauses
8, "total_timer_time", "duration_msec", # Excluding pauses
59, "total_moving_time", "duration_msec",
5, "sport", "enum",
6, "sub_sport", "enum",
0, "event", "enum",
1, "event_type", "enum",
9, "total_distance", "distance_cm",
11,"total_calories", "uint16",
14, "avg_speed", "mmPerSec",
15, "max_speed", "mmPerSec",
16, "avg_heart_rate", "uint8",
17, "max_heart_rate", "uint8",
18, "avg_cadence", "uint8",
19, "max_cadence", "uint8",
20, "avg_power", "uint16",
21, "max_power", "uint16",
22, "total_ascent", "uint16",
23, "total_descent", "uint16",
49, "avg_altitude", "altitude",
50, "max_altitude", "altitude",
71, "min_altitude", "altitude",
57, "avg_temperature", "sint8",
58, "max_temperature", "sint8")
defMsg("lap", 19,
253, "timestamp", "date_time",
0, "event", "enum",
1, "event_type", "enum",
25, "sport", "enum",
23, "intensity", "enum",
24, "lap_trigger", "enum",
2, "start_time", "date_time", # Vs timestamp, which was whenever the record was "written"/end of the session
7, "total_elapsed_time", "duration_msec", # Including pauses
8, "total_timer_time", "duration_msec", # Excluding pauses
52, "total_moving_time", "duration_msec",
9, "total_distance", "distance_cm",
11,"total_calories", "uint16",
13, "avg_speed", "mmPerSec",
14, "max_speed", "mmPerSec",
15, "avg_heart_rate", "uint8",
16, "max_heart_rate", "uint8",
17, "avg_cadence", "uint8", # FIT rolls run and bike cadence into one
18, "max_cadence", "uint8",
19, "avg_power", "uint16",
20, "max_power", "uint16",
21, "total_ascent", "uint16",
22, "total_descent", "uint16",
42, "avg_altitude", "altitude",
43, "max_altitude", "altitude",
62, "min_altitude", "altitude",
50, "avg_temperature", "sint8",
51, "max_temperature", "sint8"
)
defMsg("record", 20,
253, "timestamp", "date_time",
0, "position_lat", "semicircles",
1, "position_long", "semicircles",
2, "altitude", "altitude",
3, "heart_rate", "uint8",
4, "cadence", "uint8",
5, "distance", "distance_cm",
6, "speed", "mmPerSec",
7, "power", "uint16",
13, "temperature", "sint8",
33, "calories", "uint16",
)
defMsg("event", 21,
253, "timestamp", "date_time",
0, "event", "enum",
1, "event_type", "enum")
defMsg("device_info", 23,
253, "timestamp", "date_time",
0, "device_index", "uint8",
1, "device_type", "uint8",
2, "manufacturer", "manufacturer",
3, "serial_number", "uint32z",
4, "product", "uint16",
5, "software_version", "version"
)
def _write(self, contents):
self._result.append(contents)
def GetResult(self):
return b''.join(self._result)
def _defineMessage(self, local_no, global_message, field_names):
assert 0 <= local_no < 16
if set(field_names) - set(global_message.FieldNameList):
raise ValueError("Attempting to use undefined fields %s" % (set(field_names) - set(global_message.FieldNameList)))
messageHeader = 0b01000000
messageHeader = messageHeader | local_no
local_fields = {}
arch = 0 # Little-endian
global_no = global_message.Number
field_count = len(field_names)
pack_tuple = (messageHeader, 0, arch, global_no, field_count)
for field_name in global_message.FieldNameList:
if field_name in field_names:
field = global_message.Fields[field_name]
field_type = self._types[field["Type"]]
pack_tuple += (field["Number"], field_type.Size, field_type.TypeField)
local_fields[field_name] = field
self._definitions[local_no] = FITMessageTemplate(global_message.Name, local_no, local_fields)
self._write(struct.pack("<BBBHB" + ("BBB" * field_count), *pack_tuple))
return self._definitions[local_no]
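# Wire-format note (descriptive comment, not in the original source): the
# struct.pack call above emits a FIT *definition message* - a header byte
# 0b0100nnnn (nnnn = local message number), a reserved byte, the
# architecture byte (0 = little-endian), the two-byte global message
# number and a field count, followed by one (field number, size, base
# type) byte triple per field.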
def GenerateMessage(self, name, **kwargs):
globalDefn = self._messageTemplates[name]
# Create a subset of the global message's fields
localFieldNamesSet = set()
for fieldName in kwargs:
localFieldNamesSet.add(fieldName)
# I'll look at this later
compressTS = False
# Are these fields covered by an existing local message type?
active_definition = None
for defn_n in self._definitions:
defn = self._definitions[defn_n]
if defn.Name == name:
if defn.FieldNameSet == localFieldNamesSet:
active_definition = defn
# If not, create a new local message type with these fields
if not active_definition:
active_definition_no = len(self._definitions)
active_definition = self._defineMessage(active_definition_no, globalDefn, localFieldNamesSet)
if compressTS and active_definition.Number > 3:
raise Exception("Can't use compressed timestamp when local message number > 3")
messageHeader = 0
if compressTS:
messageHeader = messageHeader | (1 << 7)
tsOffsetVal = -1 # TODO
messageHeader = messageHeader | (active_definition.Number << 4)
else:
messageHeader = messageHeader | active_definition.Number
packResult = [struct.pack("<B", messageHeader)]
for field_name in active_definition.FieldNameList:
field = active_definition.Fields[field_name]
field_type = self._types[field["Type"]]
try:
if field_type.Formatter:
result = field_type.Formatter(kwargs[field_name])
else:
sanitized_value = kwargs[field_name]
if sanitized_value is None:
result = struct.pack("<" + field_type.PackFormat, field_type.InvalidValue)
else:
if field_type.PackFormat in ["B","b", "H", "h", "I", "i"]:
sanitized_value = round(sanitized_value)
try:
result = struct.pack("<" + field_type.PackFormat, sanitized_value)
except struct.error as e: # I guess more specific exception types were too much to ask for.
if "<=" in str(e) or "out of range" in str(e):
result = struct.pack("<" + field_type.PackFormat, field_type.InvalidValue)
else:
raise
except Exception as e:
raise Exception("Failed packing %s=%s - %s" % (field_name, kwargs[field_name], e))
packResult.append(result)
self._write(b''.join(packResult))
class FITIO:
_sportMap = {
ActivityType.Other: 0,
ActivityType.Running: 1,
ActivityType.Cycling: 2,
ActivityType.MountainBiking: 2,
ActivityType.Elliptical: 4,
ActivityType.Swimming: 5,
}
_subSportMap = {
# ActivityType.MountainBiking: 8 - there's an issue with cadence upload for this type on GC, so it stays disabled
}
def _calculateCRC(bytestring, crc=0):
crc_table = [0x0000, 0xCC01, 0xD801, 0x1400, 0xF001, 0x3C00, 0x2800, 0xE401, 0xA001, 0x6C00, 0x7800, 0xB401, 0x5000, 0x9C01, 0x8801, 0x4400]
for byte in bytestring:
tmp = crc_table[crc & 0xF]
crc = (crc >> 4) & 0x0FFF
crc = crc ^ tmp ^ crc_table[byte & 0xF]
tmp = crc_table[crc & 0xF]
crc = (crc >> 4) & 0x0FFF
crc = crc ^ tmp ^ crc_table[(byte >> 4) & 0xF]
return crc
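# Illustrative self-check (not part of the original code): the FIT CRC-16
# of an empty byte string is 0, and feeding data through in two chunks via
# the crc argument matches a single pass over the same bytes.
def _demoCRC():
    assert FITIO._calculateCRC(b"") == 0
    one_pass = FITIO._calculateCRC(b"\x0e\x10")
    two_pass = FITIO._calculateCRC(b"\x10", FITIO._calculateCRC(b"\x0e"))
    assert one_pass == two_pass
    return one_pass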
def _generateHeader(dataLength):
# We need to call this once the final records are assembled and their length is known, to avoid having to seek back
header_len = 12
protocolVer = 16 # The FIT SDK code provides these in a very roundabout fashion
profileVer = 810
tag = ".FIT"
return struct.pack("<BBHI4s", header_len, protocolVer, profileVer, dataLength, tag.encode("ASCII"))
def Parse(raw_file):
raise Exception("Not implemented")
def Dump(act):
def toUtc(ts):
if ts.tzinfo:
return ts.astimezone(pytz.utc).replace(tzinfo=None)
else:
raise ValueError("Need TZ data to produce FIT file")
fmg = FITMessageGenerator()
creatorInfo = {
"manufacturer": FITManufacturer.DEVELOPMENT,
"serial_number": 0,
"product": 15706
}
devInfo = {
"manufacturer": FITManufacturer.DEVELOPMENT,
"product": 15706,
"device_index": 0
}
if act.Device:
# GC can get along without this; Strava needs it
devId = DeviceIdentifier.FindEquivalentIdentifierOfType(DeviceIdentifierType.FIT, act.Device.Identifier)
if devId:
creatorInfo = {
"manufacturer": devId.Manufacturer,
"product": devId.Product,
}
devInfo = {
"manufacturer": devId.Manufacturer,
"product": devId.Product,
"device_index": 0 # Required for GC
}
if act.Device.Serial:
creatorInfo["serial_number"] = int(act.Device.Serial) # I suppose some devices might eventually have alphanumeric serial #s
devInfo["serial_number"] = int(act.Device.Serial)
if act.Device.VersionMajor is not None:
assert act.Device.VersionMinor is not None
devInfo["software_version"] = act.Device.VersionMajor + act.Device.VersionMinor / 100
fmg.GenerateMessage("file_id", type=FITFileType.Activity, time_created=toUtc(act.StartTime), **creatorInfo)
fmg.GenerateMessage("device_info", **devInfo)
sport = FITIO._sportMap[act.Type] if act.Type in FITIO._sportMap else 0
subSport = FITIO._subSportMap[act.Type] if act.Type in FITIO._subSportMap else 0
session_stats = {
"total_elapsed_time": act.EndTime - act.StartTime,
}
# FIT doesn't have separate fields for run vs. bike cadence, just a different interpretation - we eventually need to divide by two in the running case.
# Further complicating the issue is that most sites don't differentiate the two, so they'll end up putting the run cadence back into the bike field.
use_run_cadence = act.Type in [ActivityType.Running, ActivityType.Walking, ActivityType.Hiking]
def _resolveRunCadence(bikeCad, runCad):
nonlocal use_run_cadence
if use_run_cadence:
return runCad if runCad is not None else (bikeCad if bikeCad is not None else None)
else:
return bikeCad
def _mapStat(dict, key, value):
if value is not None:
dict[key] = value
_mapStat(session_stats, "total_moving_time", act.Stats.MovingTime.asUnits(ActivityStatisticUnit.Seconds).Value)
_mapStat(session_stats, "total_timer_time", act.Stats.TimerTime.asUnits(ActivityStatisticUnit.Seconds).Value)
_mapStat(session_stats, "total_distance", act.Stats.Distance.asUnits(ActivityStatisticUnit.Meters).Value)
_mapStat(session_stats, "total_calories", act.Stats.Energy.asUnits(ActivityStatisticUnit.Kilocalories).Value)
_mapStat(session_stats, "avg_speed", act.Stats.Speed.asUnits(ActivityStatisticUnit.MetersPerSecond).Average)
_mapStat(session_stats, "max_speed", act.Stats.Speed.asUnits(ActivityStatisticUnit.MetersPerSecond).Max)
_mapStat(session_stats, "avg_heart_rate", act.Stats.HR.Average)
_mapStat(session_stats, "max_heart_rate", act.Stats.HR.Max)
_mapStat(session_stats, "avg_cadence", _resolveRunCadence(act.Stats.Cadence.Average, act.Stats.RunCadence.Average))
_mapStat(session_stats, "max_cadence", _resolveRunCadence(act.Stats.Cadence.Max, act.Stats.RunCadence.Max))
_mapStat(session_stats, "avg_power", act.Stats.Power.Average)
_mapStat(session_stats, "max_power", act.Stats.Power.Max)
_mapStat(session_stats, "total_ascent", act.Stats.Elevation.asUnits(ActivityStatisticUnit.Meters).Gain)
_mapStat(session_stats, "total_descent", act.Stats.Elevation.asUnits(ActivityStatisticUnit.Meters).Loss)
_mapStat(session_stats, "avg_altitude", act.Stats.Elevation.asUnits(ActivityStatisticUnit.Meters).Average)
_mapStat(session_stats, "max_altitude", act.Stats.Elevation.asUnits(ActivityStatisticUnit.Meters).Max)
_mapStat(session_stats, "min_altitude", act.Stats.Elevation.asUnits(ActivityStatisticUnit.Meters).Min)
_mapStat(session_stats, "avg_temperature", act.Stats.Temperature.asUnits(ActivityStatisticUnit.DegreesCelcius).Average)
_mapStat(session_stats, "max_temperature", act.Stats.Temperature.asUnits(ActivityStatisticUnit.DegreesCelcius).Max)
inPause = False
for lap in act.Laps:
for wp in lap.Waypoints:
if wp.Type == WaypointType.Resume and inPause:
fmg.GenerateMessage("event", timestamp=toUtc(wp.Timestamp), event=FITEvent.Timer, event_type=FITEventType.Start)
inPause = False
elif wp.Type == WaypointType.Pause and not inPause:
fmg.GenerateMessage("event", timestamp=toUtc(wp.Timestamp), event=FITEvent.Timer, event_type=FITEventType.Stop)
inPause = True
rec_contents = {"timestamp": toUtc(wp.Timestamp)}
if wp.Location:
rec_contents.update({"position_lat": wp.Location.Latitude, "position_long": wp.Location.Longitude})
if wp.Location.Altitude is not None:
rec_contents.update({"altitude": wp.Location.Altitude})
if wp.HR is not None:
rec_contents.update({"heart_rate": wp.HR})
if wp.RunCadence is not None:
rec_contents.update({"cadence": wp.RunCadence})
if wp.Cadence is not None:
rec_contents.update({"cadence": wp.Cadence})
if wp.Power is not None:
rec_contents.update({"power": wp.Power})
if wp.Temp is not None:
rec_contents.update({"temperature": wp.Temp})
if wp.Calories is not None:
rec_contents.update({"calories": wp.Calories})
if wp.Distance is not None:
rec_contents.update({"distance": wp.Distance})
if wp.Speed is not None:
rec_contents.update({"speed": wp.Speed})
fmg.GenerateMessage("record", **rec_contents)
# Man, I love copy + paste and multi-cursor editing
# But seriously, I'm betting that, some time down the road, a stat will pop up in X but not in Y, so I won't feel so bad about the C&P abuse
lap_stats = {}
_mapStat(lap_stats, "total_elapsed_time", lap.EndTime - lap.StartTime)
_mapStat(lap_stats, "total_moving_time", lap.Stats.MovingTime.asUnits(ActivityStatisticUnit.Seconds).Value)
_mapStat(lap_stats, "total_timer_time", lap.Stats.TimerTime.asUnits(ActivityStatisticUnit.Seconds).Value)
_mapStat(lap_stats, "total_distance", lap.Stats.Distance.asUnits(ActivityStatisticUnit.Meters).Value)
_mapStat(lap_stats, "total_calories", lap.Stats.Energy.asUnits(ActivityStatisticUnit.Kilocalories).Value)
_mapStat(lap_stats, "avg_speed", lap.Stats.Speed.asUnits(ActivityStatisticUnit.MetersPerSecond).Average)
_mapStat(lap_stats, "max_speed", lap.Stats.Speed.asUnits(ActivityStatisticUnit.MetersPerSecond).Max)
_mapStat(lap_stats, "avg_heart_rate", lap.Stats.HR.Average)
_mapStat(lap_stats, "max_heart_rate", lap.Stats.HR.Max)
_mapStat(lap_stats, "avg_cadence", _resolveRunCadence(lap.Stats.Cadence.Average, lap.Stats.RunCadence.Average))
_mapStat(lap_stats, "max_cadence", _resolveRunCadence(lap.Stats.Cadence.Max, lap.Stats.RunCadence.Max))
_mapStat(lap_stats, "avg_power", lap.Stats.Power.Average)
_mapStat(lap_stats, "max_power", lap.Stats.Power.Max)
_mapStat(lap_stats, "total_ascent", lap.Stats.Elevation.asUnits(ActivityStatisticUnit.Meters).Gain)
_mapStat(lap_stats, "total_descent", lap.Stats.Elevation.asUnits(ActivityStatisticUnit.Meters).Loss)
_mapStat(lap_stats, "avg_altitude", lap.Stats.Elevation.asUnits(ActivityStatisticUnit.Meters).Average)
_mapStat(lap_stats, "max_altitude", lap.Stats.Elevation.asUnits(ActivityStatisticUnit.Meters).Max)
_mapStat(lap_stats, "min_altitude", lap.Stats.Elevation.asUnits(ActivityStatisticUnit.Meters).Min)
_mapStat(lap_stats, "avg_temperature", lap.Stats.Temperature.asUnits(ActivityStatisticUnit.DegreesCelcius).Average)
_mapStat(lap_stats, "max_temperature", lap.Stats.Temperature.asUnits(ActivityStatisticUnit.DegreesCelcius).Max)
# These are some really... stupid lookups.
# Oh well, futureproofing.
lap_stats["intensity"] = ({
LapIntensity.Active: FITLapIntensity.Active,
LapIntensity.Rest: FITLapIntensity.Rest,
LapIntensity.Warmup: FITLapIntensity.Warmup,
LapIntensity.Cooldown: FITLapIntensity.Cooldown,
})[lap.Intensity]
lap_stats["lap_trigger"] = ({
LapTriggerMethod.Manual: FITLapTriggerMethod.Manual,
LapTriggerMethod.Time: FITLapTriggerMethod.Time,
LapTriggerMethod.Distance: FITLapTriggerMethod.Distance,
LapTriggerMethod.PositionStart: FITLapTriggerMethod.PositionStart,
LapTriggerMethod.PositionLap: FITLapTriggerMethod.PositionLap,
LapTriggerMethod.PositionWaypoint: FITLapTriggerMethod.PositionWaypoint,
LapTriggerMethod.PositionMarked: FITLapTriggerMethod.PositionMarked,
LapTriggerMethod.SessionEnd: FITLapTriggerMethod.SessionEnd,
LapTriggerMethod.FitnessEquipment: FITLapTriggerMethod.FitnessEquipment,
})[lap.Trigger]
fmg.GenerateMessage("lap", timestamp=toUtc(lap.EndTime), start_time=toUtc(lap.StartTime), event=FITEvent.Lap, event_type=FITEventType.Start, sport=sport, **lap_stats)
# These need to be at the end for Strava
fmg.GenerateMessage("session", timestamp=toUtc(act.EndTime), start_time=toUtc(act.StartTime), sport=sport, sub_sport=subSport, event=FITEvent.Timer, event_type=FITEventType.Start, **session_stats)
fmg.GenerateMessage("activity", timestamp=toUtc(act.EndTime), local_timestamp=act.EndTime.replace(tzinfo=None), num_sessions=1, type=FITActivityType.GENERIC, event=FITEvent.Activity, event_type=FITEventType.Stop)
records = fmg.GetResult()
header = FITIO._generateHeader(len(records))
crc = FITIO._calculateCRC(records, FITIO._calculateCRC(header))
return header + records + struct.pack("<H", crc)
|
ajaali/django | refs/heads/master | django/core/checks/compatibility/django_1_8_0.py | 286 | from __future__ import unicode_literals
from django.conf import global_settings, settings
from .. import Tags, Warning, register
@register(Tags.compatibility)
def check_duplicate_template_settings(app_configs, **kwargs):
if settings.TEMPLATES:
values = [
'TEMPLATE_DIRS',
'ALLOWED_INCLUDE_ROOTS',
'TEMPLATE_CONTEXT_PROCESSORS',
'TEMPLATE_DEBUG',
'TEMPLATE_LOADERS',
'TEMPLATE_STRING_IF_INVALID',
]
duplicates = [
value for value in values
if getattr(settings, value) != getattr(global_settings, value)
]
if duplicates:
return [Warning(
"The standalone TEMPLATE_* settings were deprecated in Django "
"1.8 and the TEMPLATES dictionary takes precedence. You must "
"put the values of the following settings into your default "
"TEMPLATES dict: %s." % ", ".join(duplicates),
id='1_8.W001',
)]
return []
|
willworth/thermos | refs/heads/master | thermos/Lib/site-packages/pip/_vendor/distlib/util.py | 327 | #
# Copyright (C) 2012-2016 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import codecs
from collections import deque
import contextlib
import csv
from glob import iglob as std_iglob
import io
import json
import logging
import os
import py_compile
import re
import shutil
import socket
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
import subprocess
import sys
import tarfile
import tempfile
import textwrap
try:
import threading
except ImportError: # pragma: no cover
import dummy_threading as threading
import time
from . import DistlibException
from .compat import (string_types, text_type, shutil, raw_input, StringIO,
cache_from_source, urlopen, urljoin, httplib, xmlrpclib,
splittype, HTTPHandler, BaseConfigurator, valid_ident,
Container, configparser, URLError, ZipFile, fsdecode,
unquote)
logger = logging.getLogger(__name__)
#
# Requirement parsing code for name + optional constraints + optional extras
#
# e.g. 'foo [bar, baz] (>= 1.2, < 2.0)' - extras, if present, come first
#
# The regex can seem a bit hairy, so we build it up out of smaller pieces
# which are manageable.
#
COMMA = r'\s*,\s*'
COMMA_RE = re.compile(COMMA)
IDENT = r'(\w|[.-])+'
EXTRA_IDENT = r'(\*|:(\*|\w+):|' + IDENT + ')'
VERSPEC = IDENT + r'\*?'
RELOP = '([<>=!~]=)|[<>]'
#
# The first relop is optional - if absent, will be taken as '~='
#
BARE_CONSTRAINTS = ('(' + RELOP + r')?\s*(' + VERSPEC + ')(' + COMMA + '(' +
RELOP + r')\s*(' + VERSPEC + '))*')
DIRECT_REF = r'(from\s+(?P<diref>.*))'
#
# Either the bare constraints or the bare constraints in parentheses
#
CONSTRAINTS = (r'\(\s*(?P<c1>' + BARE_CONSTRAINTS + '|' + DIRECT_REF +
r')\s*\)|(?P<c2>' + BARE_CONSTRAINTS + r'\s*)')
EXTRA_LIST = EXTRA_IDENT + '(' + COMMA + EXTRA_IDENT + ')*'
EXTRAS = r'\[\s*(?P<ex>' + EXTRA_LIST + r')?\s*\]'
REQUIREMENT = ('(?P<dn>' + IDENT + r')\s*(' + EXTRAS + r'\s*)?(\s*' +
CONSTRAINTS + ')?$')
REQUIREMENT_RE = re.compile(REQUIREMENT)
#
# Used to scan through the constraints
#
RELOP_IDENT = '(?P<op>' + RELOP + r')\s*(?P<vn>' + VERSPEC + ')'
RELOP_IDENT_RE = re.compile(RELOP_IDENT)
def parse_requirement(s):
def get_constraint(m):
d = m.groupdict()
return d['op'], d['vn']
result = None
m = REQUIREMENT_RE.match(s)
if m:
d = m.groupdict()
name = d['dn']
cons = d['c1'] or d['c2']
if not d['diref']:
url = None
else:
# direct reference
cons = None
url = d['diref'].strip()
if not cons:
cons = None
constr = ''
rs = d['dn']
else:
if cons[0] not in '<>!=':
cons = '~=' + cons
iterator = RELOP_IDENT_RE.finditer(cons)
cons = [get_constraint(m) for m in iterator]
rs = '%s (%s)' % (name, ', '.join(['%s %s' % con for con in cons]))
if not d['ex']:
extras = None
else:
extras = COMMA_RE.split(d['ex'])
result = Container(name=name, constraints=cons, extras=extras,
requirement=rs, source=s, url=url)
return result
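# Illustrative sketch (not part of the original module): what the parser
# returns for a requirement with extras and parenthesized constraints
# (note that the REQUIREMENT regex expects the extras *before* the
# constraints).
def _demo_parse_requirement():
    r = parse_requirement('foo [bar, baz] (>= 1.2, < 2.0)')
    assert r.name == 'foo'
    assert r.constraints == [('>=', '1.2'), ('<', '2.0')]
    assert r.extras == ['bar', 'baz']
    assert r.requirement == 'foo (>= 1.2, < 2.0)'
    return r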
def get_resources_dests(resources_root, rules):
"""Find destinations for resources files"""
def get_rel_path(base, path):
# normalizes and returns a lstripped-/-separated path
base = base.replace(os.path.sep, '/')
path = path.replace(os.path.sep, '/')
assert path.startswith(base)
return path[len(base):].lstrip('/')
destinations = {}
for base, suffix, dest in rules:
prefix = os.path.join(resources_root, base)
for abs_base in iglob(prefix):
abs_glob = os.path.join(abs_base, suffix)
for abs_path in iglob(abs_glob):
resource_file = get_rel_path(resources_root, abs_path)
if dest is None: # remove the entry if it was here
destinations.pop(resource_file, None)
else:
rel_path = get_rel_path(abs_base, abs_path)
rel_dest = dest.replace(os.path.sep, '/').rstrip('/')
destinations[resource_file] = rel_dest + '/' + rel_path
return destinations
def in_venv():
if hasattr(sys, 'real_prefix'):
# virtualenv venvs
result = True
else:
# PEP 405 venvs
result = sys.prefix != getattr(sys, 'base_prefix', sys.prefix)
return result
def get_executable():
# The __PYVENV_LAUNCHER__ dance is apparently no longer needed, as
# changes to the stub launcher mean that sys.executable always points
# to the stub on macOS
# if sys.platform == 'darwin' and ('__PYVENV_LAUNCHER__'
# in os.environ):
# result = os.environ['__PYVENV_LAUNCHER__']
# else:
# result = sys.executable
# return result
result = os.path.normcase(sys.executable)
if not isinstance(result, text_type):
result = fsdecode(result)
return result
def proceed(prompt, allowed_chars, error_prompt=None, default=None):
p = prompt
while True:
s = raw_input(p)
p = prompt
if not s and default:
s = default
if s:
c = s[0].lower()
if c in allowed_chars:
break
if error_prompt:
p = '%c: %s\n%s' % (c, error_prompt, prompt)
return c
def extract_by_key(d, keys):
if isinstance(keys, string_types):
keys = keys.split()
result = {}
for key in keys:
if key in d:
result[key] = d[key]
return result
def read_exports(stream):
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getreader('utf-8')(stream)
# Try to load as JSON, falling back on legacy format
data = stream.read()
stream = StringIO(data)
try:
jdata = json.load(stream)
result = jdata['extensions']['python.exports']['exports']
for group, entries in result.items():
for k, v in entries.items():
s = '%s = %s' % (k, v)
entry = get_export_entry(s)
assert entry is not None
entries[k] = entry
return result
except Exception:
stream.seek(0, 0)
def read_stream(cp, stream):
if hasattr(cp, 'read_file'):
cp.read_file(stream)
else:
cp.readfp(stream)
cp = configparser.ConfigParser()
try:
read_stream(cp, stream)
except configparser.MissingSectionHeaderError:
stream.close()
data = textwrap.dedent(data)
stream = StringIO(data)
read_stream(cp, stream)
result = {}
for key in cp.sections():
result[key] = entries = {}
for name, value in cp.items(key):
s = '%s = %s' % (name, value)
entry = get_export_entry(s)
assert entry is not None
#entry.dist = self
entries[name] = entry
return result
def write_exports(exports, stream):
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getwriter('utf-8')(stream)
cp = configparser.ConfigParser()
for k, v in exports.items():
# TODO check k, v for valid values
cp.add_section(k)
for entry in v.values():
if entry.suffix is None:
s = entry.prefix
else:
s = '%s:%s' % (entry.prefix, entry.suffix)
if entry.flags:
s = '%s [%s]' % (s, ', '.join(entry.flags))
cp.set(k, entry.name, s)
cp.write(stream)
@contextlib.contextmanager
def tempdir():
td = tempfile.mkdtemp()
try:
yield td
finally:
shutil.rmtree(td)
@contextlib.contextmanager
def chdir(d):
cwd = os.getcwd()
try:
os.chdir(d)
yield
finally:
os.chdir(cwd)
@contextlib.contextmanager
def socket_timeout(seconds=15):
cto = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(seconds)
yield
finally:
socket.setdefaulttimeout(cto)
class cached_property(object):
def __init__(self, func):
self.func = func
#for attr in ('__name__', '__module__', '__doc__'):
# setattr(self, attr, getattr(func, attr, None))
def __get__(self, obj, cls=None):
if obj is None:
return self
value = self.func(obj)
object.__setattr__(obj, self.func.__name__, value)
#obj.__dict__[self.func.__name__] = value = self.func(obj)
return value
def convert_path(pathname):
"""Return 'pathname' as a name that will work on the native filesystem.
The path is split on '/' and put back together again using the current
directory separator. Needed because filenames in the setup script are
always supplied in Unix style, and have to be converted to the local
convention before we can actually use them in the filesystem. Raises
ValueError on non-Unix-ish systems if 'pathname' either starts or
ends with a slash.
"""
if os.sep == '/':
return pathname
if not pathname:
return pathname
if pathname[0] == '/':
raise ValueError("path '%s' cannot be absolute" % pathname)
if pathname[-1] == '/':
raise ValueError("path '%s' cannot end with '/'" % pathname)
paths = pathname.split('/')
while os.curdir in paths:
paths.remove(os.curdir)
if not paths:
return os.curdir
return os.path.join(*paths)
class FileOperator(object):
def __init__(self, dry_run=False):
self.dry_run = dry_run
self.ensured = set()
self._init_record()
def _init_record(self):
self.record = False
self.files_written = set()
self.dirs_created = set()
def record_as_written(self, path):
if self.record:
self.files_written.add(path)
def newer(self, source, target):
"""Tell if the target is newer than the source.
Returns true if 'source' exists and is more recently modified than
'target', or if 'source' exists and 'target' doesn't.
Returns false if both exist and 'target' is the same age or younger
than 'source'. Raise DistlibException if 'source' does not exist.
Note that this test is not very accurate: files created in the same
second will have the same "age".
"""
if not os.path.exists(source):
raise DistlibException("file '%r' does not exist" %
os.path.abspath(source))
if not os.path.exists(target):
return True
return os.stat(source).st_mtime > os.stat(target).st_mtime
def copy_file(self, infile, outfile, check=True):
"""Copy a file respecting dry-run and force flags.
"""
self.ensure_dir(os.path.dirname(outfile))
logger.info('Copying %s to %s', infile, outfile)
if not self.dry_run:
msg = None
if check:
if os.path.islink(outfile):
msg = '%s is a symlink' % outfile
elif os.path.exists(outfile) and not os.path.isfile(outfile):
msg = '%s is a non-regular file' % outfile
if msg:
raise ValueError(msg + ' which would be overwritten')
shutil.copyfile(infile, outfile)
self.record_as_written(outfile)
def copy_stream(self, instream, outfile, encoding=None):
assert not os.path.isdir(outfile)
self.ensure_dir(os.path.dirname(outfile))
logger.info('Copying stream %s to %s', instream, outfile)
if not self.dry_run:
if encoding is None:
outstream = open(outfile, 'wb')
else:
outstream = codecs.open(outfile, 'w', encoding=encoding)
try:
shutil.copyfileobj(instream, outstream)
finally:
outstream.close()
self.record_as_written(outfile)
def write_binary_file(self, path, data):
self.ensure_dir(os.path.dirname(path))
if not self.dry_run:
with open(path, 'wb') as f:
f.write(data)
self.record_as_written(path)
def write_text_file(self, path, data, encoding):
self.ensure_dir(os.path.dirname(path))
if not self.dry_run:
with open(path, 'wb') as f:
f.write(data.encode(encoding))
self.record_as_written(path)
def set_mode(self, bits, mask, files):
if os.name == 'posix' or (os.name == 'java' and os._name == 'posix'):
# Set the executable bits (owner, group, and world) on
# all the files specified.
for f in files:
if self.dry_run:
logger.info("changing mode of %s", f)
else:
mode = (os.stat(f).st_mode | bits) & mask
logger.info("changing mode of %s to %o", f, mode)
os.chmod(f, mode)
set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f)
def ensure_dir(self, path):
path = os.path.abspath(path)
if path not in self.ensured and not os.path.exists(path):
self.ensured.add(path)
d, f = os.path.split(path)
self.ensure_dir(d)
logger.info('Creating %s' % path)
if not self.dry_run:
os.mkdir(path)
if self.record:
self.dirs_created.add(path)
def byte_compile(self, path, optimize=False, force=False, prefix=None):
dpath = cache_from_source(path, not optimize)
logger.info('Byte-compiling %s to %s', path, dpath)
if not self.dry_run:
if force or self.newer(path, dpath):
if not prefix:
diagpath = None
else:
assert path.startswith(prefix)
diagpath = path[len(prefix):]
py_compile.compile(path, dpath, diagpath, True) # raise error
self.record_as_written(dpath)
return dpath
def ensure_removed(self, path):
if os.path.exists(path):
if os.path.isdir(path) and not os.path.islink(path):
logger.debug('Removing directory tree at %s', path)
if not self.dry_run:
shutil.rmtree(path)
if self.record:
if path in self.dirs_created:
self.dirs_created.remove(path)
else:
if os.path.islink(path):
s = 'link'
else:
s = 'file'
logger.debug('Removing %s %s', s, path)
if not self.dry_run:
os.remove(path)
if self.record:
if path in self.files_written:
self.files_written.remove(path)
def is_writable(self, path):
result = False
while not result:
if os.path.exists(path):
result = os.access(path, os.W_OK)
break
parent = os.path.dirname(path)
if parent == path:
break
path = parent
return result
def commit(self):
"""
Commit recorded changes, turn off recording, return
changes.
"""
assert self.record
result = self.files_written, self.dirs_created
self._init_record()
return result
def rollback(self):
if not self.dry_run:
for f in list(self.files_written):
if os.path.exists(f):
os.remove(f)
# dirs should all be empty now, except perhaps for
# __pycache__ subdirs
# reverse so that subdirs appear before their parents
dirs = sorted(self.dirs_created, reverse=True)
for d in dirs:
flist = os.listdir(d)
if flist:
assert flist == ['__pycache__']
sd = os.path.join(d, flist[0])
os.rmdir(sd)
os.rmdir(d) # should fail if non-empty
self._init_record()
def resolve(module_name, dotted_path):
if module_name in sys.modules:
mod = sys.modules[module_name]
else:
mod = __import__(module_name)
if dotted_path is None:
result = mod
else:
parts = dotted_path.split('.')
result = getattr(mod, parts.pop(0))
for p in parts:
result = getattr(result, p)
return result
class ExportEntry(object):
def __init__(self, name, prefix, suffix, flags):
self.name = name
self.prefix = prefix
self.suffix = suffix
self.flags = flags
@cached_property
def value(self):
return resolve(self.prefix, self.suffix)
def __repr__(self): # pragma: no cover
return '<ExportEntry %s = %s:%s %s>' % (self.name, self.prefix,
self.suffix, self.flags)
def __eq__(self, other):
if not isinstance(other, ExportEntry):
result = False
else:
result = (self.name == other.name and
self.prefix == other.prefix and
self.suffix == other.suffix and
self.flags == other.flags)
return result
__hash__ = object.__hash__
ENTRY_RE = re.compile(r'''(?P<name>(\w|[-.+])+)
\s*=\s*(?P<callable>(\w+)([:\.]\w+)*)
\s*(\[\s*(?P<flags>\w+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])?
''', re.VERBOSE)
def get_export_entry(specification):
m = ENTRY_RE.search(specification)
if not m:
result = None
if '[' in specification or ']' in specification:
raise DistlibException("Invalid specification "
"'%s'" % specification)
else:
d = m.groupdict()
name = d['name']
path = d['callable']
colons = path.count(':')
if colons == 0:
prefix, suffix = path, None
else:
if colons != 1:
raise DistlibException("Invalid specification "
"'%s'" % specification)
prefix, suffix = path.split(':')
flags = d['flags']
if flags is None:
if '[' in specification or ']' in specification:
raise DistlibException("Invalid specification "
"'%s'" % specification)
flags = []
else:
flags = [f.strip() for f in flags.split(',')]
result = ExportEntry(name, prefix, suffix, flags)
return result
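# Illustrative sketch (not part of the original module): an export
# specification maps a name to a 'module:attribute' path with optional
# flags in square brackets.
def _demo_get_export_entry():
    entry = get_export_entry('modify = distutils.core:setup [wrap]')
    assert (entry.name, entry.prefix, entry.suffix) == ('modify',
                                                        'distutils.core',
                                                        'setup')
    assert entry.flags == ['wrap']
    return entry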
def get_cache_base(suffix=None):
"""
Return the default base location for distlib caches. If the directory does
not exist, it is created. Use the suffix provided for the base directory,
and default to '.distlib' if it isn't provided.
On Windows, if LOCALAPPDATA is defined in the environment, then it is
assumed to be a directory, and will be the parent directory of the result.
On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home
directory - using os.path.expanduser('~') - will be the parent directory of
the result.
The result is just the directory '.distlib' in the parent directory as
determined above, or with the name specified with ``suffix``.
"""
if suffix is None:
suffix = '.distlib'
if os.name == 'nt' and 'LOCALAPPDATA' in os.environ:
result = os.path.expandvars('$localappdata')
else:
# Assume posix, or old Windows
result = os.path.expanduser('~')
# we use 'isdir' instead of 'exists', because we want to
# fail if there's a file with that name
if os.path.isdir(result):
usable = os.access(result, os.W_OK)
if not usable:
logger.warning('Directory exists but is not writable: %s', result)
else:
try:
os.makedirs(result)
usable = True
except OSError:
logger.warning('Unable to create %s', result, exc_info=True)
usable = False
if not usable:
result = tempfile.mkdtemp()
logger.warning('Default location unusable, using %s', result)
return os.path.join(result, suffix)
def path_to_cache_dir(path):
"""
Convert an absolute path to a directory name for use in a cache.
The algorithm used is:
#. On Windows, any ``':'`` in the drive is replaced with ``'---'``.
#. Any occurrence of ``os.sep`` is replaced with ``'--'``.
#. ``'.cache'`` is appended.
"""
d, p = os.path.splitdrive(os.path.abspath(path))
if d:
d = d.replace(':', '---')
p = p.replace(os.sep, '--')
return d + p + '.cache'
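# Illustrative sketch (not part of the original module): on POSIX the
# separators collapse to '--' and '.cache' is appended.
def _demo_path_to_cache_dir():
    if os.name == 'posix':
        assert path_to_cache_dir('/home/user/lib') == '--home--user--lib.cache'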
def ensure_slash(s):
if not s.endswith('/'):
return s + '/'
return s
def parse_credentials(netloc):
username = password = None
if '@' in netloc:
prefix, netloc = netloc.split('@', 1)
if ':' not in prefix:
username = prefix
else:
username, password = prefix.split(':', 1)
return username, password, netloc
def get_process_umask():
result = os.umask(0o22)
os.umask(result)
return result
def is_string_sequence(seq):
result = True
i = None
for i, s in enumerate(seq):
if not isinstance(s, string_types):
result = False
break
assert i is not None
return result
PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-'
'([a-z0-9_.+-]+)', re.I)
PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)')
def split_filename(filename, project_name=None):
"""
Extract name, version, python version from a filename (no extension)
Return name, version, pyver or None
"""
result = None
pyver = None
filename = unquote(filename).replace(' ', '-')
m = PYTHON_VERSION.search(filename)
if m:
pyver = m.group(1)
filename = filename[:m.start()]
if project_name and len(filename) > len(project_name) + 1:
m = re.match(re.escape(project_name) + r'\b', filename)
if m:
n = m.end()
result = filename[:n], filename[n + 1:], pyver
if result is None:
m = PROJECT_NAME_AND_VERSION.match(filename)
if m:
result = m.group(1), m.group(3), pyver
return result
# Allow spaces in name because of legacy dists like "Twisted Core"
NAME_VERSION_RE = re.compile(r'(?P<name>[\w .-]+)\s*'
r'\(\s*(?P<ver>[^\s)]+)\)$')
def parse_name_and_version(p):
"""
A utility method used to get name and version from a string.
From e.g. a Provides-Dist value.
:param p: A value in a form 'foo (1.0)'
:return: The name and version as a tuple.
"""
m = NAME_VERSION_RE.match(p)
if not m:
raise DistlibException('Ill-formed name/version string: \'%s\'' % p)
d = m.groupdict()
return d['name'].strip().lower(), d['ver']
def get_extras(requested, available):
result = set()
requested = set(requested or [])
available = set(available or [])
if '*' in requested:
requested.remove('*')
result |= available
for r in requested:
if r == '-':
result.add(r)
elif r.startswith('-'):
unwanted = r[1:]
if unwanted not in available:
logger.warning('undeclared extra: %s' % unwanted)
if unwanted in result:
result.remove(unwanted)
else:
if r not in available:
logger.warning('undeclared extra: %s' % r)
result.add(r)
return result
#
# Extended metadata functionality
#
def _get_external_data(url):
result = {}
try:
# urlopen might fail if it runs into redirections,
# because of Python issue #13696. Fixed in locators
# using a custom redirect handler.
resp = urlopen(url)
headers = resp.info()
ct = headers.get('Content-Type')
if not ct.startswith('application/json'):
logger.debug('Unexpected response for JSON request: %s', ct)
else:
reader = codecs.getreader('utf-8')(resp)
#data = reader.read().decode('utf-8')
#result = json.loads(data)
result = json.load(reader)
except Exception as e:
logger.exception('Failed to get external data for %s: %s', url, e)
return result
_external_data_base_url = 'https://www.red-dove.com/pypi/projects/'
def get_project_data(name):
url = '%s/%s/project.json' % (name[0].upper(), name)
url = urljoin(_external_data_base_url, url)
result = _get_external_data(url)
return result
def get_package_data(name, version):
url = '%s/%s/package-%s.json' % (name[0].upper(), name, version)
url = urljoin(_external_data_base_url, url)
return _get_external_data(url)
class Cache(object):
"""
A class implementing a cache for resources that need to live in the file system
e.g. shared libraries. This class was moved here from resources because it
could be used by other modules, e.g. the wheel module.
"""
def __init__(self, base):
"""
Initialise an instance.
:param base: The base directory where the cache should be located.
"""
# we use 'isdir' instead of 'exists', because we want to
# fail if there's a file with that name
if not os.path.isdir(base): # pragma: no cover
os.makedirs(base)
if (os.stat(base).st_mode & 0o77) != 0:
logger.warning('Directory \'%s\' is not private', base)
self.base = os.path.abspath(os.path.normpath(base))
def prefix_to_dir(self, prefix):
"""
Converts a resource prefix to a directory name in the cache.
"""
return path_to_cache_dir(prefix)
def clear(self):
"""
Clear the cache.
"""
not_removed = []
for fn in os.listdir(self.base):
fn = os.path.join(self.base, fn)
try:
if os.path.islink(fn) or os.path.isfile(fn):
os.remove(fn)
elif os.path.isdir(fn):
shutil.rmtree(fn)
except Exception:
not_removed.append(fn)
return not_removed
class EventMixin(object):
"""
A very simple publish/subscribe system.
"""
def __init__(self):
self._subscribers = {}
def add(self, event, subscriber, append=True):
"""
Add a subscriber for an event.
:param event: The name of an event.
:param subscriber: The subscriber to be added (and called when the
event is published).
:param append: Whether to append or prepend the subscriber to an
existing subscriber list for the event.
"""
subs = self._subscribers
if event not in subs:
subs[event] = deque([subscriber])
else:
sq = subs[event]
if append:
sq.append(subscriber)
else:
sq.appendleft(subscriber)
def remove(self, event, subscriber):
"""
Remove a subscriber for an event.
:param event: The name of an event.
:param subscriber: The subscriber to be removed.
"""
subs = self._subscribers
if event not in subs:
raise ValueError('No subscribers: %r' % event)
subs[event].remove(subscriber)
def get_subscribers(self, event):
"""
Return an iterator for the subscribers for an event.
:param event: The event to return subscribers for.
"""
return iter(self._subscribers.get(event, ()))
def publish(self, event, *args, **kwargs):
"""
Publish an event and return a list of values returned by its
subscribers.
:param event: The event to publish.
:param args: The positional arguments to pass to the event's
subscribers.
:param kwargs: The keyword arguments to pass to the event's
subscribers.
"""
result = []
for subscriber in self.get_subscribers(event):
try:
value = subscriber(event, *args, **kwargs)
except Exception:
logger.exception('Exception during event publication')
value = None
result.append(value)
logger.debug('publish %s: args = %s, kwargs = %s, result = %s',
event, args, kwargs, result)
return result
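# Illustrative sketch (not part of the original module): subscribers are
# plain callables keyed by event name, and publish() collects their
# return values in subscription order.
def _demo_event_mixin():
    bus = EventMixin()
    bus.add('ping', lambda event, payload: payload.upper())
    assert bus.publish('ping', 'hello') == ['HELLO']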
#
# Simple sequencing
#
class Sequencer(object):
def __init__(self):
self._preds = {}
self._succs = {}
self._nodes = set() # nodes with no preds/succs
def add_node(self, node):
self._nodes.add(node)
def remove_node(self, node, edges=False):
if node in self._nodes:
self._nodes.remove(node)
if edges:
for p in set(self._preds.get(node, ())):
self.remove(p, node)
for s in set(self._succs.get(node, ())):
self.remove(node, s)
# Remove empties
for k, v in list(self._preds.items()):
if not v:
del self._preds[k]
for k, v in list(self._succs.items()):
if not v:
del self._succs[k]
def add(self, pred, succ):
assert pred != succ
self._preds.setdefault(succ, set()).add(pred)
self._succs.setdefault(pred, set()).add(succ)
def remove(self, pred, succ):
assert pred != succ
try:
preds = self._preds[succ]
succs = self._succs[pred]
except KeyError: # pragma: no cover
raise ValueError('%r not a successor of anything' % succ)
try:
preds.remove(pred)
succs.remove(succ)
except KeyError: # pragma: no cover
raise ValueError('%r not a successor of %r' % (succ, pred))
def is_step(self, step):
return (step in self._preds or step in self._succs or
step in self._nodes)
def get_steps(self, final):
if not self.is_step(final):
raise ValueError('Unknown: %r' % final)
result = []
todo = []
seen = set()
todo.append(final)
while todo:
step = todo.pop(0)
if step in seen:
# if a step was already seen,
# move it to the end (so it will appear earlier
# when reversed on return) ... but not for the
# final step, as that would be confusing for
# users
if step != final:
result.remove(step)
result.append(step)
else:
seen.add(step)
result.append(step)
preds = self._preds.get(step, ())
todo.extend(preds)
return reversed(result)
@property
def strong_connections(self):
#http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
index_counter = [0]
stack = []
lowlinks = {}
index = {}
result = []
graph = self._succs
def strongconnect(node):
# set the depth index for this node to the smallest unused index
index[node] = index_counter[0]
lowlinks[node] = index_counter[0]
index_counter[0] += 1
stack.append(node)
# Consider successors
try:
successors = graph[node]
except Exception:
successors = []
for successor in successors:
if successor not in lowlinks:
# Successor has not yet been visited
strongconnect(successor)
lowlinks[node] = min(lowlinks[node],lowlinks[successor])
elif successor in stack:
# the successor is in the stack and hence in the current
# strongly connected component (SCC)
lowlinks[node] = min(lowlinks[node],index[successor])
# If `node` is a root node, pop the stack and generate an SCC
if lowlinks[node] == index[node]:
connected_component = []
while True:
successor = stack.pop()
connected_component.append(successor)
if successor == node: break
component = tuple(connected_component)
# storing the result
result.append(component)
for node in graph:
if node not in lowlinks:
strongconnect(node)
return result
@property
def dot(self):
result = ['digraph G {']
for succ in self._preds:
preds = self._preds[succ]
for pred in preds:
result.append(' %s -> %s;' % (pred, succ))
for node in self._nodes:
result.append(' %s;' % node)
result.append('}')
return '\n'.join(result)
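# Illustrative sketch (not part of the original module): get_steps walks
# the predecessor edges backwards from the requested step and yields them
# in dependency order.
def _demo_sequencer():
    seq = Sequencer()
    seq.add('build', 'test')
    seq.add('test', 'release')
    assert list(seq.get_steps('release')) == ['build', 'test', 'release']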
#
# Unarchiving functionality for zip, tar, tgz, tbz, whl
#
ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip',
'.tgz', '.tbz', '.whl')
def unarchive(archive_filename, dest_dir, format=None, check=True):
def check_path(path):
if not isinstance(path, text_type):
path = path.decode('utf-8')
p = os.path.abspath(os.path.join(dest_dir, path))
if not p.startswith(dest_dir) or p[plen] != os.sep:
raise ValueError('path outside destination: %r' % p)
dest_dir = os.path.abspath(dest_dir)
plen = len(dest_dir)
archive = None
if format is None:
if archive_filename.endswith(('.zip', '.whl')):
format = 'zip'
elif archive_filename.endswith(('.tar.gz', '.tgz')):
format = 'tgz'
mode = 'r:gz'
elif archive_filename.endswith(('.tar.bz2', '.tbz')):
format = 'tbz'
mode = 'r:bz2'
elif archive_filename.endswith('.tar'):
format = 'tar'
mode = 'r'
else: # pragma: no cover
raise ValueError('Unknown format for %r' % archive_filename)
try:
if format == 'zip':
archive = ZipFile(archive_filename, 'r')
if check:
names = archive.namelist()
for name in names:
check_path(name)
else:
archive = tarfile.open(archive_filename, mode)
if check:
names = archive.getnames()
for name in names:
check_path(name)
if format != 'zip' and sys.version_info[0] < 3:
# See Python issue 17153. If the dest path contains Unicode,
# tarfile extraction fails on Python 2.x if a member path name
# contains non-ASCII characters - it leads to an implicit
# bytes -> unicode conversion using ASCII to decode.
for tarinfo in archive.getmembers():
if not isinstance(tarinfo.name, text_type):
tarinfo.name = tarinfo.name.decode('utf-8')
archive.extractall(dest_dir)
finally:
if archive:
archive.close()
def zip_dir(directory):
"""zip a directory tree into a BytesIO object"""
result = io.BytesIO()
dlen = len(directory)
with ZipFile(result, "w") as zf:
for root, dirs, files in os.walk(directory):
for name in files:
full = os.path.join(root, name)
rel = root[dlen:]
dest = os.path.join(rel, name)
zf.write(full, dest)
return result
#
# Simple progress bar
#
UNITS = ('', 'K', 'M', 'G', 'T', 'P')
class Progress(object):
unknown = 'UNKNOWN'
def __init__(self, minval=0, maxval=100):
assert maxval is None or maxval >= minval
self.min = self.cur = minval
self.max = maxval
self.started = None
self.elapsed = 0
self.done = False
def update(self, curval):
assert self.min <= curval
assert self.max is None or curval <= self.max
self.cur = curval
now = time.time()
if self.started is None:
self.started = now
else:
self.elapsed = now - self.started
def increment(self, incr):
assert incr >= 0
self.update(self.cur + incr)
def start(self):
self.update(self.min)
return self
def stop(self):
if self.max is not None:
self.update(self.max)
self.done = True
@property
def maximum(self):
return self.unknown if self.max is None else self.max
@property
def percentage(self):
if self.done:
result = '100 %'
elif self.max is None:
result = ' ?? %'
else:
v = 100.0 * (self.cur - self.min) / (self.max - self.min)
result = '%3d %%' % v
return result
def format_duration(self, duration):
if ((duration <= 0) and self.max is None) or self.cur == self.min:
result = '??:??:??'
#elif duration < 1:
# result = '--:--:--'
else:
result = time.strftime('%H:%M:%S', time.gmtime(duration))
return result
@property
def ETA(self):
if self.done:
prefix = 'Done'
t = self.elapsed
#import pdb; pdb.set_trace()
else:
prefix = 'ETA '
if self.max is None:
t = -1
elif self.elapsed == 0 or (self.cur == self.min):
t = 0
else:
#import pdb; pdb.set_trace()
t = float(self.max - self.min)
t /= self.cur - self.min
t = (t - 1) * self.elapsed
return '%s: %s' % (prefix, self.format_duration(t))
@property
def speed(self):
if self.elapsed == 0:
result = 0.0
else:
result = (self.cur - self.min) / self.elapsed
for unit in UNITS:
if result < 1000:
break
result /= 1000.0
return '%d %sB/s' % (result, unit)
#
# Glob functionality
#
RICH_GLOB = re.compile(r'\{([^}]*)\}')
_CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]')
_CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$')
def iglob(path_glob):
"""Extended globbing function that supports ** and {opt1,opt2,opt3}."""
if _CHECK_RECURSIVE_GLOB.search(path_glob):
msg = """invalid glob %r: recursive glob "**" must be used alone"""
raise ValueError(msg % path_glob)
if _CHECK_MISMATCH_SET.search(path_glob):
msg = """invalid glob %r: mismatching set marker '{' or '}'"""
raise ValueError(msg % path_glob)
return _iglob(path_glob)
def _iglob(path_glob):
rich_path_glob = RICH_GLOB.split(path_glob, 1)
if len(rich_path_glob) > 1:
assert len(rich_path_glob) == 3, rich_path_glob
prefix, set, suffix = rich_path_glob
for item in set.split(','):
for path in _iglob(''.join((prefix, item, suffix))):
yield path
else:
if '**' not in path_glob:
for item in std_iglob(path_glob):
yield item
else:
prefix, radical = path_glob.split('**', 1)
if prefix == '':
prefix = '.'
if radical == '':
radical = '*'
else:
# we support both
radical = radical.lstrip('/')
radical = radical.lstrip('\\')
for path, dir, files in os.walk(prefix):
path = os.path.normpath(path)
for fn in _iglob(os.path.join(path, radical)):
yield fn
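# Illustrative sketch (not part of the original module): braces expand to
# alternatives and '**' recurses, so one pattern can cover several trees
# below a hypothetical project directory passed by the caller.
def _demo_iglob(project_dir):
    return list(iglob(os.path.join(project_dir, '{src,tests}/**/*.py')))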
if ssl:
from .compat import (HTTPSHandler as BaseHTTPSHandler, match_hostname,
CertificateError)
#
# HTTPSConnection which verifies certificates/matches domains
#
class HTTPSConnection(httplib.HTTPSConnection):
ca_certs = None # set this to the path to the certs file (.pem)
check_domain = True # only used if ca_certs is not None
# noinspection PyPropertyAccess
def connect(self):
sock = socket.create_connection((self.host, self.port), self.timeout)
if getattr(self, '_tunnel_host', False):
self.sock = sock
self._tunnel()
if not hasattr(ssl, 'SSLContext'):
# For 2.x
if self.ca_certs:
cert_reqs = ssl.CERT_REQUIRED
else:
cert_reqs = ssl.CERT_NONE
self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
cert_reqs=cert_reqs,
ssl_version=ssl.PROTOCOL_SSLv23,
ca_certs=self.ca_certs)
else: # pragma: no cover
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.options |= ssl.OP_NO_SSLv2
if self.cert_file:
context.load_cert_chain(self.cert_file, self.key_file)
kwargs = {}
if self.ca_certs:
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(cafile=self.ca_certs)
if getattr(ssl, 'HAS_SNI', False):
kwargs['server_hostname'] = self.host
self.sock = context.wrap_socket(sock, **kwargs)
if self.ca_certs and self.check_domain:
try:
match_hostname(self.sock.getpeercert(), self.host)
logger.debug('Host verified: %s', self.host)
except CertificateError: # pragma: no cover
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
raise
class HTTPSHandler(BaseHTTPSHandler):
def __init__(self, ca_certs, check_domain=True):
BaseHTTPSHandler.__init__(self)
self.ca_certs = ca_certs
self.check_domain = check_domain
def _conn_maker(self, *args, **kwargs):
"""
This is called to create a connection instance. Normally you'd
pass a connection class to do_open, but it doesn't actually check for
a class, and just expects a callable. As long as we behave just as a
constructor would have, we should be OK. If it ever changes so that
we *must* pass a class, we'll create an UnsafeHTTPSConnection class
which just sets check_domain to False in the class definition, and
choose which one to pass to do_open.
"""
result = HTTPSConnection(*args, **kwargs)
if self.ca_certs:
result.ca_certs = self.ca_certs
result.check_domain = self.check_domain
return result
def https_open(self, req):
try:
return self.do_open(self._conn_maker, req)
except URLError as e:
if 'certificate verify failed' in str(e.reason):
raise CertificateError('Unable to verify server certificate '
'for %s' % req.host)
else:
raise
#
# To guard against mixing HTTP traffic with HTTPS (examples: a Man-In-The-
# Middle proxy using HTTP listens on port 443, or an index mistakenly serves
# HTML containing an http://xyz link when it should be https://xyz),
# you can use the following handler class, which does not allow HTTP traffic.
#
# It works by inheriting from HTTPHandler - so build_opener won't add a
# handler for HTTP itself.
#
class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler):
def http_open(self, req):
raise URLError('Unexpected HTTP request on what should be a secure '
'connection: %s' % req)
#
# XML-RPC with timeouts
#
_ver_info = sys.version_info[:2]
if _ver_info == (2, 6):
class HTTP(httplib.HTTP):
def __init__(self, host='', port=None, **kwargs):
if port == 0: # 0 means use port 0, not the default port
port = None
self._setup(self._connection_class(host, port, **kwargs))
if ssl:
class HTTPS(httplib.HTTPS):
def __init__(self, host='', port=None, **kwargs):
if port == 0: # 0 means use port 0, not the default port
port = None
self._setup(self._connection_class(host, port, **kwargs))
class Transport(xmlrpclib.Transport):
def __init__(self, timeout, use_datetime=0):
self.timeout = timeout
xmlrpclib.Transport.__init__(self, use_datetime)
def make_connection(self, host):
h, eh, x509 = self.get_host_info(host)
if _ver_info == (2, 6):
result = HTTP(h, timeout=self.timeout)
else:
if not self._connection or host != self._connection[0]:
self._extra_headers = eh
self._connection = host, httplib.HTTPConnection(h)
result = self._connection[1]
return result
if ssl:
class SafeTransport(xmlrpclib.SafeTransport):
def __init__(self, timeout, use_datetime=0):
self.timeout = timeout
xmlrpclib.SafeTransport.__init__(self, use_datetime)
def make_connection(self, host):
h, eh, kwargs = self.get_host_info(host)
if not kwargs:
kwargs = {}
kwargs['timeout'] = self.timeout
if _ver_info == (2, 6):
result = HTTPS(host, None, **kwargs)
else:
if not self._connection or host != self._connection[0]:
self._extra_headers = eh
self._connection = host, httplib.HTTPSConnection(h, None,
**kwargs)
result = self._connection[1]
return result
class ServerProxy(xmlrpclib.ServerProxy):
def __init__(self, uri, **kwargs):
self.timeout = timeout = kwargs.pop('timeout', None)
# The above classes only come into play if a timeout
# is specified
if timeout is not None:
scheme, _ = splittype(uri)
use_datetime = kwargs.get('use_datetime', 0)
if scheme == 'https':
tcls = SafeTransport
else:
tcls = Transport
kwargs['transport'] = t = tcls(timeout, use_datetime=use_datetime)
self.transport = t
xmlrpclib.ServerProxy.__init__(self, uri, **kwargs)
#
# CSV functionality. This is provided because on 2.x, the csv module can't
# handle Unicode. However, we need to deal with Unicode in e.g. RECORD files.
#
def _csv_open(fn, mode, **kwargs):
if sys.version_info[0] < 3:
mode += 'b'
else:
kwargs['newline'] = ''
return open(fn, mode, **kwargs)
class CSVBase(object):
defaults = {
'delimiter': str(','), # The strs are used because we need native
'quotechar': str('"'), # str in the csv API (2.x won't take
'lineterminator': str('\n') # Unicode)
}
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.stream.close()
class CSVReader(CSVBase):
def __init__(self, **kwargs):
if 'stream' in kwargs:
stream = kwargs['stream']
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getreader('utf-8')(stream)
self.stream = stream
else:
self.stream = _csv_open(kwargs['path'], 'r')
self.reader = csv.reader(self.stream, **self.defaults)
def __iter__(self):
return self
def next(self):
result = next(self.reader)
if sys.version_info[0] < 3:
for i, item in enumerate(result):
if not isinstance(item, text_type):
result[i] = item.decode('utf-8')
return result
__next__ = next
class CSVWriter(CSVBase):
def __init__(self, fn, **kwargs):
self.stream = _csv_open(fn, 'w')
self.writer = csv.writer(self.stream, **self.defaults)
def writerow(self, row):
if sys.version_info[0] < 3:
r = []
for item in row:
if isinstance(item, text_type):
item = item.encode('utf-8')
r.append(item)
row = r
self.writer.writerow(row)
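#
# A usage sketch: round-tripping Unicode rows (e.g. for a wheel RECORD file)
# works the same on 2.x and 3.x because the classes above hide the
# bytes/text differences:
#
#     with CSVWriter('RECORD') as writer:
#         writer.writerow([u'pkg/__init__.py', u'sha256=...', u'123'])
#     with CSVReader(path='RECORD') as reader:
#         for row in reader:
#             pass    # every item in row is text on both Python 2 and 3
#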
#
# Configurator functionality
#
class Configurator(BaseConfigurator):
value_converters = dict(BaseConfigurator.value_converters)
value_converters['inc'] = 'inc_convert'
def __init__(self, config, base=None):
super(Configurator, self).__init__(config)
self.base = base or os.getcwd()
def configure_custom(self, config):
def convert(o):
if isinstance(o, (list, tuple)):
result = type(o)([convert(i) for i in o])
elif isinstance(o, dict):
if '()' in o:
result = self.configure_custom(o)
else:
result = {}
for k in o:
result[k] = convert(o[k])
else:
result = self.convert(o)
return result
c = config.pop('()')
if not callable(c):
c = self.resolve(c)
props = config.pop('.', None)
# Check for valid identifiers
args = config.pop('[]', ())
if args:
args = tuple([convert(o) for o in args])
items = [(k, convert(config[k])) for k in config if valid_ident(k)]
kwargs = dict(items)
result = c(*args, **kwargs)
if props:
for n, v in props.items():
setattr(result, n, convert(v))
return result
def __getitem__(self, key):
result = self.config[key]
if isinstance(result, dict) and '()' in result:
self.config[key] = result = self.configure_custom(result)
return result
def inc_convert(self, value):
"""Default converter for the inc:// protocol."""
if not os.path.isabs(value):
value = os.path.join(self.base, value)
with codecs.open(value, 'r', encoding='utf-8') as f:
result = json.load(f)
return result
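#
# A usage sketch with a hypothetical defaults.json: a value of the form
# 'inc://some/file.json' is resolved against self.base and replaced by the
# parsed JSON content when the configuration value is converted (conversion
# on access, in the style of logging.config):
#
#     cfg = Configurator({'logging': 'inc://defaults.json'}, base='/etc/app')
#     # reading cfg['logging'] then yields the dict parsed from
#     # /etc/app/defaults.json
#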
#
# Mixin for running subprocesses and capturing their output
#
class SubprocessMixin(object):
def __init__(self, verbose=False, progress=None):
self.verbose = verbose
self.progress = progress
def reader(self, stream, context):
"""
Read lines from a subprocess' output stream and either pass to a progress
callable (if specified) or write progress information to sys.stderr.
"""
progress = self.progress
verbose = self.verbose
while True:
s = stream.readline()
if not s:
break
if progress is not None:
progress(s, context)
else:
if not verbose:
sys.stderr.write('.')
else:
sys.stderr.write(s.decode('utf-8'))
sys.stderr.flush()
stream.close()
def run_command(self, cmd, **kwargs):
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, **kwargs)
t1 = threading.Thread(target=self.reader, args=(p.stdout, 'stdout'))
t1.start()
t2 = threading.Thread(target=self.reader, args=(p.stderr, 'stderr'))
t2.start()
p.wait()
t1.join()
t2.join()
if self.progress is not None:
self.progress('done.', 'main')
elif self.verbose:
sys.stderr.write('done.\n')
return p
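#
# A usage sketch: mix SubprocessMixin into any class to get threaded capture
# of a child process' stdout/stderr, with dots (or the full output, if
# verbose) written to sys.stderr when no progress callable is given:
#
#     class Builder(SubprocessMixin):
#         pass
#
#     b = Builder(verbose=True)
#     p = b.run_command(['git', '--version'])
#     if p.returncode != 0:
#         raise RuntimeError('command failed')
#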
def normalize_name(name):
"""Normalize a python package name a la PEP 503"""
# https://www.python.org/dev/peps/pep-0503/#normalized-names
return re.sub('[-_.]+', '-', name).lower()
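# Examples, per the PEP 503 rule above: runs of '-', '_' and '.' collapse to
# a single '-' and the result is lower-cased:
#     normalize_name('Twisted')       -> 'twisted'
#     normalize_name('foo_bar..Baz')  -> 'foo-bar-baz'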
|
MeirKriheli/Open-Knesset | refs/heads/master | committees/migrations/0014_add_committee_type.py | 14 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Committee.type'
db.add_column('committees_committee', 'type',
self.gf('django.db.models.fields.CharField')(default='committee', max_length=10),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Committee.type'
db.delete_column('committees_committee', 'type')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'committees.committee': {
'Meta': {'object_name': 'Committee'},
'aliases': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'chairpersons': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'chaired_committees'", 'blank': 'True', 'to': "orm['mks.Member']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'committees'", 'blank': 'True', 'to': "orm['mks.Member']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'portal_knesset_broadcasts_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'blank': 'True'}),
'replacements': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'replacing_in_committees'", 'blank': 'True', 'to': "orm['mks.Member']"}),
'type': ('django.db.models.fields.CharField', [], {'default': "'committee'", 'max_length': '10'})
},
'committees.committeemeeting': {
'Meta': {'ordering': "('-date',)", 'object_name': 'CommitteeMeeting'},
'committee': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'meetings'", 'to': "orm['committees.Committee']"}),
'date': ('django.db.models.fields.DateField', [], {}),
'date_string': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mks_attended': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'committee_meetings'", 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
'protocol_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'src_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'topics': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'votes_mentioned': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'committee_meetings'", 'blank': 'True', 'to': "orm['laws.Vote']"})
},
'committees.protocolpart': {
'Meta': {'ordering': "('order', 'id')", 'object_name': 'ProtocolPart'},
'body': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'header': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'meeting': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parts'", 'to': "orm['committees.CommitteeMeeting']"}),
'order': ('django.db.models.fields.IntegerField', [], {}),
'speaker': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'protocol_parts'", 'null': 'True', 'to': "orm['persons.Person']"})
},
'committees.topic': {
'Meta': {'object_name': 'Topic'},
'committees': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['committees.Committee']", 'symmetrical': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'editors': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'editing_topics'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'log': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'meetings': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['committees.CommitteeMeeting']", 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'rating_score': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'rating_votes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'events.event': {
'Meta': {'object_name': 'Event'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'what': ('django.db.models.fields.TextField', [], {}),
'when': ('django.db.models.fields.DateTimeField', [], {}),
'when_over': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'when_over_guessed': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'where': ('django.db.models.fields.TextField', [], {}),
'which_pk': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'which_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'event_for_event'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'who': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['persons.Person']", 'symmetrical': 'False'})
},
'laws.vote': {
'Meta': {'ordering': "('-time', '-id')", 'object_name': 'Vote'},
'against_coalition': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'against_opposition': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'against_own_bill': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'against_party': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'against_votes_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'controversy': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'for_votes_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'full_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'full_text_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'importance': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
'meeting_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'src_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'src_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'time_string': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'vote_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'votes': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'votes'", 'blank': 'True', 'through': "orm['laws.VoteAction']", 'to': "orm['mks.Member']"}),
'votes_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'laws.voteaction': {
'Meta': {'object_name': 'VoteAction'},
'against_coalition': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'against_opposition': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'against_own_bill': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'against_party': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Member']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'vote': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['laws.Vote']"})
},
'links.link': {
'Meta': {'object_name': 'Link'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_link'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['links.LinkType']", 'null': 'True', 'blank': 'True'}),
'object_pk': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '1000'})
},
'links.linktype': {
'Meta': {'object_name': 'LinkType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'mks.member': {
'Meta': {'ordering': "['name']", 'object_name': 'Member'},
'area_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'average_monthly_committee_presence': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'average_weekly_presence_hours': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'backlinks_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'bills_stats_approved': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'bills_stats_first': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'bills_stats_pre': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'bills_stats_proposed': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'blog': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['planet.Blog']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'current_party': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'members'", 'null': 'True', 'to': "orm['mks.Party']"}),
'current_position': ('django.db.models.fields.PositiveIntegerField', [], {'default': '999', 'blank': 'True'}),
'current_role_descriptions': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_of_death': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'family_status': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'img_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number_of_children': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'parties': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'all_members'", 'symmetrical': 'False', 'through': "orm['mks.Membership']", 'to': "orm['mks.Party']"}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'place_of_birth': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'place_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'place_of_residence_lat': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'place_of_residence_lon': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'residence_centrality': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'residence_economy': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'year_of_aliyah': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'mks.membership': {
'Meta': {'object_name': 'Membership'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Member']"}),
'party': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Party']"}),
'position': ('django.db.models.fields.PositiveIntegerField', [], {'default': '999', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'mks.party': {
'Meta': {'ordering': "('-number_of_seats',)", 'object_name': 'Party'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_coalition': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number_of_members': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'number_of_seats': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'persons.person': {
'Meta': {'ordering': "('name',)", 'object_name': 'Person'},
'area_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_of_death': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'family_status': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'img_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'mk': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'person'", 'null': 'True', 'to': "orm['mks.Member']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number_of_children': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'place_of_birth': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'place_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'place_of_residence_lat': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'place_of_residence_lon': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'residence_centrality': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'residence_economy': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'titles': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'persons'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['persons.Title']"}),
'year_of_aliyah': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'persons.title': {
'Meta': {'object_name': 'Title'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'planet.blog': {
'Meta': {'ordering': "('title', 'url')", 'object_name': 'Blog'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '1024', 'db_index': 'True'})
},
'tagging.tag': {
'Meta': {'ordering': "('name',)", 'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'tagging.taggeditem': {
'Meta': {'unique_together': "(('tag', 'content_type', 'object_id'),)", 'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['tagging.Tag']"})
}
}
complete_apps = ['committees'] |
bankonme/password-manager | refs/heads/master | backend/flask/src/clipperz.py | 9 | from clipperz import app, db
def main():
db.create_all()
app.run(debug=True)
if __name__ == "__main__":
main()
|
fxstein/cement | refs/heads/master | cement/core/exc.py | 6 | """Cement core exceptions module."""
class FrameworkError(Exception):
"""
General framework (non-application) related errors.
:param msg: The error message.
"""
def __init__(self, msg):
Exception.__init__(self)
self.msg = msg
def __str__(self):
return self.msg
class InterfaceError(FrameworkError):
"""Interface related errors."""
pass
class CaughtSignal(FrameworkError):
"""
Raised when a defined signal is caught. For more information regarding
signals, reference the
`signal <http://docs.python.org/library/signal.html>`_ library.
:param signum: The signal number.
:param frame: The signal frame.
"""
def __init__(self, signum, frame):
msg = 'Caught signal %s' % signum
super(CaughtSignal, self).__init__(msg)
self.signum = signum
self.frame = frame
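# A minimal usage sketch (a hand-rolled handler, not necessarily how the
# framework wires it up itself): signals surface as framework exceptions by
# raising CaughtSignal from a stdlib signal handler.
#
#     import signal
#
#     def signal_handler(signum, frame):
#         raise CaughtSignal(signum, frame)
#
#     signal.signal(signal.SIGTERM, signal_handler)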
|
maciejkula/spotlight | refs/heads/master | tests/factorization/test_explicit.py | 1 | import os
import numpy as np
import pytest
from spotlight.cross_validation import random_train_test_split
from spotlight.datasets import movielens
from spotlight.evaluation import rmse_score
from spotlight.factorization.explicit import ExplicitFactorizationModel
from spotlight.factorization.representations import BilinearNet
from spotlight.layers import BloomEmbedding
RANDOM_STATE = np.random.RandomState(42)
CUDA = bool(os.environ.get('SPOTLIGHT_CUDA', False))
# Acceptable variation in specific test runs
EPSILON = .005
def test_regression():
interactions = movielens.get_movielens_dataset('100K')
train, test = random_train_test_split(interactions,
random_state=RANDOM_STATE)
model = ExplicitFactorizationModel(loss='regression',
n_iter=10,
batch_size=1024,
learning_rate=1e-3,
l2=1e-5,
use_cuda=CUDA)
model.fit(train)
rmse = rmse_score(model, test)
assert rmse - EPSILON < 1.0
def test_poisson():
interactions = movielens.get_movielens_dataset('100K')
train, test = random_train_test_split(interactions,
random_state=RANDOM_STATE)
model = ExplicitFactorizationModel(loss='poisson',
n_iter=10,
batch_size=1024,
learning_rate=1e-3,
l2=1e-6,
use_cuda=CUDA)
model.fit(train)
rmse = rmse_score(model, test)
assert rmse - EPSILON < 1.0
def test_logistic():
interactions = movielens.get_movielens_dataset('100K')
# Convert to binary
interactions.ratings = (interactions.ratings > 3).astype(np.float32)
# Convert from (0, 1) to (-1, 1)
interactions.ratings = interactions.ratings * 2 - 1
train, test = random_train_test_split(interactions,
random_state=RANDOM_STATE)
model = ExplicitFactorizationModel(loss='logistic',
n_iter=10,
batch_size=1024,
learning_rate=1e-3,
l2=1e-6,
use_cuda=CUDA)
model.fit(train)
rmse = rmse_score(model, test)
assert rmse - EPSILON < 1.05
def test_check_input():
    # Train for a single iteration.
interactions = movielens.get_movielens_dataset('100K')
train, test = random_train_test_split(interactions,
random_state=RANDOM_STATE)
model = ExplicitFactorizationModel(loss='regression',
n_iter=1,
batch_size=1024,
learning_rate=1e-3,
l2=1e-6,
use_cuda=CUDA)
model.fit(train)
    # Modify data to make it incompatible with the original model.
train.user_ids[0] = train.user_ids.max() + 1
with pytest.raises(ValueError):
model.fit(train)
@pytest.mark.parametrize('compression_ratio, expected_rmse', [
(0.2, 1.5),
(0.5, 1.5),
(1.0, 1.5),
(1.5, 1.5),
(2.0, 1.5),
])
def test_bloom(compression_ratio, expected_rmse):
interactions = movielens.get_movielens_dataset('100K')
train, test = random_train_test_split(interactions,
random_state=RANDOM_STATE)
user_embeddings = BloomEmbedding(interactions.num_users, 32,
compression_ratio=compression_ratio,
num_hash_functions=2)
item_embeddings = BloomEmbedding(interactions.num_items, 32,
compression_ratio=compression_ratio,
num_hash_functions=2)
network = BilinearNet(interactions.num_users,
interactions.num_items,
user_embedding_layer=user_embeddings,
item_embedding_layer=item_embeddings)
model = ExplicitFactorizationModel(loss='regression',
n_iter=10,
batch_size=1024,
learning_rate=1e-2,
l2=1e-5,
representation=network,
use_cuda=CUDA)
model.fit(train)
print(model)
rmse = rmse_score(model, test)
print(rmse)
assert rmse - EPSILON < expected_rmse
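# A standalone sketch of the embedding layer under test, using the same
# constructor signature as above; with compression_ratio=0.5 roughly half as
# many underlying rows stand in for the full id space via 2 hash functions:
#
#     layer = BloomEmbedding(1000, 32,
#                            compression_ratio=0.5,
#                            num_hash_functions=2)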
|
jorsea/odoomrp-wip | refs/heads/8.0 | mrp_operations_extension/models/mrp_routing.py | 4 | # -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from openerp import models, fields, api, _
# Use Odoo's user-facing Warning so the constraint errors below reach the UI
# instead of raising the Python builtin of the same name.
from openerp.exceptions import Warning
from openerp.addons import decimal_precision as dp
class MrpRouting(models.Model):
_inherit = 'mrp.routing'
@api.one
@api.constrains('workcenter_lines')
def _check_produce_operation(self):
if not self.workcenter_lines:
return
num_produce = sum([x.do_production for x in self.workcenter_lines])
if num_produce != 1:
raise Warning(_("There must be one and only one operation with "
"'Produce here' check marked."))
previous_operations_finished = fields.Boolean(
string='Previous operations finished')
class MrpRoutingWorkcenter(models.Model):
_inherit = 'mrp.routing.workcenter'
    def get_routing_previous_operations(self):
        # Default helper: mirror the flag from the parent routing, if any.
        return self.routing_id.previous_operations_finished
operation = fields.Many2one('mrp.routing.operation', string='Operation')
op_wc_lines = fields.One2many(
'mrp.operation.workcenter', 'routing_workcenter',
string='Possible work centers for this operation')
do_production = fields.Boolean(
string='Produce here',
help="If enabled, the production and movement to stock of the final "
"products will be done in this operation. There can be only one "
"operation per route with this check marked.")
    previous_operations_finished = fields.Boolean(
        string='Previous operations finished',
        default=get_routing_previous_operations)
picking_type_id = fields.Many2one('stock.picking.type', 'Picking Type',
domain=[('code', '=', 'outgoing')])
    @api.one
    @api.constrains('op_wc_lines')
def _check_default_op_wc_lines(self):
if not self.op_wc_lines:
return
num_default = len([x for x in self.op_wc_lines if x.default])
if num_default != 1:
raise Warning(
_('There must be one and only one line set as default.'))
@api.one
@api.onchange('operation')
def onchange_operation(self):
if self.operation:
self.name = self.operation.name
self.note = self.operation.description
op_wc_lst = []
is_default = True
for operation_wc in self.operation.workcenters:
data = {
'default': is_default,
'workcenter': operation_wc.id,
'capacity_per_cycle': operation_wc.capacity_per_cycle,
'time_efficiency': operation_wc.time_efficiency,
'time_cycle': operation_wc.time_cycle,
'time_start': operation_wc.time_start,
'time_stop': operation_wc.time_stop,
'op_number': self.operation.op_number,
}
op_wc_lst.append(data)
is_default = False
self.op_wc_lines = op_wc_lst
@api.one
@api.onchange('op_wc_lines')
def onchange_lines_default(self):
for line in self.op_wc_lines:
if line.default:
self.workcenter_id = line.workcenter
self.cycle_nbr = line.capacity_per_cycle
self.hour_nbr = line.time_cycle
break
class MrpOperationWorkcenter(models.Model):
_name = 'mrp.operation.workcenter'
_description = 'MRP Operation Workcenter'
workcenter = fields.Many2one(
'mrp.workcenter', string='Workcenter', required=True)
routing_workcenter = fields.Many2one(
'mrp.routing.workcenter', 'Routing workcenter', required=True)
time_efficiency = fields.Float('Efficiency factor')
capacity_per_cycle = fields.Float('Capacity per cycle')
time_cycle = fields.Float('Time for 1 cycle (hours)',
help="Time in hours for doing one cycle.")
time_start = fields.Float('Time before prod.',
help="Time in hours for the setup.")
time_stop = fields.Float('Time after prod.',
help="Time in hours for the cleaning.")
    op_number = fields.Integer('# operators', default=0)
op_avg_cost = fields.Float(
string='Operator avg. hour cost',
digits=dp.get_precision('Product Price'))
default = fields.Boolean('Default')
@api.one
@api.onchange('workcenter')
def onchange_workcenter(self):
if self.workcenter:
self.capacity_per_cycle = self.workcenter.capacity_per_cycle
self.time_efficiency = self.workcenter.time_efficiency
self.time_cycle = self.workcenter.time_cycle
self.time_start = self.workcenter.time_start
self.time_stop = self.workcenter.time_stop
self.op_number = self.workcenter.op_number
self.op_avg_cost = self.workcenter.op_avg_cost
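# A usage sketch with hypothetical records (new-API style): instantiating a
# line in memory and firing the onchange copies the work centre defaults onto
# the line:
#
#     line = env['mrp.operation.workcenter'].new({
#         'workcenter': workcenter.id,
#         'routing_workcenter': routing_line.id,
#     })
#     line.onchange_workcenter()  # fills capacities, times and operator costs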
|