repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, may be null)
|---|---|---|---|---|
Vixionar/django
|
refs/heads/master
|
tests/migrations/test_migrations_first/__init__.py
|
12133432
| |
fengbaicanhe/intellij-community
|
refs/heads/master
|
python/lib/Lib/site-packages/django/core/cache/backends/__init__.py
|
12133432
| |
Udala/docforever
|
refs/heads/master
|
pycoin/__init__.py
|
12133432
| |
switchboardOp/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/__init__.py
|
167
|
# (c) 2013, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Note: Do not add any code to this file. module_utils may be a namespace
# package when using Ansible 2.1+. Anything in this file may not be available
# if one of the other packages in the namespace is loaded first.
|
LudwigOrtmann/RIOT
|
refs/heads/master
|
tests/gnrc_ipv6_nib_6ln/tests/01-run.py
|
12
|
#!/usr/bin/env python3
# Copyright (C) 2016 Kaspar Schleiser <kaspar@schleiser.de>
# Copyright (C) 2016 Takuo Yonezawa <Yonezawa-T2@mail.dnp.co.jp>
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
import os
import sys
def testfunc(child):
child.expect(r"OK \(\d+ tests\)")
if __name__ == "__main__":
sys.path.append(os.path.join(os.environ['RIOTBASE'], 'dist/tools/testrunner'))
from testrunner import run
sys.exit(run(testfunc))
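# Illustrative usage note (not part of the original script; invocation details
# assumed): the RIOT build system exports RIOTBASE and runs this file for the
# test application, typically via `make test`; run by hand it would look
# roughly like
#
#   RIOTBASE=/path/to/RIOT python3 tests/01-run.py
#
# with the node already flashed, so that testrunner can attach to its serial
# output and match the "OK (<n> tests)" line expected above.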
|
sorenk/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/aos/_aos_logical_device_map.py
|
28
|
#!/usr/bin/python
#
# (c) 2017 Apstra Inc, <community@apstra.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aos_logical_device_map
author: Damien Garros (@dgarros)
version_added: "2.3"
short_description: Manage AOS Logical Device Map
deprecated:
removed_in: "2.9"
why: This module does not support AOS 2.1 or later
alternative: See new modules at U(https://www.ansible.com/ansible-apstra).
description:
- Apstra AOS Logical Device Map module lets you manage your Logical Device Maps easily. You can
create and delete a Logical Device Map by name, ID or by using a JSON file. This module
is idempotent and supports the I(check) mode. It uses the AOS REST API.
requirements:
- "aos-pyez >= 0.6.0"
options:
session:
description:
- An existing AOS session as obtained by the M(aos_login) module.
required: true
name:
description:
- Name of the Logical Device Map to manage.
Only one of I(name), I(id) or I(content) can be set.
id:
description:
- AOS Id of the Logical Device Map to manage (can't be used to create a new Logical Device Map).
Only one of I(name), I(id) or I(content) can be set.
content:
description:
- Data structure of the Logical Device Map to manage. The data can be in YAML / JSON or
directly a variable. It's the same data structure that is returned
on success in I(value). Only one of I(name), I(id) or I(content) can be set.
state:
description:
- Indicates the expected state of the Logical Device Map (present or absent).
default: present
choices: ['present', 'absent']
'''
EXAMPLES = '''
- name: "Create an Logical Device Map with one subnet"
aos_logical_device_map:
session: "{{ aos_session }}"
name: "my-logical-device-map"
state: present
- name: "Create an Logical Device Map with multiple subnets"
aos_logical_device_map:
session: "{{ aos_session }}"
name: "my-other-logical-device-map"
state: present
- name: "Check if an Logical Device Map exist with same subnets by ID"
aos_logical_device_map:
session: "{{ aos_session }}"
name: "45ab26fc-c2ed-4307-b330-0870488fa13e"
state: present
- name: "Delete an Logical Device Map by name"
aos_logical_device_map:
session: "{{ aos_session }}"
name: "my-logical-device-map"
state: absent
- name: "Delete an Logical Device Map by id"
aos_logical_device_map:
session: "{{ aos_session }}"
id: "45ab26fc-c2ed-4307-b330-0870488fa13e"
state: absent
# Save a Logical Device Map to a file
- name: "Access Logical Device Map 1/3"
aos_logical_device_map:
session: "{{ aos_session }}"
name: "my-logical-device-map"
state: present
register: logical_device_map
- name: "Save Logical Device Map into a file in JSON 2/3"
copy:
content: "{{ logical_device_map.value | to_nice_json }}"
dest: logical_device_map_saved.json
- name: "Save Logical Device Map into a file in YAML 3/3"
copy:
content: "{{ logical_device_map.value | to_nice_yaml }}"
dest: logical_device_map_saved.yaml
- name: "Load Logical Device Map from a JSON file"
aos_logical_device_map:
session: "{{ aos_session }}"
content: "{{ lookup('file', 'resources/logical_device_map_saved.json') }}"
state: present
- name: "Load Logical Device Map from a YAML file"
aos_logical_device_map:
session: "{{ aos_session }}"
content: "{{ lookup('file', 'resources/logical_device_map_saved.yaml') }}"
state: present
'''
RETURNS = '''
name:
description: Name of the Logical Device Map
returned: always
type: str
sample: Server-IpAddrs
id:
description: AOS unique ID assigned to the Logical Device Map
returned: always
type: str
sample: fcc4ac1c-e249-4fe7-b458-2138bfb44c06
value:
description: Value of the object as returned by the AOS Server
returned: always
type: dict
sample: {'...'}
'''
import json
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aos.aos import get_aos_session, find_collection_item, do_load_resource, check_aos_version, content_to_dict
#########################################################
# State Processing
#########################################################
def logical_device_map_absent(module, aos, my_log_dev_map):
margs = module.params
# If the Logical Device Map does not exist, return directly
if my_log_dev_map.exists is False:
module.exit_json(changed=False, name=margs['name'], id='', value={})
# If not in check mode, delete Logical Device Map
if not module.check_mode:
try:
# Need to wait for 1sec before a delete to work around a current
# limitation in AOS
time.sleep(1)
my_log_dev_map.delete()
except:
module.fail_json(msg="An error occurred, while trying to delete the Logical Device Map")
module.exit_json(changed=True,
name=my_log_dev_map.name,
id=my_log_dev_map.id,
value={})
def logical_device_map_present(module, aos, my_log_dev_map):
margs = module.params
# If content is defined, create the object from content
if margs['content'] is not None:
if 'display_name' in module.params['content'].keys():
do_load_resource(module, aos.LogicalDeviceMaps, module.params['content']['display_name'])
else:
module.fail_json(msg="Unable to find display_name in 'content', Mandatory")
# if my_log_dev_map doesn't exist and no content was provided, fail
if my_log_dev_map.exists is False and 'content' not in margs.keys():
module.fail_json(msg="'Content' is mandatory for module that don't exist currently")
module.exit_json(changed=False,
name=my_log_dev_map.name,
id=my_log_dev_map.id,
value=my_log_dev_map.value)
#########################################################
# Main Function
#########################################################
def logical_device_map(module):
margs = module.params
try:
aos = get_aos_session(module, margs['session'])
except:
module.fail_json(msg="Unable to login to the AOS server")
item_name = False
item_id = False
if margs['content'] is not None:
content = content_to_dict(module, margs['content'])
if 'display_name' in content.keys():
item_name = content['display_name']
else:
module.fail_json(msg="Unable to extract 'display_name' from 'content'")
elif margs['name'] is not None:
item_name = margs['name']
elif margs['id'] is not None:
item_id = margs['id']
# ----------------------------------------------------
# Find Object if available based on ID or Name
# ----------------------------------------------------
try:
my_log_dev_map = find_collection_item(aos.LogicalDeviceMaps,
item_name=item_name,
item_id=item_id)
except:
module.fail_json(msg="Unable to find the Logical Device Map based on name or ID, something went wrong")
# ----------------------------------------------------
# Proceed based on State value
# ----------------------------------------------------
if margs['state'] == 'absent':
logical_device_map_absent(module, aos, my_log_dev_map)
elif margs['state'] == 'present':
logical_device_map_present(module, aos, my_log_dev_map)
def main():
module = AnsibleModule(
argument_spec=dict(
session=dict(required=True, type="dict"),
name=dict(required=False),
id=dict(required=False),
content=dict(required=False, type="json"),
state=dict(required=False,
choices=['present', 'absent'],
default="present")
),
mutually_exclusive=[('name', 'id', 'content')],
required_one_of=[('name', 'id', 'content')],
supports_check_mode=True
)
# Check if aos-pyez is present and matches the minimum version
check_aos_version(module, '0.6.0')
logical_device_map(module)
if __name__ == "__main__":
main()
|
nwjs/chromium.src
|
refs/heads/nw45-log
|
third_party/blink/renderer/bindings/scripts/web_idl/operation.py
|
1
|
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import functools
from .argument import Argument
from .code_generator_info import CodeGeneratorInfo
from .composition_parts import WithCodeGeneratorInfo
from .composition_parts import WithComponent
from .composition_parts import WithDebugInfo
from .composition_parts import WithExposure
from .composition_parts import WithExtendedAttributes
from .composition_parts import WithOwner
from .composition_parts import WithOwnerMixin
from .exposure import Exposure
from .function_like import FunctionLike
from .function_like import OverloadGroup
from .idl_type import IdlType
from .make_copy import make_copy
class Operation(FunctionLike, WithExtendedAttributes, WithCodeGeneratorInfo,
WithExposure, WithOwner, WithOwnerMixin, WithComponent,
WithDebugInfo):
"""https://heycam.github.io/webidl/#idl-operations"""
class IR(FunctionLike.IR, WithExtendedAttributes, WithCodeGeneratorInfo,
WithExposure, WithOwnerMixin, WithComponent, WithDebugInfo):
def __init__(self,
identifier,
arguments,
return_type,
is_static=False,
extended_attributes=None,
component=None,
debug_info=None):
FunctionLike.IR.__init__(
self,
identifier=identifier,
arguments=arguments,
return_type=return_type,
is_static=is_static)
WithExtendedAttributes.__init__(self, extended_attributes)
WithCodeGeneratorInfo.__init__(self)
WithExposure.__init__(self)
WithOwnerMixin.__init__(self)
WithComponent.__init__(self, component)
WithDebugInfo.__init__(self, debug_info)
self.is_stringifier = False
def __init__(self, ir, owner):
assert isinstance(ir, Operation.IR)
FunctionLike.__init__(self, ir)
WithExtendedAttributes.__init__(self, ir, readonly=True)
WithCodeGeneratorInfo.__init__(self, ir, readonly=True)
WithExposure.__init__(self, ir, readonly=True)
WithOwner.__init__(self, owner)
WithOwnerMixin.__init__(self, ir)
WithComponent.__init__(self, ir, readonly=True)
WithDebugInfo.__init__(self, ir)
self._is_stringifier = ir.is_stringifier
@property
def is_stringifier(self):
return self._is_stringifier
class OperationGroup(OverloadGroup, WithExtendedAttributes,
WithCodeGeneratorInfo, WithExposure, WithOwner,
WithComponent, WithDebugInfo):
"""
Represents a group of operations with the same identifier.
The number of operations in this group may be 1 or 2+. In the latter case,
the operations are overloaded.
"""
class IR(OverloadGroup.IR, WithExtendedAttributes, WithCodeGeneratorInfo,
WithExposure, WithDebugInfo):
def __init__(self,
operations,
extended_attributes=None,
code_generator_info=None,
debug_info=None):
OverloadGroup.IR.__init__(self, operations)
WithExtendedAttributes.__init__(self, extended_attributes)
WithCodeGeneratorInfo.__init__(self, code_generator_info)
WithExposure.__init__(self)
WithDebugInfo.__init__(self, debug_info)
def __init__(self, ir, operations, owner):
assert isinstance(ir, OperationGroup.IR)
assert isinstance(operations, (list, tuple))
assert all(
isinstance(operation, Operation) for operation in operations)
assert all(
operation.identifier == ir.identifier for operation in operations)
components = functools.reduce(
lambda s, operation: s.union(operation.components), operations,
set())
ir = make_copy(ir)
OverloadGroup.__init__(self, functions=operations)
WithExtendedAttributes.__init__(self, ir, readonly=True)
WithCodeGeneratorInfo.__init__(self, ir, readonly=True)
WithExposure.__init__(self, ir, readonly=True)
WithOwner.__init__(self, owner)
WithComponent.__init__(self, sorted(components))
WithDebugInfo.__init__(self, ir)
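# Illustrative example (not part of the original module): two Web IDL
# operations sharing an identifier, e.g.
#
#   interface CanvasRenderingContext2D {
#     void drawImage(CanvasImageSource image, double dx, double dy);
#     void drawImage(CanvasImageSource image, double dx, double dy,
#                    double dw, double dh);
#   };
#
# would be represented as two Operation objects collected into a single
# OperationGroup whose identifier is "drawImage" and whose operations are the
# overloads.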
|
roselleebarle04/django
|
refs/heads/master
|
django/views/static.py
|
300
|
"""
Views and functions for serving static files. These are only to be used
during development, and SHOULD NOT be used in a production setting.
"""
from __future__ import unicode_literals
import mimetypes
import os
import posixpath
import re
import stat
from django.http import (
FileResponse, Http404, HttpResponse, HttpResponseNotModified,
HttpResponseRedirect,
)
from django.template import Context, Engine, TemplateDoesNotExist, loader
from django.utils.http import http_date, parse_http_date
from django.utils.six.moves.urllib.parse import unquote
from django.utils.translation import ugettext as _, ugettext_lazy
def serve(request, path, document_root=None, show_indexes=False):
"""
Serve static files below a given point in the directory structure.
To use, put a URL pattern such as::
from django.views.static import serve
url(r'^(?P<path>.*)$', serve, {'document_root': '/path/to/my/files/'})
in your URLconf. You must provide the ``document_root`` param. You may
also set ``show_indexes`` to ``True`` if you'd like to serve a basic index
of the directory. This index view will use the template hardcoded below,
but if you'd like to override it, you can create a template called
``static/directory_index.html``.
"""
path = posixpath.normpath(unquote(path))
path = path.lstrip('/')
newpath = ''
for part in path.split('/'):
if not part:
# Strip empty path components.
continue
drive, part = os.path.splitdrive(part)
head, part = os.path.split(part)
if part in (os.curdir, os.pardir):
# Strip '.' and '..' in path.
continue
newpath = os.path.join(newpath, part).replace('\\', '/')
if newpath and path != newpath:
return HttpResponseRedirect(newpath)
fullpath = os.path.join(document_root, newpath)
if os.path.isdir(fullpath):
if show_indexes:
return directory_index(newpath, fullpath)
raise Http404(_("Directory indexes are not allowed here."))
if not os.path.exists(fullpath):
raise Http404(_('"%(path)s" does not exist') % {'path': fullpath})
# Respect the If-Modified-Since header.
statobj = os.stat(fullpath)
if not was_modified_since(request.META.get('HTTP_IF_MODIFIED_SINCE'),
statobj.st_mtime, statobj.st_size):
return HttpResponseNotModified()
content_type, encoding = mimetypes.guess_type(fullpath)
content_type = content_type or 'application/octet-stream'
response = FileResponse(open(fullpath, 'rb'), content_type=content_type)
response["Last-Modified"] = http_date(statobj.st_mtime)
if stat.S_ISREG(statobj.st_mode):
response["Content-Length"] = statobj.st_size
if encoding:
response["Content-Encoding"] = encoding
return response
DEFAULT_DIRECTORY_INDEX_TEMPLATE = """
{% load i18n %}
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="Content-type" content="text/html; charset=utf-8" />
<meta http-equiv="Content-Language" content="en-us" />
<meta name="robots" content="NONE,NOARCHIVE" />
<title>{% blocktrans %}Index of {{ directory }}{% endblocktrans %}</title>
</head>
<body>
<h1>{% blocktrans %}Index of {{ directory }}{% endblocktrans %}</h1>
<ul>
{% if directory != "/" %}
<li><a href="../">../</a></li>
{% endif %}
{% for f in file_list %}
<li><a href="{{ f|urlencode }}">{{ f }}</a></li>
{% endfor %}
</ul>
</body>
</html>
"""
template_translatable = ugettext_lazy("Index of %(directory)s")
def directory_index(path, fullpath):
try:
t = loader.select_template([
'static/directory_index.html',
'static/directory_index',
])
except TemplateDoesNotExist:
t = Engine().from_string(DEFAULT_DIRECTORY_INDEX_TEMPLATE)
files = []
for f in os.listdir(fullpath):
if not f.startswith('.'):
if os.path.isdir(os.path.join(fullpath, f)):
f += '/'
files.append(f)
c = Context({
'directory': path + '/',
'file_list': files,
})
return HttpResponse(t.render(c))
def was_modified_since(header=None, mtime=0, size=0):
"""
Was something modified since the user last downloaded it?
header
This is the value of the If-Modified-Since header. If this is None,
I'll just return True.
mtime
This is the modification time of the item we're talking about.
size
This is the size of the item we're talking about.
"""
try:
if header is None:
raise ValueError
matches = re.match(r"^([^;]+)(; length=([0-9]+))?$", header,
re.IGNORECASE)
header_mtime = parse_http_date(matches.group(1))
header_len = matches.group(3)
if header_len and int(header_len) != size:
raise ValueError
if int(mtime) > header_mtime:
raise ValueError
except (AttributeError, ValueError, OverflowError):
return True
return False
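# Illustrative example (not part of the original module): serve() answers with
# HttpResponseNotModified when the browser's cached copy is still current, e.g.
#
#   was_modified_since('Sat, 29 Oct 1994 19:43:31 GMT; length=1024',
#                      mtime=500000000, size=1024)      # -> False, send 304
#   was_modified_since(None, mtime=500000000, size=1024)  # -> True, send file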
|
sirpercival/kivy
|
refs/heads/master
|
examples/tutorials/notes/final/main.py
|
16
|
'''
Notes
=====
Simple application for reading/writing notes.
'''
__version__ = '1.0'
import json
from os.path import join, exists
from kivy.app import App
from kivy.uix.screenmanager import ScreenManager, Screen, SlideTransition
from kivy.properties import ListProperty, StringProperty, \
NumericProperty, BooleanProperty
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.clock import Clock
class MutableTextInput(FloatLayout):
text = StringProperty()
multiline = BooleanProperty(True)
def __init__(self, **kwargs):
super(MutableTextInput, self).__init__(**kwargs)
Clock.schedule_once(self.prepare, 0)
def prepare(self, *args):
self.w_textinput = self.ids.w_textinput.__self__
self.w_label = self.ids.w_label.__self__
self.view()
def on_touch_down(self, touch):
if self.collide_point(*touch.pos) and touch.is_double_tap:
self.edit()
return True
return super(MutableTextInput, self).on_touch_down(touch)
def edit(self):
self.clear_widgets()
self.add_widget(self.w_textinput)
self.w_textinput.focus = True
def view(self):
self.clear_widgets()
self.add_widget(self.w_label)
def check_focus_and_view(self, textinput):
if not textinput.focus:
self.text = textinput.text
self.view()
class NoteView(Screen):
note_index = NumericProperty()
note_title = StringProperty()
note_content = StringProperty()
class NoteListItem(BoxLayout):
note_title = StringProperty()
note_index = NumericProperty()
class Notes(Screen):
data = ListProperty()
def args_converter(self, row_index, item):
return {
'note_index': row_index,
'note_content': item['content'],
'note_title': item['title']}
class NoteApp(App):
def build(self):
self.notes = Notes(name='notes')
self.load_notes()
self.transition = SlideTransition(duration=.35)
root = ScreenManager(transition=self.transition)
root.add_widget(self.notes)
return root
def load_notes(self):
if not exists(self.notes_fn):
return
with open(self.notes_fn, 'rb') as fd:
data = json.load(fd)
self.notes.data = data
def save_notes(self):
with open(self.notes_fn, 'wb') as fd:
json.dump(self.notes.data, fd)
def del_note(self, note_index):
del self.notes.data[note_index]
self.save_notes()
self.refresh_notes()
self.go_notes()
def edit_note(self, note_index):
note = self.notes.data[note_index]
name = 'note{}'.format(note_index)
if self.root.has_screen(name):
self.root.remove_widget(self.root.get_screen(name))
view = NoteView(
name=name,
note_index=note_index,
note_title=note.get('title'),
note_content=note.get('content'))
self.root.add_widget(view)
self.transition.direction = 'left'
self.root.current = view.name
def add_note(self):
self.notes.data.append({'title': 'New note', 'content': ''})
note_index = len(self.notes.data) - 1
self.edit_note(note_index)
def set_note_content(self, note_index, note_content):
self.notes.data[note_index]['content'] = note_content
data = self.notes.data
self.notes.data = []
self.notes.data = data
self.save_notes()
self.refresh_notes()
def set_note_title(self, note_index, note_title):
self.notes.data[note_index]['title'] = note_title
self.save_notes()
self.refresh_notes()
def refresh_notes(self):
data = self.notes.data
self.notes.data = []
self.notes.data = data
def go_notes(self):
self.transition.direction = 'right'
self.root.current = 'notes'
@property
def notes_fn(self):
return join(self.user_data_dir, 'notes.json')
if __name__ == '__main__':
NoteApp().run()
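# Illustrative note (format inferred from the code above, not part of the
# original example): notes.json under App.user_data_dir is a plain JSON list
# of note dicts, e.g.
#
#   [{"title": "New note", "content": "remember the milk"}]
#
# load_notes() assigns that list straight to Notes.data and save_notes()
# writes it back after every change.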
|
taknevski/tensorflow-xsmm
|
refs/heads/master
|
tensorflow/python/ops/confusion_matrix.py
|
59
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Confusion matrix related utilities.
@@remove_squeezable_dimensions
@@confusion_matrix
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
def remove_squeezable_dimensions(
labels, predictions, expected_rank_diff=0, name=None):
"""Squeeze last dim if ranks differ from expected by exactly 1.
In the common case where we expect shapes to match, `expected_rank_diff`
defaults to 0, and we squeeze the last dimension of the larger rank if they
differ by 1.
But, for example, if `labels` contains class IDs and `predictions` contains 1
probability per class, we expect `predictions` to have 1 more dimension than
`labels`, so `expected_rank_diff` would be 1. In this case, we'd squeeze
`labels` if `rank(predictions) - rank(labels) == 0`, and
`predictions` if `rank(predictions) - rank(labels) == 2`.
This will use static shape if available. Otherwise, it will add graph
operations, which could result in a performance hit.
Args:
labels: Label values, a `Tensor` whose dimensions match `predictions`.
predictions: Predicted values, a `Tensor` of arbitrary dimensions.
expected_rank_diff: Expected result of `rank(predictions) - rank(labels)`.
name: Name of the op.
Returns:
Tuple of `labels` and `predictions`, possibly with last dim squeezed.
"""
with ops.name_scope(name, 'remove_squeezable_dimensions',
[labels, predictions]):
predictions = ops.convert_to_tensor(predictions)
labels = ops.convert_to_tensor(labels)
predictions_shape = predictions.get_shape()
predictions_rank = predictions_shape.ndims
labels_shape = labels.get_shape()
labels_rank = labels_shape.ndims
if (labels_rank is not None) and (predictions_rank is not None):
# Use static rank.
rank_diff = predictions_rank - labels_rank
if rank_diff == expected_rank_diff + 1:
predictions = array_ops.squeeze(predictions, [-1])
elif rank_diff == expected_rank_diff - 1:
labels = array_ops.squeeze(labels, [-1])
return labels, predictions
# Use dynamic rank.
rank_diff = array_ops.rank(predictions) - array_ops.rank(labels)
if (predictions_rank is None) or (
predictions_shape.dims[-1].is_compatible_with(1)):
predictions = control_flow_ops.cond(
math_ops.equal(expected_rank_diff + 1, rank_diff),
lambda: array_ops.squeeze(predictions, [-1]),
lambda: predictions)
if (labels_rank is None) or (
labels_shape.dims[-1].is_compatible_with(1)):
labels = control_flow_ops.cond(
math_ops.equal(expected_rank_diff - 1, rank_diff),
lambda: array_ops.squeeze(labels, [-1]),
lambda: labels)
return labels, predictions
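# Illustrative example (not part of the original module; assumes
# `import tensorflow as tf`): with the default expected_rank_diff=0, a trailing
# size-1 dimension on the higher-rank tensor is squeezed so the shapes line up:
#
#   labels      = tf.constant([1, 0, 1])          # shape [3]
#   predictions = tf.constant([[1], [0], [1]])    # shape [3, 1]
#   labels, predictions = remove_squeezable_dimensions(labels, predictions)
#   # predictions now has shape [3]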
def confusion_matrix(labels, predictions, num_classes=None, dtype=dtypes.int32,
name=None, weights=None):
"""Computes the confusion matrix from predictions and labels.
Calculate the Confusion Matrix for a pair of prediction and
label 1-D int arrays.
The matrix columns represent the prediction labels and the rows represent the
real labels. The confusion matrix is always a 2-D array of shape `[n, n]`,
where `n` is the number of valid labels for a given classification task. Both
prediction and labels must be 1-D arrays of the same shape in order for this
function to work.
If `num_classes` is None, then `num_classes` will be set to one plus
the maximum value in either predictions or labels.
Class labels are expected to start at 0. E.g., if `num_classes` was
three, then the possible labels would be `[0, 1, 2]`.
If `weights` is not `None`, then each prediction contributes its
corresponding weight to the total value of the confusion matrix cell.
For example:
```python
tf.contrib.metrics.confusion_matrix([1, 2, 4], [2, 2, 4]) ==>
[[0 0 0 0 0]
[0 0 1 0 0]
[0 0 1 0 0]
[0 0 0 0 0]
[0 0 0 0 1]]
```
Note that the possible labels are assumed to be `[0, 1, 2, 3, 4]`,
resulting in a 5x5 confusion matrix.
Args:
labels: 1-D `Tensor` of real labels for the classification task.
predictions: 1-D `Tensor` of predictions for a given classification.
num_classes: The possible number of labels the classification task can
have. If this value is not provided, it will be calculated
using both the predictions and labels arrays.
dtype: Data type of the confusion matrix.
name: Scope name.
weights: An optional `Tensor` whose shape matches `predictions`.
Returns:
A k X k matrix representing the confusion matrix, where k is the number of
possible labels in the classification task.
Raises:
ValueError: If both predictions and labels are not 1-D vectors and have
mismatched shapes, or if `weights` is not `None` and its shape doesn't
match `predictions`.
"""
with ops.name_scope(name, 'confusion_matrix',
(predictions, labels, num_classes, weights)) as name:
labels, predictions = remove_squeezable_dimensions(
ops.convert_to_tensor(labels, name='labels'),
ops.convert_to_tensor(
predictions, name='predictions'))
predictions = math_ops.cast(predictions, dtypes.int64)
labels = math_ops.cast(labels, dtypes.int64)
if num_classes is None:
num_classes = math_ops.maximum(math_ops.reduce_max(predictions),
math_ops.reduce_max(labels)) + 1
if weights is not None:
predictions.get_shape().assert_is_compatible_with(weights.get_shape())
weights = math_ops.cast(weights, dtype)
shape = array_ops.stack([num_classes, num_classes])
indices = array_ops.transpose(array_ops.stack([labels, predictions]))
values = (array_ops.ones_like(predictions, dtype)
if weights is None else weights)
cm_sparse = sparse_tensor.SparseTensor(
indices=indices, values=values, dense_shape=math_ops.to_int64(shape))
zero_matrix = array_ops.zeros(math_ops.to_int32(shape), dtype)
return sparse_ops.sparse_add(zero_matrix, cm_sparse)
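# Illustrative example (not part of the original module): with per-sample
# weights, each prediction adds its weight to the matching cell instead of 1:
#
#   confusion_matrix(labels=[1, 2, 4], predictions=[2, 2, 4],
#                    weights=[0.5, 1.0, 2.0], dtype=dtypes.float32)
#   # cell [1, 2] -> 0.5, cell [2, 2] -> 1.0, cell [4, 4] -> 2.0,
#   # all other cells of the 5x5 matrix are 0.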
|
mvaled/sentry
|
refs/heads/master
|
src/sentry/south_migrations/0196_auto__del_field_organization_owner.py
|
1
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Organization.owner'
db.delete_column(u'sentry_organization', 'owner_id')
def backwards(self, orm):
# Adding field 'Organization.owner'
db.add_column(
u'sentry_organization',
'owner',
self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(
to=orm['sentry.User'], null=True
),
keep_default=False
)
models = {
'sentry.accessgroup': {
'Meta': {
'unique_together': "(('team', 'name'),)",
'object_name': 'AccessGroup'
},
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'managed': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'members': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.User']",
'symmetrical': 'False'
}
),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'projects': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Project']",
'symmetrical': 'False'
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
),
'type': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '50'
})
},
'sentry.activity': {
'Meta': {
'object_name': 'Activity'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True'
}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Event']",
'null': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
)
},
'sentry.apikey': {
'Meta': {
'object_name': 'ApiKey'
},
'allowed_origins':
('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '32'
}),
'label': (
'django.db.models.fields.CharField', [], {
'default': "'Default'",
'max_length': '64',
'blank': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Organization']"
}
),
'scopes': ('django.db.models.fields.BigIntegerField', [], {
'default': 'None'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.auditlogentry': {
'Meta': {
'object_name': 'AuditLogEntry'
},
'actor': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'audit_actors'",
'null': 'True',
'to': "orm['sentry.User']"
}
),
'actor_key': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.ApiKey']",
'null': 'True',
'blank': 'True'
}
),
'actor_label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ip_address': (
'django.db.models.fields.GenericIPAddressField', [], {
'max_length': '39',
'null': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'target_object':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'target_user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'audit_targets'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.authidentity': {
'Meta': {
'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))",
'object_name': 'AuthIdentity'
},
'auth_provider': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.AuthProvider']"
}
),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'last_synced':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_verified':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.authprovider': {
'Meta': {
'object_name': 'AuthProvider'
},
'config': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'default_global_access':
('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'default_role':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50'
}),
'default_teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']",
'unique': 'True'
}
),
'provider': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'sync_time':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
})
},
'sentry.broadcast': {
'Meta': {
'object_name': 'Broadcast'
},
'badge': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'blank': 'True'
}
),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_active':
('django.db.models.fields.BooleanField', [], {
'default': 'True',
'db_index': 'True'
}),
'link': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.CharField', [], {
'max_length': '256'
})
},
'sentry.event': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'Event',
'db_table': "'sentry_message'",
'index_together': "(('group', 'datetime'),)"
},
'data':
('sentry.db.models.fields.node.NodeField', [], {
'null': 'True',
'blank': 'True'
}),
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'event_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'db_column': "'message_id'"
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'event_set'",
'null': 'True',
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'null': 'True'
}
),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'time_spent':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'null': 'True'
})
},
'sentry.eventmapping': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'EventMapping'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
},
'sentry.file': {
'Meta': {
'object_name': 'File'
},
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '40',
'null': 'True'
}),
'headers': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'path': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'size':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'storage':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
}),
'storage_options': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'timestamp': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'type': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.group': {
'Meta': {
'object_name': 'Group',
'db_table': "'sentry_groupedmessage'",
'index_together': "(('project', 'first_release'),)"
},
'active_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'culprit': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True',
'db_column': "'view'",
'blank': 'True'
}
),
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']",
'null': 'True'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_public': (
'django.db.models.fields.NullBooleanField', [], {
'default': 'False',
'null': 'True',
'blank': 'True'
}
),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'level': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "''",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'null': 'True'
}
),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'resolved_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'time_spent_count':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'time_spent_total':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'times_seen': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '1',
'db_index': 'True'
}
)
},
'sentry.groupassignee': {
'Meta': {
'object_name': 'GroupAssignee',
'db_table': "'sentry_groupasignee'"
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'unique': 'True',
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_assignee_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupbookmark': {
'Meta': {
'unique_together': "(('project', 'user', 'group'),)",
'object_name': 'GroupBookmark'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_bookmark_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupemailthread': {
'Meta': {
'unique_together': "(('email', 'group'), ('email', 'msgid'))",
'object_name': 'GroupEmailThread'
},
'date': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'groupemail_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'msgid': ('django.db.models.fields.CharField', [], {
'max_length': '100'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'groupemail_set'",
'to': "orm['sentry.Project']"
}
)
},
'sentry.grouphash': {
'Meta': {
'unique_together': "(('project', 'hash'),)",
'object_name': 'GroupHash'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
)
},
'sentry.groupmeta': {
'Meta': {
'unique_together': "(('group', 'key'),)",
'object_name': 'GroupMeta'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.grouprulestatus': {
'Meta': {
'unique_together': "(('rule', 'group'),)",
'object_name': 'GroupRuleStatus'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_active': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'rule': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Rule']"
}
),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {
'default': '0'
})
},
'sentry.groupseen': {
'Meta': {
'unique_together': "(('user', 'group'),)",
'object_name': 'GroupSeen'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'db_index': 'False'
}
)
},
'sentry.grouptagkey': {
'Meta': {
'unique_together': "(('project', 'group', 'key'),)",
'object_name': 'GroupTagKey'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouptagvalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value', 'group'),)",
'object_name': 'GroupTagValue',
'db_table': "'sentry_messagefiltervalue'"
},
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'null': 'True',
'to': "orm['sentry.Project']"
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.helppage': {
'Meta': {
'object_name': 'HelpPage'
},
'content': ('django.db.models.fields.TextField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_visible': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'key': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'unique': 'True',
'null': 'True'
}
),
'priority':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50'
}),
'title': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.lostpasswordhash': {
'Meta': {
'object_name': 'LostPasswordHash'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'unique': 'True'
}
)
},
'sentry.option': {
'Meta': {
'object_name': 'Option'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '64'
}),
'last_updated':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {
'object_name': 'Organization'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'members': (
'django.db.models.fields.related.ManyToManyField', [], {
'related_name': "'org_memberships'",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMember']",
'to': "orm['sentry.User']"
}
),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'slug':
('django.db.models.fields.SlugField', [], {
'unique': 'True',
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.organizationaccessrequest': {
'Meta': {
'unique_together': "(('team', 'member'),)",
'object_name': 'OrganizationAccessRequest'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'member': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.OrganizationMember']"
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.organizationmember': {
'Meta': {
'unique_together': "(('organization', 'user'), ('organization', 'email'))",
'object_name': 'OrganizationMember'
},
'counter': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': (
'django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'member_set'",
'to': "orm['sentry.Organization']"
}
),
'teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMemberTeam']",
'blank': 'True'
}
),
'type':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'sentry_orgmember_set'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.organizationmemberteam': {
'Meta': {
'unique_together': "(('team', 'organizationmember'),)",
'object_name': 'OrganizationMemberTeam',
'db_table': "'sentry_organizationmember_teams'"
},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'organizationmember': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.OrganizationMember']"
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.project': {
'Meta': {
'unique_together': "(('team', 'slug'), ('organization', 'slug'))",
'object_name': 'Project'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'public': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50',
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.projectkey': {
'Meta': {
'object_name': 'ProjectKey'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Project']"
}
),
'public_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'roles': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'secret_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.projectoption': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'ProjectOption',
'db_table': "'sentry_projectoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.release': {
'Meta': {
'unique_together': "(('project', 'version'),)",
'object_name': 'Release'
},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_released':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'blank': 'True'
}),
'date_started':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'blank': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'new_groups':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'ref': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'url': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'version': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.releasefile': {
'Meta': {
'unique_together': "(('release', 'ident'),)",
'object_name': 'ReleaseFile'
},
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'name': ('django.db.models.fields.TextField', [], {}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
)
},
'sentry.rule': {
'Meta': {
'object_name': 'Rule'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
},
'sentry.savedsearch': {
'Meta': {
'unique_together': "(('project', 'name'),)",
'object_name': 'SavedSearch'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.tagkey': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'TagKey',
'db_table': "'sentry_filterkey'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'label':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.tagvalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value'),)",
'object_name': 'TagValue',
'db_table': "'sentry_filtervalue'"
},
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.team': {
'Meta': {
'unique_together': "(('organization', 'slug'),)",
'object_name': 'Team'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.user': {
'Meta': {
'object_name': 'User',
'db_table': "'auth_user'"
},
'date_joined':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'blank': 'True'
}),
'first_name':
('django.db.models.fields.CharField', [], {
'max_length': '30',
'blank': 'True'
}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'is_managed': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_staff': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'last_login':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_name':
('django.db.models.fields.CharField', [], {
'max_length': '30',
'blank': 'True'
}),
'password': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'username':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '128'
})
},
'sentry.useroption': {
'Meta': {
'unique_together': "(('user', 'project', 'key'),)",
'object_name': 'UserOption'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {
'object_name': 'UserReport',
'index_together': "(('project', 'event_id'),)"
},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
}
}
complete_apps = ['sentry']
|
tecip-nes/pyot
|
refs/heads/master
|
contiki-tres/apps/pymite/tools/pmOdDecoder.py
|
2
|
#!/usr/bin/env python
# This file is Copyright 2009 Dean Hall.
# This file is part of the Python-on-a-Chip tools.
# This software is licensed under the MIT License.
# See the LICENSE file for details.
"""
PyMite Object Descriptor Decoder
================================
Decodes an object descriptor value into its bit fields.
"""
## @file
# @copybrief pmOdDecoder
## @package pmOdDecoder
# @brief PyMite Object Descriptor Decoder
#
# Decodes an object descriptor value into its bit fields.
import sys
__usage__ = """USAGE:
./pmOdDecoder.py odvalue
"""
TYPES = (
'OBJ_TYPE_NON',
'OBJ_TYPE_INT',
'OBJ_TYPE_FLT',
'OBJ_TYPE_STR',
'OBJ_TYPE_TUP',
'OBJ_TYPE_COB',
'OBJ_TYPE_MOD',
'OBJ_TYPE_CLO',
'OBJ_TYPE_FXN',
'OBJ_TYPE_CLI',
'OBJ_TYPE_CIM',
'OBJ_TYPE_NIM',
'OBJ_TYPE_NOB',
'OBJ_TYPE_THR',
0x0E,
'OBJ_TYPE_BOOL',
'OBJ_TYPE_CIO',
'OBJ_TYPE_MTH',
'OBJ_TYPE_LST',
'OBJ_TYPE_DIC',
0x14,0x15,0x16,0x17,0x18,
'OBJ_TYPE_FRM',
'OBJ_TYPE_BLK',
'OBJ_TYPE_SEG',
'OBJ_TYPE_SGL',
'OBJ_TYPE_SQI',
'OBJ_TYPE_NFM',
)
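# Example run (added for illustration; the value is hypothetical but decodes
# with the bit layout implemented below -- type in bits 15..11, size in bits
# 10..2, free in bit 1, mark in bit 0):
#   $ ./pmOdDecoder.py 0x0804
#   2052 (0x0804): OBJ_TYPE_INT[4], f=0, m=0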
def od_decode(odvalue):
if odvalue & 0x0002:
return {
"val": odvalue,
"size": odvalue & 0xFFFC,
"type": "free",
"free": (odvalue & 0x0002) >> 1,
"mark": odvalue & 0x0001, # Reserved bit
}
return {
"val": odvalue,
"size": odvalue & 0x07FC,
"type": TYPES[(odvalue & 0xF800) >> 11],
"free": (odvalue & 0x0002) >> 1,
"mark": odvalue & 0x0001,
}
def to_int(s):
if s.startswith("0x"):
return int(s, 16)
return int(s)
def print_od(od):
print("%(val)d (0x%(val)04x): %(type)s[%(size)d], f=%(free)d, m=%(mark)d"
% od)
def main():
odvalues = sys.argv[1:]
odvalues = map(to_int, odvalues)
ods = map(od_decode, odvalues)
map(print_od, ods)
if __name__ == "__main__":
main()
|
cmwaura/Newspade
|
refs/heads/master
|
manage.py
|
1
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "analyst.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
aamalev/aioworkers
|
refs/heads/master
|
aioworkers/storage/filesystem.py
|
1
|
import hashlib
import os
import pathlib
import shutil
import tempfile
from functools import partial
from pathlib import Path, PurePath
from .. import humanize
from ..core.base import AbstractNestedEntity, ExecutorEntity
from ..core.formatter import FormattedEntity
from . import StorageError, base
__all__ = (
'AsyncPath',
'FileSystemStorage',
'HashFileSystemStorage',
'NestedFileSystemStorage',
)
def flat(parts):
if isinstance(parts, str):
if os.path.isabs(parts):
raise ValueError('Path must be relative. '
'[{}]'.format(parts))
yield parts
elif isinstance(parts, PurePath):
if parts.is_absolute():
raise ValueError('Path must be relative. '
'[{}]'.format(parts))
yield parts
elif isinstance(parts, (list, tuple)):
for p in parts:
yield from flat(p)
else:
raise TypeError(
'Key must be relative path [str or Path]. '
'But {}'.format(parts))
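# Illustrative behaviour of flat() (comment added for clarity, not part of the
# original module): keys may be strings, relative PurePaths or nested lists,
# and absolute or non-path keys are rejected.
#   list(flat(['logs', PurePath('2021/01')]))  ->  ['logs', PurePath('2021/01')]
#   list(flat('/etc/passwd'))                  ->  raises ValueError
#   list(flat(42))                             ->  raises TypeError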
class AsyncFile:
def __init__(self, fd, storage=None):
self.fd = fd
self.storage = storage
self._closed = False
async def read(self, *args, **kwargs):
return await self.storage.run_in_executor(
self.fd.read, *args, **kwargs
)
async def write(self, *args, **kwargs):
return await self.storage.run_in_executor(
self.fd.write, *args, **kwargs
)
async def __aenter__(self):
assert not self._closed
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.close()
async def close(self):
assert not self._closed
await self.storage.run_in_executor(self.fd.close)
await self.storage.next_space_waiter()
def __aiter__(self):
return self
async def __anext__(self):
result = await self.storage.run_in_executor(next, self.fd, None)
if result is None:
raise StopAsyncIteration()
else:
return result
class AsyncFileContextManager:
def __init__(self, path, *args, **kwargs):
self.path = path
self.af = None
if 'mode' in kwargs:
self.mode = kwargs['mode']
elif len(args) > 1:
self.mode = args[1]
else:
self.mode = 'r'
self._constructor = partial(*args, **kwargs)
async def __aenter__(self):
assert self.af is None, "File already opened"
path = self.path
storage = path.storage
await storage.wait_free_space()
if 'w' in self.mode or '+' in self.mode:
await path.parent.mkdir(parents=True, exist_ok=True)
fd = await storage.run_in_executor(self._constructor)
self.af = AsyncFile(fd, storage)
return self.af
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.af.close()
self.af = None
def __await__(self):
return self.__aenter__().__await__()
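# Rough usage sketch (added comment; the key name is illustrative):
# AsyncPath.open() returns this context manager, so callers can both `await`
# it and use it with `async with`, e.g.
#   async with storage.open('some/key.bin', 'rb') as f:
#       data = await f.read()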
class AsyncGlob:
def __init__(self, path, pattern):
self._factory = type(path)
self._iter = path.path.glob(pattern)
self.storage = path.storage
def __aiter__(self):
return self
async def __anext__(self):
result = await self.storage.run_in_executor(next, self._iter, None)
if result is None:
raise StopAsyncIteration()
else:
return self._factory(result, storage=self.storage)
class AsyncPath(PurePath):
def __new__(cls, *args, storage=None):
if cls is AsyncPath:
cls = AsyncWindowsPath if os.name == 'nt' else AsyncPosixPath
self = cls._from_parts(args, init=False)
if not self._flavour.is_supported:
raise NotImplementedError("cannot instantiate %r on your system"
% (cls.__name__,))
if storage is None:
for i in args:
if isinstance(i, AsyncPath):
storage = i.storage
break
self._init(storage=storage)
return self
def _init(self, storage=None):
if storage:
self.storage = storage
else:
self.storage = MockFileSystemStorage()
self.path = Path(self)
async def exists(self) -> bool:
return await self.storage.run_in_executor(self.path.exists)
async def mkdir(self, *args, **kwargs):
return await self.storage.run_in_executor(
self.path.mkdir, *args, **kwargs
)
async def stat(self) -> os.stat_result:
return await self.storage.run_in_executor(self.path.stat)
async def unlink(self):
return await self.storage.run_in_executor(self.path.unlink)
async def read_text(self, *args, **kwargs):
return await self.storage.run_in_executor(
self.path.read_text, *args, **kwargs
)
async def write_text(self, *args, **kwargs):
return await self.storage.run_in_executor(
self.path.write_text, *args, **kwargs
)
async def read_bytes(self, *args, **kwargs):
return await self.storage.run_in_executor(
self.path.read_bytes, *args, **kwargs
)
async def write_bytes(self, *args, **kwargs):
return await self.storage.run_in_executor(
self.path.write_bytes, *args, **kwargs
)
def _make_child(self, args):
k = super()._make_child(args)
k._init(self.storage)
return k
@classmethod
def _from_parts(cls, args, init=True):
self = object.__new__(cls)
drv, root, parts = self._parse_args(args)
self._drv = drv
self._root = root
self._parts = parts
if init:
storage = None
for t in args:
if isinstance(t, AsyncPath):
storage = t.storage
break
self._init(storage=storage)
return self
def open(self, *args, **kwargs):
return AsyncFileContextManager(
self, self.path.open, *args, **kwargs)
@property
def parent(self):
p = super().parent
p._init(self.storage)
return p
@property
def normpath(self):
return type(self)(
os.path.normpath(str(self)),
storage=self.storage)
def glob(self, pattern):
return AsyncGlob(self, pattern)
class AsyncPosixPath(AsyncPath, pathlib.PurePosixPath):
pass
class AsyncWindowsPath(AsyncPath, pathlib.PureWindowsPath):
pass
class MockFileSystemStorage(ExecutorEntity):
def set_config(self, config):
cfg = config.new_child(executor=1)
super().set_config(cfg)
@property
def loop(self):
if self._loop is None:
self._loop = __import__('asyncio').get_event_loop()
return self._loop
async def next_space_waiter(self):
pass
class BaseFileSystemStorage(
AbstractNestedEntity,
ExecutorEntity,
FormattedEntity,
base.AbstractStorage):
PARAM_LIMIT_FREE_SPACE = 'limit_free_space'
def set_config(self, config):
super().set_config(config)
self._space_waiters = []
self._path = AsyncPath(self.config.path, storage=self)
self._tmp = self.config.get('tmp') or self.config.path
self._limit = self._config.get(self.PARAM_LIMIT_FREE_SPACE)
if isinstance(self._limit, int):
self._limit = self._limit << 20 # int in MB
elif isinstance(self._limit, str):
self._limit = humanize.parse_size(self._limit)
def factory(self, item, config=None):
path = self._path.joinpath(*flat(item)).normpath
simple_item = path.relative_to(self._path)
inst = super().factory(simple_item, config)
for i in (
'_formatter',
'_space_waiters',
'_executor',
'_tmp',
'_limit',
):
setattr(inst, i, getattr(self, i))
inst._path = path
return inst
def disk_usage(self):
def disk_usage(path):
try:
return shutil.disk_usage(path)
except FileNotFoundError:
os.makedirs(path, exist_ok=True)
return shutil.disk_usage(path)
return self.run_in_executor(disk_usage, self._config.path)
async def get_free_space(self):
du = await self.disk_usage()
return du.free
async def wait_free_space(self, size=None):
if not self._limit:
return
free = await self.get_free_space()
if size is None or free < size + self._limit:
f = self.loop.create_future()
self._space_waiters.append((f, size))
await f
async def next_space_waiter(self):
if not self._limit:
return
free = await self.get_free_space()
for fsize in self._space_waiters:
f, size = fsize
if free > (size or 0) + self._limit:
f.set_result(None)
to_del = fsize
break
else:
return
self._space_waiters.remove(to_del)
def _write(self, key: Path, value):
d = key.parent
if d.exists():
pass
elif value is None:
return
else:
d.mkdir(parents=True)
if value is not None:
with tempfile.NamedTemporaryFile(
dir=self._tmp,
delete=False) as f:
source = f.name
f.write(value)
shutil.move(source, str(key))
elif not key.exists():
pass
elif key.is_dir():
shutil.rmtree(str(key))
else:
with tempfile.NamedTemporaryFile(
dir=self._tmp) as f:
shutil.move(str(key), f.name)
def path_transform(self, rel_path: str):
return rel_path
def raw_key(self, *key):
rel = os.path.normpath(str(PurePath(*flat(key))))
path = self._path.joinpath(self.path_transform(rel)).normpath
if path.relative_to(self._path) == '.':
raise ValueError('Access denied: %s' % path)
return path
async def set(self, key, value):
if value is not None:
value = self.encode(value)
await self.wait_free_space(len(value))
k = self.raw_key(key).path
try:
await self.run_in_executor(self._write, k, value)
except OSError as e:
raise StorageError(str(e)) from e
await self.next_space_waiter()
async def get(self, key):
k = self.raw_key(key)
if await k.exists():
v = await k.read_bytes()
return self.decode(v)
def open(self, key, *args, **kwargs):
return self.raw_key(key).open(*args, **kwargs)
def _copy(self, key_source, storage_dest, key_dest, copy_func):
s = self.raw_key(key_source).path
d = storage_dest.raw_key(key_dest).path
target_dir = d.parent
target_dir.mkdir(parents=True, exist_ok=True)
if s.exists():
copy_func(str(s), str(d))
elif not d.exists():
return False
else:
d.unlink()
return True
def copy(self, key_source, storage_dest, key_dest):
if isinstance(storage_dest, FileSystemStorage):
return self.run_in_executor(
self._copy, key_source,
storage_dest, key_dest, shutil.copy)
return super().copy(key_source, storage_dest, key_dest)
def move(self, key_source, storage_dest, key_dest):
if isinstance(storage_dest, FileSystemStorage):
return self.run_in_executor(
self._copy, key_source,
storage_dest, key_dest, shutil.move)
return super().move(key_source, storage_dest, key_dest)
def __repr__(self):
cls = type(self)
props = []
if self.config:
props.append(('path', self._path))
if self.config.get('executor'):
props.append(('c', self.config.executor))
return '<{}.{} {}>'.format(
cls.__module__, cls.__qualname__,
' '.join(map('{0[0]}={0[1]}'.format, props)))
class FileSystemStorage(
BaseFileSystemStorage,
base.AbstractListedStorage):
def list(self, glob='*'):
base = self._path
g = base.path.glob(glob)
return self.run_in_executor(
list, map(lambda x: x.relative_to(base), g)
)
async def length(self, glob='*'):
return len(await self.list(glob))
class NestedFileSystemStorage(BaseFileSystemStorage):
def path_transform(self, rel_path: str):
return os.path.join(rel_path[:2], rel_path[2:4], rel_path)
class HashFileSystemStorage(NestedFileSystemStorage):
def path_transform(self, rel_path: str):
ext = os.path.splitext(rel_path)[-1]
hash = hashlib.md5()
hash.update(rel_path.encode())
d = hash.hexdigest() + ext
return super().path_transform(d)
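# Key layout sketch (added comment, illustrative key): for a key such as
# 'abcdef.bin', NestedFileSystemStorage stores it under 'ab/cd/abcdef.bin',
# spreading entries over two directory levels. HashFileSystemStorage first
# replaces the name with md5(key) plus the original extension and then applies
# the same nesting, which spreads files evenly regardless of the key names.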
|
ideadevice/alembic
|
refs/heads/master
|
alembic/templates/generic/env.py
|
71
|
from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = None
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(url=url)
with context.begin_transaction():
context.run_migrations()
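# Note (added comment): the offline code path above is taken for "--sql"
# invocations such as `alembic upgrade head --sql`, which emit the migration
# SQL as a script instead of executing it against a live database.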
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
engine = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
connection = engine.connect()
context.configure(
connection=connection,
target_metadata=target_metadata
)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
|
qiaofuhui/zerorpc-python
|
refs/heads/master
|
tests/test_pubpush.py
|
102
|
# -*- coding: utf-8 -*-
# Open Source Initiative OSI - The MIT License (MIT):Licensing
#
# The MIT License (MIT)
# Copyright (c) 2012 DotCloud Inc (opensource@dotcloud.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import gevent
import gevent.event
import zerorpc
from testutils import teardown, random_ipc_endpoint
def test_pushpull_inheritance():
endpoint = random_ipc_endpoint()
pusher = zerorpc.Pusher()
pusher.bind(endpoint)
trigger = gevent.event.Event()
class Puller(zerorpc.Puller):
def lolita(self, a, b):
print 'lolita', a, b
assert a + b == 3
trigger.set()
puller = Puller()
puller.connect(endpoint)
gevent.spawn(puller.run)
trigger.clear()
pusher.lolita(1, 2)
trigger.wait()
print 'done'
def test_pubsub_inheritance():
endpoint = random_ipc_endpoint()
publisher = zerorpc.Publisher()
publisher.bind(endpoint)
trigger = gevent.event.Event()
class Subscriber(zerorpc.Subscriber):
def lolita(self, a, b):
print 'lolita', a, b
assert a + b == 3
trigger.set()
subscriber = Subscriber()
subscriber.connect(endpoint)
gevent.spawn(subscriber.run)
trigger.clear()
    # We need this retry logic to wait until the subscriber.run coroutine starts
    # reading (the published messages will go to /dev/null until then).
for attempt in xrange(0, 10):
publisher.lolita(1, 2)
if trigger.wait(0.2):
print 'done'
return
raise RuntimeError("The subscriber didn't receive any published message")
def test_pushpull_composite():
endpoint = random_ipc_endpoint()
trigger = gevent.event.Event()
class Puller(object):
def lolita(self, a, b):
print 'lolita', a, b
assert a + b == 3
trigger.set()
pusher = zerorpc.Pusher()
pusher.bind(endpoint)
service = Puller()
puller = zerorpc.Puller(service)
puller.connect(endpoint)
gevent.spawn(puller.run)
trigger.clear()
pusher.lolita(1, 2)
trigger.wait()
print 'done'
def test_pubsub_composite():
endpoint = random_ipc_endpoint()
trigger = gevent.event.Event()
class Subscriber(object):
def lolita(self, a, b):
print 'lolita', a, b
assert a + b == 3
trigger.set()
publisher = zerorpc.Publisher()
publisher.bind(endpoint)
service = Subscriber()
subscriber = zerorpc.Subscriber(service)
subscriber.connect(endpoint)
gevent.spawn(subscriber.run)
trigger.clear()
    # We need this retry logic to wait until the subscriber.run coroutine starts
    # reading (the published messages will go to /dev/null until then).
for attempt in xrange(0, 10):
publisher.lolita(1, 2)
if trigger.wait(0.2):
print 'done'
return
raise RuntimeError("The subscriber didn't receive any published message")
|
proyectos-analizo-info/pybossa-analizo-info
|
refs/heads/master
|
test/helper/web.py
|
1
|
# -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2013 SF Isle of Man Limited
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
from default import Test, db, Fixtures, with_context
from helper.user import User
from pybossa.model.app import App
from pybossa.model.category import Category
from pybossa.model.task import Task
from pybossa.model.task_run import TaskRun
class Helper(Test):
"""Class to help testing the web interface"""
user = User()
def html_title(self, title=None):
"""Helper function to create an HTML title"""
if title is None:
return "<title>PyBossa</title>"
else:
return "<title>PyBossa · %s</title>" % title
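    # Example (added comment): html_title("Projects") returns
    # "<title>PyBossa · Projects</title>", while html_title() falls back to
    # the bare "<title>PyBossa</title>".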
def register(self, method="POST", fullname="John Doe", name="johndoe",
password="p4ssw0rd", password2=None, email=None):
"""Helper function to register and sign in a user"""
if password2 is None:
password2 = password
if email is None:
email = name + '@example.com'
if method == "POST":
return self.app.post('/account/register',
data={
'fullname': fullname,
'name': name,
'email_addr': email,
'password': password,
'confirm': password2},
follow_redirects=True)
else:
return self.app.get('/account/register', follow_redirects=True)
def signin(self, method="POST", email="johndoe@example.com", password="p4ssw0rd",
next=None):
"""Helper function to sign in current user"""
url = '/account/signin'
if next is not None:
url = url + '?next=' + next
if method == "POST":
return self.app.post(url, data={'email': email,
'password': password},
follow_redirects=True)
else:
return self.app.get(url, follow_redirects=True)
def profile(self, name="johndoe"):
"""Helper function to check profile of signed in user"""
url = "/account/%s" % name
return self.app.get(url, follow_redirects=True)
def update_profile(self, method="POST", id=1, fullname="John Doe",
name="johndoe", locale="es",
email_addr="johndoe@example.com",
new_name=None,
btn='Profile'):
"""Helper function to update the profile of users"""
url = "/account/%s/update" % name
if new_name:
name = new_name
if (method == "POST"):
return self.app.post(url,
data={'id': id,
'fullname': fullname,
'name': name,
'locale': locale,
'email_addr': email_addr,
'btn': btn},
follow_redirects=True)
else:
return self.app.get(url,
follow_redirects=True)
def signout(self):
"""Helper function to sign out current user"""
return self.app.get('/account/signout', follow_redirects=True)
def create_categories(self):
with self.flask_app.app_context():
categories = db.session.query(Category).all()
if len(categories) == 0:
print "Categories 0"
print "Creating default ones"
self._create_categories()
def new_application(self, method="POST", name="Sample Project",
short_name="sampleapp", description="Description",
long_description=u'Long Description\n================'):
"""Helper function to create a project"""
if method == "POST":
self.create_categories()
return self.app.post("/app/new", data={
'name': name,
'short_name': short_name,
'description': description,
'long_description': long_description,
}, follow_redirects=True)
else:
return self.app.get("/app/new", follow_redirects=True)
def new_task(self, appid):
"""Helper function to create tasks for a project"""
tasks = []
for i in range(0, 10):
tasks.append(Task(app_id=appid, state='0', info={}))
db.session.add_all(tasks)
db.session.commit()
def delete_task_runs(self, app_id=1):
"""Deletes all TaskRuns for a given app_id"""
        db.session.query(TaskRun).filter_by(app_id=app_id).delete()
db.session.commit()
def task_settings_scheduler(self, method="POST", short_name='sampleapp',
sched="default"):
"""Helper function to modify task scheduler"""
url = "/app/%s/tasks/scheduler" % short_name
if method == "POST":
return self.app.post(url, data={
'sched': sched,
}, follow_redirects=True)
else:
return self.app.get(url, follow_redirects=True)
def task_settings_redundancy(self, method="POST", short_name='sampleapp',
n_answers=30):
"""Helper function to modify task redundancy"""
url = "/app/%s/tasks/redundancy" % short_name
if method == "POST":
return self.app.post(url, data={
'n_answers': n_answers,
}, follow_redirects=True)
else:
return self.app.get(url, follow_redirects=True)
def task_settings_priority(self, method="POST", short_name='sampleapp',
                               task_ids="1", priority_0=0.0):
        """Helper function to modify task priority"""
url = "/app/%s/tasks/priority" % short_name
if method == "POST":
return self.app.post(url, data={
'task_ids': task_ids,
'priority_0': priority_0
}, follow_redirects=True)
else:
return self.app.get(url, follow_redirects=True)
def delete_application(self, method="POST", short_name="sampleapp"):
"""Helper function to delete a project"""
if method == "POST":
return self.app.post("/app/%s/delete" % short_name,
follow_redirects=True)
else:
return self.app.get("/app/%s/delete" % short_name,
follow_redirects=True)
def update_application(self, method="POST", short_name="sampleapp", id=1,
new_name="Sample Project", new_short_name="sampleapp",
new_description="Description",
new_allow_anonymous_contributors="False",
new_category_id="2",
new_long_description="Long desc",
new_sched="random",
new_hidden=False):
"""Helper function to update a project"""
if method == "POST":
if new_hidden:
return self.app.post("/app/%s/update" % short_name,
data={
'id': id,
'name': new_name,
'short_name': new_short_name,
'description': new_description,
'allow_anonymous_contributors': new_allow_anonymous_contributors,
'category_id': new_category_id,
'long_description': new_long_description,
'sched': new_sched,
'hidden': new_hidden,
'btn': 'Save'},
follow_redirects=True)
else:
return self.app.post("/app/%s/update" % short_name,
data={'id': id, 'name': new_name,
'short_name': new_short_name,
'allow_anonymous_contributors': new_allow_anonymous_contributors,
'category_id': new_category_id,
'long_description': new_long_description,
'sched': new_sched,
'description': new_description,
'btn': 'Save'
},
follow_redirects=True)
else:
return self.app.get("/app/%s/update" % short_name,
follow_redirects=True)
|
Zlash65/erpnext
|
refs/heads/develop
|
erpnext/accounts/doctype/pricing_rule_item_code/pricing_rule_item_code.py
|
9
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class PricingRuleItemCode(Document):
pass
|
jordan8037310/CouchPotatoServer
|
refs/heads/master
|
libs/pio/__init__.py
|
12133432
| |
leilihh/novaha
|
refs/heads/kvm_ha_tmp
|
nova/tests/integrated/v3/test_deferred_delete.py
|
23
|
# Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.tests.integrated.v3 import test_servers
class DeferredDeleteSampleJsonTests(test_servers.ServersSampleBase):
extension_name = "os-deferred-delete"
def setUp(self):
super(DeferredDeleteSampleJsonTests, self).setUp()
self.flags(reclaim_instance_interval=1)
def test_restore(self):
uuid = self._post_server()
response = self._do_delete('servers/%s' % uuid)
response = self._do_post('servers/%s/action' % uuid,
'restore-post-req', {})
self.assertEqual(response.status, 202)
self.assertEqual(response.read(), '')
def test_force_delete(self):
uuid = self._post_server()
response = self._do_delete('servers/%s' % uuid)
response = self._do_post('servers/%s/action' % uuid,
'force-delete-post-req', {})
self.assertEqual(response.status, 202)
self.assertEqual(response.read(), '')
|
ksomemo/ciserviceex
|
refs/heads/master
|
ciserviceex/version.py
|
7
|
__version__ = '0.0.1'
|
webmasterraj/FogOrNot
|
refs/heads/master
|
flask/lib/python2.7/site-packages/numpy/distutils/compat.py
|
264
|
"""Small modules to cope with python 2 vs 3 incompatibilities inside
numpy.distutils
"""
from __future__ import division, absolute_import, print_function
import sys
def get_exception():
return sys.exc_info()[1]
|
google-research/google-research
|
refs/heads/master
|
spreadsheet_coder/bert_modeling.py
|
1
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT's modeling.py adapted to SpreadsheetCoder.
This code is modified from BERT's modeling.py
to better support variable scope re-use.
The main BERT model and related functions.
"""
import collections
import copy
import json
import math
import re
import numpy as np
import six
import tensorflow.compat.v1 as tf
import tf_slim as slim
class BertConfig(object):
"""Configuration for `BertModel`."""
def __init__(self,
vocab_size,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
initializer_range=0.02):
"""Constructs BertConfig.
Args:
vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler.
hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
initializer_range: The stdev of the truncated_normal_initializer for
initializing all weight matrices.
"""
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig(vocab_size=None)
for (key, value) in six.iteritems(json_object):
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with tf.io.gfile.GFile(json_file, "r") as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
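# Round-trip sketch (added comment, values are illustrative): a config can be
# serialised and rebuilt without loss, e.g.
#   cfg = BertConfig(vocab_size=32000, hidden_size=512)
#   same = BertConfig.from_dict(json.loads(cfg.to_json_string()))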
class BertModel(object):
"""BERT model ("Bidirectional Encoder Representations from Transformers").
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = tf.constant([[31, 51, 99], [15, 5, 0]])
input_mask = tf.constant([[1, 1, 1], [1, 1, 0]])
token_type_ids = tf.constant([[0, 0, 1], [0, 2, 0]])
config = modeling.BertConfig(vocab_size=32000, hidden_size=512,
num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)
model = modeling.BertModel(config=config, is_training=True,
input_ids=input_ids, input_mask=input_mask, token_type_ids=token_type_ids)
label_embeddings = tf.get_variable(...)
pooled_output = model.get_pooled_output()
logits = tf.matmul(pooled_output, label_embeddings)
...
```
"""
def __init__(self,
config,
is_training,
input_ids,
input_mask=None,
token_type_ids=None,
use_one_hot_embeddings=False,
scope=None):
"""Constructor for BertModel.
Args:
config: `BertConfig` instance.
is_training: bool. true for training model, false for eval model. Controls
whether dropout will be applied.
input_ids: int32 Tensor of shape [batch_size, seq_length].
input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].
token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
use_one_hot_embeddings: (optional) bool. Whether to use one-hot word
embeddings or tf.embedding_lookup() for the word embeddings.
scope: (optional) variable scope. Defaults to "bert".
Raises:
ValueError: The config is invalid or one of the input tensor shapes
is invalid.
"""
config = copy.deepcopy(config)
if not is_training:
config.hidden_dropout_prob = 0.0
config.attention_probs_dropout_prob = 0.0
input_shape = get_shape_list(input_ids, expected_rank=2)
batch_size = input_shape[0]
seq_length = input_shape[1]
if input_mask is None:
input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32)
if token_type_ids is None:
token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32)
# xinyunchen: in the following line, I added "reuse=tf.AUTO_REUSE".
with tf.variable_scope(scope, default_name="bert", reuse=tf.AUTO_REUSE):
with tf.variable_scope("embeddings"):
# Perform embedding lookup on the word ids.
(self.word_embedding_output, self.embedding_table) = embedding_lookup(
input_ids=input_ids,
vocab_size=config.vocab_size,
embedding_size=config.hidden_size,
initializer_range=config.initializer_range,
word_embedding_name="word_embeddings",
use_one_hot_embeddings=use_one_hot_embeddings)
# Add positional embeddings and token type embeddings, then layer
# normalize and perform dropout.
self.embedding_output = embedding_postprocessor(
input_tensor=self.word_embedding_output,
use_token_type=True,
token_type_ids=token_type_ids,
token_type_vocab_size=config.type_vocab_size,
token_type_embedding_name="token_type_embeddings",
use_position_embeddings=True,
position_embedding_name="position_embeddings",
initializer_range=config.initializer_range,
max_position_embeddings=config.max_position_embeddings,
dropout_prob=config.hidden_dropout_prob)
with tf.variable_scope("encoder"):
# This converts a 2D mask of shape [batch_size, seq_length] to a 3D
# mask of shape [batch_size, seq_length, seq_length] which is used
# for the attention scores.
attention_mask = create_attention_mask_from_input_mask(
input_ids, input_mask)
# Run the stacked transformer.
# `sequence_output` shape = [batch_size, seq_length, hidden_size].
self.all_encoder_layers = transformer_model(
input_tensor=self.embedding_output,
attention_mask=attention_mask,
hidden_size=config.hidden_size,
num_hidden_layers=config.num_hidden_layers,
num_attention_heads=config.num_attention_heads,
intermediate_size=config.intermediate_size,
intermediate_act_fn=get_activation(config.hidden_act),
hidden_dropout_prob=config.hidden_dropout_prob,
attention_probs_dropout_prob=config.attention_probs_dropout_prob,
initializer_range=config.initializer_range,
do_return_all_layers=True)
self.sequence_output = self.all_encoder_layers[-1]
# The "pooler" converts the encoded sequence tensor of shape
# [batch_size, seq_length, hidden_size] to a tensor of shape
# [batch_size, hidden_size]. This is necessary for segment-level
# (or segment-pair-level) classification tasks where we need a fixed
# dimensional representation of the segment.
with tf.variable_scope("pooler"):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token. We assume that this has been pre-trained
first_token_tensor = tf.squeeze(self.sequence_output[:, 0:1, :], axis=1)
self.pooled_output = tf.layers.dense(
first_token_tensor,
config.hidden_size,
activation=tf.tanh,
kernel_initializer=create_initializer(config.initializer_range))
def get_pooled_output(self):
return self.pooled_output
def get_sequence_output(self):
"""Gets final hidden layer of encoder.
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
to the final hidden of the transformer encoder.
"""
return self.sequence_output
def get_all_encoder_layers(self):
return self.all_encoder_layers
def get_word_embedding_output(self):
"""Get output of the word(piece) embedding lookup.
This is BEFORE positional embeddings and token type embeddings have been
added.
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
to the output of the word(piece) embedding layer.
"""
return self.word_embedding_output
def get_embedding_output(self):
"""Gets output of the embedding lookup (i.e., input to the transformer).
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
to the output of the embedding layer, after summing the word
embeddings with the positional embeddings and the token type embeddings,
then performing layer normalization. This is the input to the transformer.
"""
return self.embedding_output
def get_embedding_table(self):
return self.embedding_table
def gelu(x):
"""Gaussian Error Linear Unit.
This is a smoother version of the RELU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
x: float Tensor to perform activation.
Returns:
`x` with the GELU activation applied.
"""
cdf = 0.5 * (1.0 + tf.tanh(
(np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
return x * cdf
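# Illustrative values (added comment): the tanh approximation above gives
# gelu(0.) == 0. and gelu(1.) ~= 0.841, closely matching x * Phi(x) for the
# standard normal CDF Phi.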
def get_activation(activation_string):
"""Maps a string to a Python function, e.g., "relu" => `tf.nn.relu`.
Args:
activation_string: String name of the activation function.
Returns:
A Python function corresponding to the activation function. If
`activation_string` is None, empty, or "linear", this will return None.
If `activation_string` is not a string, it will return `activation_string`.
Raises:
ValueError: The `activation_string` does not correspond to a known
activation.
"""
  # We assume that anything that's not a string is already an activation
# function, so we just return it.
if not isinstance(activation_string, six.string_types):
return activation_string
if not activation_string:
return None
act = activation_string.lower()
if act == "linear":
return None
elif act == "relu":
return tf.nn.relu
elif act == "gelu":
return gelu
elif act == "tanh":
return tf.tanh
else:
raise ValueError("Unsupported activation: %s" % act)
def get_assignment_map_from_checkpoint(tvars, init_checkpoint):
"""Compute the union of the current variables and checkpoint variables."""
assignment_map = {}
initialized_variable_names = {}
name_to_variable = collections.OrderedDict()
for var in tvars:
name = var.name
m = re.match("^(.*):\\d+$", name)
if m is not None:
name = m.group(1)
name_to_variable[name] = var
init_vars = tf.train.list_variables(init_checkpoint)
assignment_map = collections.OrderedDict()
for x in init_vars:
(name, var) = (x[0], x[1])
if name not in name_to_variable:
continue
assignment_map[name] = name
initialized_variable_names[name] = 1
initialized_variable_names[name + ":0"] = 1
return (assignment_map, initialized_variable_names)
def dropout(input_tensor, dropout_prob):
"""Perform dropout.
Args:
input_tensor: float Tensor.
dropout_prob: Python float. The probability of dropping out a value (NOT of
*keeping* a dimension as in `tf.nn.dropout`).
Returns:
A version of `input_tensor` with dropout applied.
"""
if dropout_prob is None or dropout_prob == 0.0:
return input_tensor
output = tf.nn.dropout(input_tensor, rate=dropout_prob)
return output
def layer_norm(input_tensor, name=None):
"""Run layer normalization on the last dimension of the tensor."""
return slim.layer_norm(
inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)
def layer_norm_and_dropout(input_tensor, dropout_prob, name=None):
"""Runs layer normalization followed by dropout."""
output_tensor = layer_norm(input_tensor, name)
output_tensor = dropout(output_tensor, dropout_prob)
return output_tensor
def create_initializer(initializer_range=0.02):
"""Creates a `truncated_normal_initializer` with the given range."""
return tf.truncated_normal_initializer(stddev=initializer_range)
def embedding_lookup(input_ids,
vocab_size,
embedding_size=128,
initializer_range=0.02,
word_embedding_name="word_embeddings",
use_one_hot_embeddings=False):
"""Looks up words embeddings for id tensor.
Args:
input_ids: int32 Tensor of shape [batch_size, seq_length] containing word
ids.
vocab_size: int. Size of the embedding vocabulary.
embedding_size: int. Width of the word embeddings.
initializer_range: float. Embedding initialization range.
word_embedding_name: string. Name of the embedding table.
use_one_hot_embeddings: bool. If True, use one-hot method for word
embeddings. If False, use `tf.nn.embedding_lookup()`.
Returns:
float Tensor of shape [batch_size, seq_length, embedding_size].
"""
# This function assumes that the input is of shape [batch_size, seq_length,
# num_inputs].
#
# If the input is a 2D tensor of shape [batch_size, seq_length], we
# reshape to [batch_size, seq_length, 1].
if input_ids.shape.ndims == 2:
input_ids = tf.expand_dims(input_ids, axis=[-1])
embedding_table = tf.get_variable(
name=word_embedding_name,
shape=[vocab_size, embedding_size],
initializer=create_initializer(initializer_range))
if use_one_hot_embeddings:
flat_input_ids = tf.reshape(input_ids, [-1])
one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size)
output = tf.matmul(one_hot_input_ids, embedding_table)
else:
output = tf.nn.embedding_lookup(embedding_table, input_ids)
input_shape = get_shape_list(input_ids)
output = tf.reshape(output,
input_shape[0:-1] + [input_shape[-1] * embedding_size])
return (output, embedding_table)
def embedding_postprocessor(input_tensor,
use_token_type=False,
token_type_ids=None,
token_type_vocab_size=16,
token_type_embedding_name="token_type_embeddings",
use_position_embeddings=True,
position_embedding_name="position_embeddings",
initializer_range=0.02,
max_position_embeddings=512,
dropout_prob=0.1):
"""Performs various post-processing on a word embedding tensor.
Args:
input_tensor: float Tensor of shape [batch_size, seq_length,
embedding_size].
use_token_type: bool. Whether to add embeddings for `token_type_ids`.
token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
Must be specified if `use_token_type` is True.
token_type_vocab_size: int. The vocabulary size of `token_type_ids`.
token_type_embedding_name: string. The name of the embedding table variable
for token type ids.
use_position_embeddings: bool. Whether to add position embeddings for the
position of each token in the sequence.
position_embedding_name: string. The name of the embedding table variable
for positional embeddings.
initializer_range: float. Range of the weight initialization.
max_position_embeddings: int. Maximum sequence length that might ever be
used with this model. This can be longer than the sequence length of
input_tensor, but cannot be shorter.
dropout_prob: float. Dropout probability applied to the final output tensor.
Returns:
float tensor with same shape as `input_tensor`.
Raises:
ValueError: One of the tensor shapes or input values is invalid.
"""
input_shape = get_shape_list(input_tensor, expected_rank=3)
batch_size = input_shape[0]
seq_length = input_shape[1]
width = input_shape[2]
output = input_tensor
if use_token_type:
if token_type_ids is None:
      raise ValueError("`token_type_ids` must be specified if "
"`use_token_type` is True.")
token_type_table = tf.get_variable(
name=token_type_embedding_name,
shape=[token_type_vocab_size, width],
initializer=create_initializer(initializer_range))
# This vocab will be small so we always do one-hot here, since it is always
# faster for a small vocabulary.
flat_token_type_ids = tf.reshape(token_type_ids, [-1])
one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size)
token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)
token_type_embeddings = tf.reshape(token_type_embeddings,
[batch_size, seq_length, width])
output += token_type_embeddings
if use_position_embeddings:
# Create the variable outside the assertion to avoid TF2 compatibility
# issues.
full_position_embeddings = tf.get_variable(
name=position_embedding_name,
shape=[max_position_embeddings, width],
initializer=create_initializer(initializer_range))
assert_op = tf.assert_less_equal(seq_length, max_position_embeddings)
with tf.control_dependencies([assert_op]):
# Since the position embedding table is a learned variable, we create it
# using a (long) sequence length `max_position_embeddings`. The actual
# sequence length might be shorter than this, for faster training of
# tasks that do not have long sequences.
#
# So `full_position_embeddings` is effectively an embedding table
# for position [0, 1, 2, ..., max_position_embeddings-1], and the current
# sequence has positions [0, 1, 2, ... seq_length-1], so we can just
# perform a slice.
position_embeddings = tf.slice(full_position_embeddings, [0, 0],
[seq_length, -1])
num_dims = len(output.shape.as_list())
# Only the last two dimensions are relevant (`seq_length` and `width`), so
# we broadcast among the first dimensions, which is typically just
# the batch size.
position_broadcast_shape = []
for _ in range(num_dims - 2):
position_broadcast_shape.append(1)
position_broadcast_shape.extend([seq_length, width])
position_embeddings = tf.reshape(position_embeddings,
position_broadcast_shape)
output += position_embeddings
output = layer_norm_and_dropout(output, dropout_prob)
return output
def create_attention_mask_from_input_mask(from_tensor, to_mask):
"""Create 3D attention mask from a 2D tensor mask.
Args:
from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].
to_mask: int32 Tensor of shape [batch_size, to_seq_length].
Returns:
float Tensor of shape [batch_size, from_seq_length, to_seq_length].
"""
from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
batch_size = from_shape[0]
from_seq_length = from_shape[1]
to_shape = get_shape_list(to_mask, expected_rank=2)
to_seq_length = to_shape[1]
to_mask = tf.cast(
tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32)
# We don't assume that `from_tensor` is a mask (although it could be). We
  # don't actually care if we attend *from* padding tokens (only *to* padding
  # tokens), so we create a tensor of all ones.
#
# `broadcast_ones` = [batch_size, from_seq_length, 1]
broadcast_ones = tf.ones(
shape=[batch_size, from_seq_length, 1], dtype=tf.float32)
# Here we broadcast along two dimensions to create the mask.
mask = broadcast_ones * to_mask
return mask
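# Illustrative shapes (added comment): with a single example of three tokens
# and input_mask = [[1, 1, 0]], the returned mask is
#   [[[1., 1., 0.],
#     [1., 1., 0.],
#     [1., 1., 0.]]]
# i.e. every query position may attend to the two real tokens but not to the
# padding position.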
def dense_layer_3d(input_tensor,
num_attention_heads,
size_per_head,
initializer,
activation,
name=None):
"""A dense layer with 3D kernel.
Args:
input_tensor: float Tensor of shape [batch, seq_length, hidden_size].
num_attention_heads: Number of attention heads.
size_per_head: The size per attention head.
initializer: Kernel initializer.
    activation: Activation function.
name: The name scope of this layer.
Returns:
float logits Tensor.
"""
last_dim = get_shape_list(input_tensor)[-1]
with tf.variable_scope(name):
w = tf.get_variable(
name="kernel",
shape=[last_dim, num_attention_heads * size_per_head],
initializer=initializer)
w = tf.reshape(w, [last_dim, num_attention_heads, size_per_head])
b = tf.get_variable(
name="bias",
shape=[num_attention_heads * size_per_head],
initializer=tf.zeros_initializer)
b = tf.reshape(b, [num_attention_heads, size_per_head])
ret = tf.einsum("abc,cde->abde", input_tensor, w)
ret += b
if activation is not None:
return activation(ret)
else:
return ret
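# Shape sketch (added comment): for input_tensor of shape [batch, seq, hidden]
# the kernel is reshaped to [hidden, num_heads, head_size], so the einsum
# "abc,cde->abde" above yields [batch, seq, num_heads, head_size] before the
# optional activation is applied.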
def dense_layer_3d_proj(input_tensor,
hidden_size,
num_attention_heads,
head_size,
initializer,
activation,
name=None):
"""A dense layer with 3D kernel for projection.
Args:
input_tensor: float Tensor of shape [batch,from_seq_length,
num_attention_heads, size_per_head].
hidden_size: The size of hidden layer.
num_attention_heads: The size of output dimension.
head_size: The size of head.
initializer: Kernel initializer.
    activation: Activation function.
name: The name scope of this layer.
Returns:
float logits Tensor.
"""
with tf.variable_scope(name):
w = tf.get_variable(
name="kernel",
shape=[hidden_size, hidden_size],
initializer=initializer)
w = tf.reshape(w, [num_attention_heads, head_size, hidden_size])
b = tf.get_variable(
name="bias", shape=[hidden_size], initializer=tf.zeros_initializer)
ret = tf.einsum("BFNH,NHD->BFD", input_tensor, w)
ret += b
if activation is not None:
return activation(ret)
else:
return ret
def dense_layer_2d(input_tensor,
output_size,
initializer,
activation,
name=None):
"""A dense layer with 2D kernel.
Args:
input_tensor: Float tensor with rank 3.
output_size: The size of output dimension.
initializer: Kernel initializer.
    activation: Activation function.
name: The name scope of this layer.
Returns:
float logits Tensor.
"""
last_dim = get_shape_list(input_tensor)[-1]
with tf.variable_scope(name):
w = tf.get_variable(
name="kernel", shape=[last_dim, output_size], initializer=initializer)
b = tf.get_variable(
name="bias", shape=[output_size], initializer=tf.zeros_initializer)
ret = tf.einsum("abc,cd->abd", input_tensor, w)
ret += b
if activation is not None:
return activation(ret)
else:
return ret
def attention_layer(from_tensor,
to_tensor,
attention_mask=None,
num_attention_heads=1,
size_per_head=512,
query_act=None,
key_act=None,
value_act=None,
attention_probs_dropout_prob=0.0,
initializer_range=0.02,
batch_size=None,
from_seq_length=None,
to_seq_length=None):
"""Performs multi-headed attention from `from_tensor` to `to_tensor`.
This is an implementation of multi-headed attention based on "Attention
is all you Need". If `from_tensor` and `to_tensor` are the same, then
this is self-attention. Each timestep in `from_tensor` attends to the
  corresponding sequence in `to_tensor`, and returns a fixed-width vector.
This function first projects `from_tensor` into a "query" tensor and
`to_tensor` into "key" and "value" tensors. These are (effectively) a list
of tensors of length `num_attention_heads`, where each tensor is of shape
[batch_size, seq_length, size_per_head].
Then, the query and key tensors are dot-producted and scaled. These are
softmaxed to obtain attention probabilities. The value tensors are then
interpolated by these probabilities, then concatenated back to a single
tensor and returned.
  In practice, the multi-headed attention is done with tf.einsum as follows:
Input_tensor: [BFD]
Wq, Wk, Wv: [DNH]
Q:[BFNH] = einsum('BFD,DNH->BFNH', Input_tensor, Wq)
K:[BTNH] = einsum('BTD,DNH->BTNH', Input_tensor, Wk)
V:[BTNH] = einsum('BTD,DNH->BTNH', Input_tensor, Wv)
    attention_scores:[BNFT] = einsum('BFNH,BTNH->BNFT', Q, K) / sqrt(H)
attention_probs:[BNFT] = softmax(attention_scores)
context_layer:[BFNH] = einsum('BNFT,BTNH->BFNH', attention_probs, V)
Wout:[DNH]
    Output:[BFD] = einsum('BFNH,DNH->BFD', context_layer, Wout)
Args:
from_tensor: float Tensor of shape [batch_size, from_seq_length,
from_width].
to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].
attention_mask: (optional) int32 Tensor of shape [batch_size,
from_seq_length, to_seq_length]. The values should be 1 or 0. The
attention scores will effectively be set to -infinity for any positions in
the mask that are 0, and will be unchanged for positions that are 1.
num_attention_heads: int. Number of attention heads.
size_per_head: int. Size of each attention head.
query_act: (optional) Activation function for the query transform.
key_act: (optional) Activation function for the key transform.
value_act: (optional) Activation function for the value transform.
attention_probs_dropout_prob: (optional) float. Dropout probability of the
attention probabilities.
initializer_range: float. Range of the weight initializer.
batch_size: (Optional) int. If the input is 2D, this might be the batch size
of the 3D version of the `from_tensor` and `to_tensor`.
from_seq_length: (Optional) If the input is 2D, this might be the seq length
of the 3D version of the `from_tensor`.
to_seq_length: (Optional) If the input is 2D, this might be the seq length
of the 3D version of the `to_tensor`.
Returns:
float Tensor of shape [batch_size, from_seq_length, num_attention_heads,
size_per_head].
Raises:
ValueError: Any of the arguments or tensor shapes are invalid.
"""
from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])
if len(from_shape) != len(to_shape):
raise ValueError(
"The rank of `from_tensor` must match the rank of `to_tensor`.")
if len(from_shape) == 3:
batch_size = from_shape[0]
from_seq_length = from_shape[1]
to_seq_length = to_shape[1]
elif len(from_shape) == 2:
if (batch_size is None or from_seq_length is None or to_seq_length is None):
raise ValueError(
"When passing in rank 2 tensors to attention_layer, the values "
"for `batch_size`, `from_seq_length`, and `to_seq_length` "
"must all be specified.")
# Scalar dimensions referenced here:
# B = batch size (number of sequences)
# F = `from_tensor` sequence length
# T = `to_tensor` sequence length
# N = `num_attention_heads`
# H = `size_per_head`
# `query_layer` = [B, F, N, H]
query_layer = dense_layer_3d(from_tensor, num_attention_heads, size_per_head,
create_initializer(initializer_range), query_act,
"query")
# `key_layer` = [B, T, N, H]
key_layer = dense_layer_3d(to_tensor, num_attention_heads, size_per_head,
create_initializer(initializer_range), key_act,
"key")
# `value_layer` = [B, T, N, H]
value_layer = dense_layer_3d(to_tensor, num_attention_heads, size_per_head,
create_initializer(initializer_range), value_act,
"value")
# Take the dot product between "query" and "key" to get the raw
# attention scores.
attention_scores = tf.einsum("BTNH,BFNH->BNFT", key_layer, query_layer)
attention_scores = tf.multiply(attention_scores,
1.0 / math.sqrt(float(size_per_head)))
if attention_mask is not None:
# `attention_mask` = [B, 1, F, T]
attention_mask = tf.expand_dims(attention_mask, axis=[1])
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
adder = (1.0 - tf.cast(attention_mask, tf.float32)) * -10000.0
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_scores += adder
# Normalize the attention scores to probabilities.
# `attention_probs` = [B, N, F, T]
attention_probs = tf.nn.softmax(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = dropout(attention_probs, attention_probs_dropout_prob)
# `context_layer` = [B, F, N, H]
context_layer = tf.einsum("BNFT,BTNH->BFNH", attention_probs, value_layer)
return context_layer
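# Illustrative sketch, not part of the original model code: the einsum
# formulation documented in `attention_layer` above, spelled out on dummy
# tensors so the letter dimensions (B, F, T, N, H) are easy to follow.
# The concrete sizes below are assumptions chosen only for readability.
def _attention_shape_sketch():
  batch, from_len, to_len, heads, head_size = 2, 4, 4, 3, 8
  q = tf.ones([batch, from_len, heads, head_size])  # [B, F, N, H]
  k = tf.ones([batch, to_len, heads, head_size])    # [B, T, N, H]
  v = tf.ones([batch, to_len, heads, head_size])    # [B, T, N, H]
  # Scaled dot-product scores, then a softmax over the T axis.
  scores = tf.einsum("BTNH,BFNH->BNFT", k, q) / math.sqrt(float(head_size))
  probs = tf.nn.softmax(scores)                     # [B, N, F, T]
  # The probabilities interpolate the values back into [B, F, N, H].
  return tf.einsum("BNFT,BTNH->BFNH", probs, v)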
def transformer_model(input_tensor,
attention_mask=None,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
intermediate_act_fn=gelu,
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
initializer_range=0.02,
do_return_all_layers=False):
"""Multi-headed, multi-layer Transformer from "Attention is All You Need".
This is almost an exact implementation of the original Transformer encoder.
See the original paper:
https://arxiv.org/abs/1706.03762
Also see:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py
Args:
input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].
attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,
seq_length], with 1 for positions that can be attended to and 0 in
positions that should not be.
hidden_size: int. Hidden size of the Transformer.
num_hidden_layers: int. Number of layers (blocks) in the Transformer.
num_attention_heads: int. Number of attention heads in the Transformer.
intermediate_size: int. The size of the "intermediate" (a.k.a., feed
forward) layer.
intermediate_act_fn: function. The non-linear activation function to apply
to the output of the intermediate/feed-forward layer.
hidden_dropout_prob: float. Dropout probability for the hidden layers.
attention_probs_dropout_prob: float. Dropout probability of the attention
probabilities.
initializer_range: float. Range of the initializer (stddev of truncated
normal).
do_return_all_layers: Whether to also return all layers or just the final
layer.
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size], the final
hidden layer of the Transformer.
Raises:
ValueError: A Tensor shape or parameter is invalid.
"""
if hidden_size % num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (hidden_size, num_attention_heads))
attention_head_size = int(hidden_size / num_attention_heads)
input_shape = get_shape_list(input_tensor, expected_rank=3)
input_width = input_shape[2]
# The Transformer performs sum residuals on all layers so the input needs
# to be the same as the hidden size.
if input_width != hidden_size:
raise ValueError("The width of the input tensor (%d) != hidden size (%d)" %
(input_width, hidden_size))
prev_output = input_tensor
all_layer_outputs = []
for layer_idx in range(num_hidden_layers):
with tf.variable_scope("layer_%d" % layer_idx):
layer_input = prev_output
with tf.variable_scope("attention"):
with tf.variable_scope("self"):
attention_output = attention_layer(
from_tensor=layer_input,
to_tensor=layer_input,
attention_mask=attention_mask,
num_attention_heads=num_attention_heads,
size_per_head=attention_head_size,
attention_probs_dropout_prob=attention_probs_dropout_prob,
initializer_range=initializer_range)
# Run a linear projection of `hidden_size` then add a residual
# with `layer_input`.
with tf.variable_scope("output"):
attention_output = dense_layer_3d_proj(
attention_output, hidden_size,
num_attention_heads, attention_head_size,
create_initializer(initializer_range), None, "dense")
attention_output = dropout(attention_output, hidden_dropout_prob)
attention_output = layer_norm(attention_output + layer_input)
# The activation is only applied to the "intermediate" hidden layer.
with tf.variable_scope("intermediate"):
intermediate_output = dense_layer_2d(
attention_output, intermediate_size,
create_initializer(initializer_range), intermediate_act_fn, "dense")
# Down-project back to `hidden_size` then add the residual.
with tf.variable_scope("output"):
layer_output = dense_layer_2d(intermediate_output, hidden_size,
create_initializer(initializer_range),
None, "dense")
layer_output = dropout(layer_output, hidden_dropout_prob)
layer_output = layer_norm(layer_output + attention_output)
prev_output = layer_output
all_layer_outputs.append(layer_output)
if do_return_all_layers:
return all_layer_outputs
else:
return all_layer_outputs[-1]
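# Illustrative sketch, not from the original codebase: a minimal call to
# `transformer_model` on a dummy input, assuming the usual TF1 graph mode.
# The sizes are arbitrary; hidden_size must be divisible by
# num_attention_heads and must equal the input width.
def _transformer_model_sketch():
  dummy_input = tf.ones([2, 16, 128])  # [batch_size, seq_length, hidden_size]
  return transformer_model(
      input_tensor=dummy_input,
      hidden_size=128,
      num_hidden_layers=2,
      num_attention_heads=4,
      intermediate_size=512)  # -> float Tensor of shape [2, 16, 128]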
def get_shape_list(tensor, expected_rank=None, name=None):
"""Returns a list of the shape of tensor, preferring static dimensions.
Args:
tensor: A tf.Tensor object to find the shape of.
expected_rank: (optional) int. The expected rank of `tensor`. If this is
specified and the `tensor` has a different rank, an exception will be
raised.
name: Optional name of the tensor for the error message.
Returns:
A list of dimensions of the shape of tensor. All static dimensions will
be returned as python integers, and dynamic dimensions will be returned
as tf.Tensor scalars.
"""
if name is None:
# Tensor.name is not supported in Eager mode.
if tf.executing_eagerly():
name = "get_shape_list"
else:
name = tensor.name
if expected_rank is not None:
assert_rank(tensor, expected_rank, name)
shape = tensor.shape.as_list()
non_static_indexes = []
for (index, dim) in enumerate(shape):
if dim is None:
non_static_indexes.append(index)
if not non_static_indexes:
return shape
dyn_shape = tf.shape(tensor)
for index in non_static_indexes:
shape[index] = dyn_shape[index]
return shape
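# Illustrative sketch, assuming TF1 graph mode with a placeholder: shows how
# `get_shape_list` mixes static python ints with dynamic tf.Tensor scalars.
def _get_shape_list_sketch():
  x = tf.placeholder(tf.float32, shape=[None, 7, 3])
  shape = get_shape_list(x, expected_rank=3)
  # shape[1] == 7 and shape[2] == 3 are plain python ints, while shape[0] is
  # a tf.Tensor scalar because the batch size is unknown at graph-build time.
  return shape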
def reshape_to_matrix(input_tensor):
"""Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix)."""
ndims = input_tensor.shape.ndims
if ndims < 2:
raise ValueError("Input tensor must have at least rank 2. Shape = %s" %
(input_tensor.shape))
if ndims == 2:
return input_tensor
width = input_tensor.shape[-1]
output_tensor = tf.reshape(input_tensor, [-1, width])
return output_tensor
def reshape_from_matrix(output_tensor, orig_shape_list):
"""Reshapes a rank 2 tensor back to its original rank >= 2 tensor."""
if len(orig_shape_list) == 2:
return output_tensor
output_shape = get_shape_list(output_tensor)
orig_dims = orig_shape_list[0:-1]
width = output_shape[-1]
return tf.reshape(output_tensor, orig_dims + [width])
def assert_rank(tensor, expected_rank, name=None):
"""Raises an exception if the tensor rank is not of the expected rank.
Args:
tensor: A tf.Tensor to check the rank of.
expected_rank: Python integer or list of integers, expected rank.
name: Optional name of the tensor for the error message.
Raises:
ValueError: If the expected shape doesn't match the actual shape.
"""
if name is None:
name = tensor.name
expected_rank_dict = {}
if isinstance(expected_rank, six.integer_types):
expected_rank_dict[expected_rank] = True
else:
for x in expected_rank:
expected_rank_dict[x] = True
actual_rank = tensor.shape.ndims
if actual_rank not in expected_rank_dict:
scope_name = tf.get_variable_scope().name
raise ValueError(
"For the tensor `%s` in scope `%s`, the actual rank "
"`%d` (shape = %s) is not equal to the expected rank `%s`" %
(name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))
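# Illustrative sketch, not part of the original file: `assert_rank` accepts a
# single int or a list of acceptable ranks, mirroring how `attention_layer`
# calls `get_shape_list(..., expected_rank=[2, 3])`.
def _assert_rank_sketch():
  matrix = tf.ones([4, 5])
  assert_rank(matrix, 2, name="matrix")       # passes: rank is exactly 2
  assert_rank(matrix, [2, 3], name="matrix")  # passes: rank 2 is allowed
  # assert_rank(matrix, 3, name="matrix") would raise ValueError.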
|
ysekky/chainer
|
refs/heads/master
|
tests/chainer_tests/training_tests/test_updater.py
|
4
|
import unittest
import mock
import numpy
import chainer
from chainer import dataset
from chainer import testing
from chainer import training
class DummyIterator(dataset.Iterator):
epoch = 1
is_new_epoch = True
def __init__(self, next_data):
self.finalize_called = 0
self.next_called = 0
self.next_data = next_data
self.serialize_called = []
def finalize(self):
self.finalize_called += 1
def __next__(self):
self.next_called += 1
return self.next_data
def serialize(self, serializer):
self.serialize_called.append(serializer)
class DummyOptimizer(chainer.Optimizer):
def __init__(self):
self.update = mock.MagicMock()
self.serialize_called = []
def serialize(self, serializer):
self.serialize_called.append(serializer)
class DummySerializer(chainer.Serializer):
def __init__(self, path=[]):
self.path = path
self.called = []
def __getitem__(self, key):
return DummySerializer(self.path + [key])
def __call__(self, key, value):
self.called.append((key, value))
class TestUpdater(unittest.TestCase):
def setUp(self):
self.target = chainer.Link()
self.iterator = DummyIterator([(numpy.array(1), numpy.array(2))])
self.optimizer = DummyOptimizer()
self.optimizer.setup(self.target)
self.updater = training.StandardUpdater(self.iterator, self.optimizer)
def test_init_values(self):
self.assertIsNone(self.updater.device)
self.assertIsNone(self.updater.loss_func)
self.assertEqual(self.updater.iteration, 0)
def test_epoch(self):
self.assertEqual(self.updater.epoch, 1)
def test_new_epoch(self):
self.assertTrue(self.updater.is_new_epoch)
def test_get_iterator(self):
self.assertIs(self.updater.get_iterator('main'), self.iterator)
def test_get_optimizer(self):
self.assertIs(self.updater.get_optimizer('main'), self.optimizer)
def test_get_all_optimizers(self):
self.assertEqual(self.updater.get_all_optimizers(),
{'main': self.optimizer})
def test_update(self):
self.updater.update()
self.assertEqual(self.updater.iteration, 1)
self.assertEqual(self.iterator.next_called, 1)
def test_finalizer(self):
self.updater.finalize()
self.assertEqual(self.iterator.finalize_called, 1)
def test_serialize(self):
serializer = DummySerializer()
self.updater.serialize(serializer)
self.assertEqual(len(self.iterator.serialize_called), 1)
self.assertEqual(self.iterator.serialize_called[0].path,
['iterator:main'])
self.assertEqual(len(self.optimizer.serialize_called), 1)
self.assertEqual(
self.optimizer.serialize_called[0].path, ['optimizer:main'])
self.assertEqual(serializer.called, [('iteration', 0)])
class TestUpdaterUpdateArguments(unittest.TestCase):
def setUp(self):
self.target = chainer.Link()
self.optimizer = DummyOptimizer()
self.optimizer.setup(self.target)
def test_update_tuple(self):
iterator = DummyIterator([(numpy.array(1), numpy.array(2))])
updater = training.StandardUpdater(iterator, self.optimizer)
updater.update_core()
self.assertEqual(self.optimizer.update.call_count, 1)
args, kwargs = self.optimizer.update.call_args
self.assertEqual(len(args), 3)
loss, v1, v2 = args
self.assertEqual(len(kwargs), 0)
self.assertIs(loss, self.optimizer.target)
self.assertIsInstance(v1, numpy.ndarray)
self.assertEqual(v1, 1)
self.assertIsInstance(v2, numpy.ndarray)
self.assertEqual(v2, 2)
self.assertEqual(iterator.next_called, 1)
def test_update_dict(self):
iterator = DummyIterator([{'x': numpy.array(1), 'y': numpy.array(2)}])
updater = training.StandardUpdater(iterator, self.optimizer)
updater.update_core()
self.assertEqual(self.optimizer.update.call_count, 1)
args, kwargs = self.optimizer.update.call_args
self.assertEqual(len(args), 1)
loss, = args
self.assertEqual(set(kwargs.keys()), {'x', 'y'})
v1 = kwargs['x']
v2 = kwargs['y']
self.assertIs(loss, self.optimizer.target)
self.assertIsInstance(v1, numpy.ndarray)
self.assertEqual(v1, 1)
self.assertIsInstance(v2, numpy.ndarray)
self.assertEqual(v2, 2)
self.assertEqual(iterator.next_called, 1)
def test_update_var(self):
iterator = DummyIterator([numpy.array(1)])
updater = training.StandardUpdater(iterator, self.optimizer)
updater.update_core()
self.assertEqual(self.optimizer.update.call_count, 1)
args, kwargs = self.optimizer.update.call_args
self.assertEqual(len(args), 2)
loss, v1 = args
self.assertEqual(len(kwargs), 0)
self.assertIs(loss, self.optimizer.target)
self.assertIsInstance(v1, numpy.ndarray)
self.assertEqual(v1, 1)
self.assertEqual(iterator.next_called, 1)
testing.run_module(__name__, __file__)
|
luser/socorro
|
refs/heads/master
|
scripts/truncate_56.py
|
14
|
#!/usr/bin/python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import sys
import psycopg2, psycopg2.extensions
# this script truncates the database down to 56 to 62 days of data
# for use in staging and/or dev environments
conn = psycopg2.connect("dbname=breakpad user=postgres")
conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
cur = conn.cursor()
cur.execute("""
SET work_mem = '128 MB'
""")
cur.execute("""
SET maintenance_work_mem = '256 MB'
""")
cur.execute("""
SET temp_buffers = '128 MB'
""")
# get a list of reports partitions older than 62 days
cur.execute("""
select relname from pg_stat_user_tables
where relname like 'reports_20%%'
and relname < 'reports_' || to_char(current_date - 62, 'YYYYMMDD')
order by relname
""" )
# drop all of the old partitions
# use cascade so it takes out frames, extensions, etc. too
partitions = [ x for ( x, ) in cur.fetchall() ]
for partition in partitions:
cur.execute("DROP TABLE %s CASCADE" % ( partition, ))
print "%s dropped." % partition
# delete data from top crashers
cur.execute("""
DELETE FROM top_crashes_by_url_signature
USING top_crashes_by_url
WHERE top_crashes_by_url_id = top_crashes_by_url.id
AND window_end < ( now() - interval '60 days')
""")
cur.execute("""
VACUUM FULL top_crashes_by_url_signature
""")
cur.execute("""
DELETE FROM top_crashes_by_url
WHERE window_end < ( now() - interval '60 days')
""")
cur.execute("""
VACUUM FULL top_crashes_by_url
""")
print "top crashes by url truncated"
cur.execute("""
DELETE FROM top_crashes_by_signature
WHERE window_end < ( now() - interval '60 days')
""")
cur.execute("""
VACUUM FULL top_crashes_by_signature
""")
print "top_crashes_by_signature truncated"
# truncate raw_adi
cur.execute("""
DELETE FROM raw_adi
WHERE "date" < ( now() - interval '60 days')
""")
cur.execute("""
VACUUM FULL raw_adi
""")
print "raw_adi truncated"
# analyze
cur.execute("""
ANALYZE
""")
print "done truncating"
|
russelmahmud/mess-account
|
refs/heads/master
|
django/contrib/webdesign/templatetags/__init__.py
|
12133432
| |
cctaylor/googleads-python-lib
|
refs/heads/master
|
examples/adxbuyer/v201506/__init__.py
|
14
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Examples for DoubleClick Ad Exchange Buyer SOAP API."""
|
cyrushadavi/home_automation
|
refs/heads/master
|
yelp.py
|
6
|
__author__ = 'Cyrus'
import config
config_dict = config.get_dict_of_params()
|
mdhaman/superdesk-core
|
refs/heads/master
|
tests/io/feed_parsers/efe_nitf_tests.py
|
7
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import os
from superdesk.tests import TestCase
from superdesk.etree import etree
from superdesk.io.feed_parsers.efe_nitf import EFEFeedParser
class EFENITFTestCase(TestCase):
filename = 'efe_nitf.xml'
def setUp(self):
dirname = os.path.dirname(os.path.realpath(__file__))
fixture = os.path.normpath(os.path.join(dirname, '../fixtures', self.filename))
provider = {'name': 'Test'}
with open(fixture) as f:
self.nitf = f.read()
self.item = EFEFeedParser().parse(etree.fromstring(self.nitf), provider)
def test_item(self):
self.assertEqual(self.item.get('headline'), "Honduran president announces Cabinet changes")
self.assertNotIn('byline', self.item)
self.assertEqual(self.item['dateline']['located']['city'], 'Tegucigalpa')
self.assertEqual(self.item['subject'][0]['qcode'], '11006005')
self.assertEqual(self.item['slugline'], 'HONDURAS GOVERNMENT')
|
gauribhoite/personfinder
|
refs/heads/master
|
env/google_appengine/google/appengine/tools/sdk_update_checker.py
|
13
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Checks for SDK updates."""
import datetime
import logging
import os
import socket
import ssl
import sys
import time
import urllib2
import google
import yaml
from google.appengine.api import validation
from google.appengine.api import yaml_object
VERSION_FILE = '../../VERSION'
UPDATE_CHECK_TIMEOUT = 3
NAG_FILE = '.appcfg_nag'
class NagFile(validation.Validated):
"""A validated YAML class to represent the user's nag preferences.
Attributes:
timestamp: The timestamp of the last nag.
opt_in: True if the user wants to check for updates on dev_appserver
start. False if not. May be None if we have not asked the user yet.
"""
ATTRIBUTES = {
'timestamp': validation.TYPE_FLOAT,
'opt_in': validation.Optional(validation.TYPE_BOOL),
}
@staticmethod
def Load(nag_file):
"""Load a single NagFile object where one and only one is expected.
Args:
nag_file: A file-like object or string containing the yaml data to parse.
Returns:
A NagFile instance.
"""
return yaml_object.BuildSingleObject(NagFile, nag_file)
def GetVersionObject():
"""Gets the version of the SDK by parsing the VERSION file.
Returns:
A Yaml object or None if the VERSION file does not exist.
"""
version_filename = os.path.join(os.path.dirname(google.appengine.__file__),
VERSION_FILE)
try:
version_fh = open(version_filename)
except IOError:
logging.error('Could not find version file at %s', version_filename)
return None
try:
version = yaml.safe_load(version_fh)
finally:
version_fh.close()
return version
def _VersionList(release):
"""Parse a version string into a list of ints.
Args:
release: The 'release' version, e.g. '1.2.4'.
(Due to YAML parsing this may also be an int or float.)
Returns:
A list of ints corresponding to the parts of the version string
between periods. Example:
'1.2.4' -> [1, 2, 4]
'1.2.3.4' -> [1, 2, 3, 4]
Raises:
ValueError if not all the parts are valid integers.
"""
return [int(part) for part in str(release).split('.')]
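# Illustrative sketch (not part of the SDK): CheckForUpdates below sorts the
# server responses with _VersionList as the key, so that '1.10.0' correctly
# orders after '1.9.1' where a plain string comparison would not.
def _version_sort_sketch():
  releases = [{'release': '1.9.1'}, {'release': '1.10.0'}]
  newest = sorted(releases, reverse=True,
                  key=lambda release: _VersionList(release['release']))[0]
  return newest  # {'release': '1.10.0'}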
class SDKUpdateChecker(object):
"""Determines if the local SDK is the latest version.
Nags the user when there are updates to the SDK. As the SDK becomes
more out of date, the language in the nagging gets stronger. We
store a little yaml file in the user's home directory so that we nag
the user only once a week.
The yaml file has the following field:
'timestamp': Last time we nagged the user in seconds since the epoch.
Attributes:
rpcserver: An AbstractRpcServer instance used to check for the latest SDK.
config: The app's AppInfoExternal. Needed to determine which api_version
the app is using.
"""
def __init__(self,
rpcserver,
configs):
"""Create a new SDKUpdateChecker.
Args:
rpcserver: The AbstractRpcServer to use.
configs: A list of yaml objects or a single yaml object that specify the
configuration of this application.
"""
if not isinstance(configs, list):
configs = [configs]
self.rpcserver = rpcserver
self.runtimes = set(config.runtime for config in configs)
self.runtime_to_api_version = {}
for config in configs:
self.runtime_to_api_version.setdefault(
config.runtime, set()).add(config.api_version)
@staticmethod
def MakeNagFilename():
"""Returns the filename for the nag file for this user."""
user_homedir = os.path.expanduser('~/')
if not os.path.isdir(user_homedir):
drive, unused_tail = os.path.splitdrive(os.__file__)
if drive:
os.environ['HOMEDRIVE'] = drive
return os.path.expanduser('~/' + NAG_FILE)
def _ParseVersionFile(self):
"""Parse the local VERSION file.
Returns:
A Yaml object or None if the file does not exist.
"""
return GetVersionObject()
def CheckSupportedVersion(self):
"""Determines if the app's api_version is supported by the SDK.
Uses the api_version field from the AppInfoExternal to determine if
the SDK supports that api_version.
Raises:
sys.exit if the api_version is not supported.
"""
version = self._ParseVersionFile()
if version is None:
logging.error('Could not determine if the SDK supports the api_version '
'requested in app.yaml.')
return
unsupported_api_versions_found = False
for runtime, api_versions in self.runtime_to_api_version.items():
supported_api_versions = _GetSupportedApiVersions(version, runtime)
unsupported_api_versions = sorted(api_versions -
set(supported_api_versions))
if unsupported_api_versions:
unsupported_api_versions_found = True
if len(unsupported_api_versions) == 1:
logging.critical('The requested api_version (%s) is not supported by '
'the %s runtime in this release of the SDK. The '
'supported api_versions are %s.',
unsupported_api_versions[0], runtime,
supported_api_versions)
else:
logging.critical('The requested api_versions (%s) are not supported '
'by the %s runtime in this release of the SDK. The '
'supported api_versions are %s.',
unsupported_api_versions, runtime,
supported_api_versions)
if unsupported_api_versions_found:
sys.exit(1)
def CheckForUpdates(self):
"""Queries the server for updates and nags the user if appropriate.
Queries the server for the latest SDK version at the same time reporting
the local SDK version. The server will respond with a yaml document
containing the fields:
'release': The name of the release (e.g. 1.2).
'timestamp': The time the release was created (YYYY-MM-DD HH:MM AM/PM TZ).
'api_versions': A list of api_version strings (e.g. ['1', 'beta']).
We will nag the user with increasing severity if:
- There is a new release.
- There is a new release with a new api_version.
- There is a new release that does not support an api_version named in
a configuration in self.configs.
"""
version = self._ParseVersionFile()
if version is None:
logging.info('Skipping update check')
return
logging.info('Checking for updates to the SDK.')
responses = {}
try:
for runtime in self.runtimes:
responses[runtime] = yaml.safe_load(self.rpcserver.Send(
'/api/updatecheck',
timeout=UPDATE_CHECK_TIMEOUT,
release=version['release'],
timestamp=version['timestamp'],
api_versions=version['api_versions'],
runtime=runtime))
except (urllib2.URLError, socket.error, ssl.SSLError), e:
logging.info('Update check failed: %s', e)
return
try:
latest = sorted(responses.values(), reverse=True,
key=lambda release: _VersionList(release['release']))[0]
except ValueError:
logging.warn('Could not parse the advertised release versions')
return
if version['release'] == latest['release']:
logging.info('The SDK is up to date.')
return
try:
this_release = _VersionList(version['release'])
except ValueError:
logging.warn('Could not parse this release version (%r)',
version['release'])
else:
try:
advertised_release = _VersionList(latest['release'])
except ValueError:
logging.warn('Could not parse advertised release version (%r)',
latest['release'])
else:
if this_release > advertised_release:
logging.info('This SDK release is newer than the advertised release.')
return
for runtime, response in responses.items():
api_versions = _GetSupportedApiVersions(response, runtime)
obsolete_versions = sorted(
self.runtime_to_api_version[runtime] - set(api_versions))
if len(obsolete_versions) == 1:
self._Nag(
'The api version you are using (%s) is obsolete! You should\n'
'upgrade your SDK and test that your code works with the new\n'
'api version.' % obsolete_versions[0],
response, version, force=True)
elif obsolete_versions:
self._Nag(
'The api versions you are using (%s) are obsolete! You should\n'
'upgrade your SDK and test that your code works with the new\n'
'api version.' % obsolete_versions,
response, version, force=True)
deprecated_versions = sorted(
self.runtime_to_api_version[runtime].intersection(api_versions[:-1]))
if len(deprecated_versions) == 1:
self._Nag(
'The api version you are using (%s) is deprecated. You should\n'
'upgrade your SDK to try the new functionality.' %
deprecated_versions[0], response, version)
elif deprecated_versions:
self._Nag(
'The api versions you are using (%s) are deprecated. You should\n'
'upgrade your SDK to try the new functionality.' %
deprecated_versions, response, version)
self._Nag('There is a new release of the SDK available.',
latest, version)
def _ParseNagFile(self):
"""Parses the nag file.
Returns:
A NagFile if the file was present else None.
"""
nag_filename = SDKUpdateChecker.MakeNagFilename()
try:
fh = open(nag_filename)
except IOError:
return None
try:
nag = NagFile.Load(fh)
finally:
fh.close()
return nag
def _WriteNagFile(self, nag):
"""Writes the NagFile to the user's nag file.
If the destination path does not exist, this method will log an error
and fail silently.
Args:
nag: The NagFile to write.
"""
nagfilename = SDKUpdateChecker.MakeNagFilename()
try:
fh = open(nagfilename, 'w')
try:
fh.write(nag.ToYAML())
finally:
fh.close()
except (OSError, IOError), e:
logging.error('Could not write nag file to %s. Error: %s', nagfilename, e)
def _Nag(self, msg, latest, version, force=False):
"""Prints a nag message and updates the nag file's timestamp.
Because we don't want to nag the user every time, we store a simple
yaml document in the user's home directory. If the timestamp in this
doc is over a week old, we'll nag the user. And when we nag the user,
we update the timestamp in this doc.
Args:
msg: The formatted message to print to the user.
latest: The yaml document received from the server.
version: The local yaml version document.
force: If True, always nag the user, ignoring the nag file.
"""
nag = self._ParseNagFile()
if nag and not force:
last_nag = datetime.datetime.fromtimestamp(nag.timestamp)
if datetime.datetime.now() - last_nag < datetime.timedelta(weeks=1):
logging.debug('Skipping nag message')
return
if nag is None:
nag = NagFile()
nag.timestamp = time.time()
self._WriteNagFile(nag)
print '****************************************************************'
print msg
print '-----------'
print 'Latest SDK:'
print yaml.dump(latest)
print '-----------'
print 'Your SDK:'
print yaml.dump(version)
print '-----------'
print 'Please visit https://developers.google.com/appengine/downloads'
print 'for the latest SDK'
print '****************************************************************'
def AllowedToCheckForUpdates(self, input_fn=raw_input):
"""Determines if the user wants to check for updates.
On startup, the dev_appserver wants to check for updates to the SDK.
Because this action reports usage to Google when the user is not
otherwise communicating with Google (e.g. pushing a new app version),
the user must opt in.
If the user does not have a nag file, we will query the user and
save the response in the nag file. Subsequent calls to this function
will re-use that response.
Args:
input_fn: used to collect user input. This is for testing only.
Returns:
True if the user wants to check for updates. False otherwise.
"""
nag = self._ParseNagFile()
if nag is None:
nag = NagFile()
nag.timestamp = 0.0
if nag.opt_in is None:
answer = input_fn('Allow dev_appserver to check for updates on startup? '
'(Y/n): ')
answer = answer.strip().lower()
if answer == 'n' or answer == 'no':
print ('dev_appserver will not check for updates on startup. To '
'change this setting, edit %s' %
SDKUpdateChecker.MakeNagFilename())
nag.opt_in = False
else:
print ('dev_appserver will check for updates on startup. To change '
'this setting, edit %s' % SDKUpdateChecker.MakeNagFilename())
nag.opt_in = True
self._WriteNagFile(nag)
return nag.opt_in
def _GetSupportedApiVersions(versions, runtime):
"""Returns the runtime-specific or general list of supported runtimes.
The provided 'versions' dict contains a field called 'api_versions'
which is the list of default versions supported. This dict may also
contain a 'supported_api_versions' dict which lists api_versions by
runtime. This function will prefer to return the runtime-specific
api_versions list, but will default to the general list.
Args:
versions: dict of versions from app.yaml or /api/updatecheck server.
runtime: string of current runtime (e.g. 'go').
Returns:
List of supported api_versions (e.g. ['go1']).
"""
if 'supported_api_versions' in versions:
return versions['supported_api_versions'].get(
runtime, versions)['api_versions']
return versions['api_versions']
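# Illustrative sketch with hypothetical data (not from the updatecheck
# service): the runtime-specific list wins when 'supported_api_versions' is
# present; otherwise the top-level 'api_versions' list is used.
def _supported_api_versions_sketch():
  versions = {
      'api_versions': ['1'],
      'supported_api_versions': {'go': {'api_versions': ['go1']}},
  }
  assert _GetSupportedApiVersions(versions, 'go') == ['go1']
  assert _GetSupportedApiVersions(versions, 'python27') == ['1']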
|
kylon/pacman-fakeroot
|
refs/heads/upstream
|
test/pacman/tests/sync022.py
|
28
|
self.description = "Install a group from a sync db using --needed"
lp1 = pmpkg("pkg1")
lp2 = pmpkg("pkg2")
lp3 = pmpkg("pkg3")
sp1 = pmpkg("pkg1", "1.1-1")
sp2 = pmpkg("pkg2")
sp3 = pmpkg("pkg3")
for p in lp1, lp2, lp3, sp1, sp2, sp3:
setattr(p, "groups", ["grp"])
for p in lp1, lp2, lp3:
self.addpkg2db("local", p)
for p in sp1, sp2, sp3:
self.addpkg2db("sync", p);
self.args = "-S --needed grp"
self.addrule("PACMAN_RETCODE=0")
for p in sp1, sp2, sp3:
self.addrule("PKG_EXIST=%s" % p.name)
self.addrule("PKG_VERSION=pkg1|1.1-1")
|
Sodki/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/vmware/vmware_guest.py
|
10
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This module is also sponsored by E.T.A.I. (www.etai.fr)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vmware_guest
short_description: Manages virtual machines in vcenter
description:
- Create new virtual machines (from templates or not)
- Power on/power off/restart a virtual machine
- Modify, rename or remove a virtual machine
version_added: 2.2
author:
- James Tanner (@jctanner) <tanner.jc@gmail.com>
- Loic Blot (@nerzhul) <loic.blot@unix-experience.fr>
notes:
- Tested on vSphere 5.5 and 6.0
requirements:
- "python >= 2.6"
- PyVmomi
options:
state:
description:
- What state should the virtual machine be in?
- If C(state) is set to C(present) and VM exists, ensure the VM configuration conforms to task arguments
required: True
choices: ['present', 'absent', 'poweredon', 'poweredoff', 'restarted', 'suspended', 'shutdownguest', 'rebootguest']
name:
description:
- Name of the VM to work with
required: True
name_match:
description:
- If multiple VMs match the name, use the first or last one found
default: 'first'
choices: ['first', 'last']
uuid:
description:
- UUID of the instance to manage if known, this is VMware's unique identifier.
- This is required if name is not supplied.
template:
description:
- Template used to create VM.
- If this value is not set, VM is created without using a template.
- If the VM exists already this setting will be ignored.
is_template:
description:
- Flag the instance as a template
default: False
version_added: "2.3"
folder:
description:
- Destination folder, absolute path to find an existing guest or create the new guest
hardware:
description:
- "Manage some VM hardware attributes."
- "Valid attributes are: memory_mb, num_cpus and scsi"
- "scsi: Valid values are buslogic, lsilogic, lsilogicsas and paravirtual (default)"
guest_id:
description:
- "Set the guest ID (Debian, RHEL, Windows...)"
- "This field is required when creating a VM"
- >
Valid values are referenced here:
https://www.vmware.com/support/developer/converter-sdk/conv55_apireference/vim.vm.GuestOsDescriptor.GuestOsIdentifier.html
version_added: "2.3"
disk:
description:
- "A list of disks to add"
- "Valid attributes are: size_[tb,gb,mb,kb], type, datastore and autoselect_datastore"
- "type: Valid value is thin (default: None)"
- "datastore: Datastore to use for the disk. If autoselect_datastore is True, filter datastore selection."
- "autoselect_datastore (bool): select the less used datastore."
resource_pool:
description:
- Assign the machine to the given resource pool
- Resource pool should be child of the selected host parent
default: None
version_added: "2.3"
wait_for_ip_address:
description:
- Wait until vCenter detects an IP address for the VM
- This requires vmware-tools (vmtoolsd) to properly work after creation
default: False
force:
description:
- Ignore warnings and complete the actions
datacenter:
description:
- Destination datacenter for the deploy operation
default: ha-datacenter
cluster:
description:
- The cluster name where the VM will run.
version_added: "2.3"
esxi_hostname:
description:
- The esxi hostname where the VM will run.
annotation:
description:
- A note or annotation to include in the VM
version_added: "2.3"
customvalues:
description:
- Define a list of customvalues to set on VM.
- "A customvalue object takes 2 fields 'key' and 'value'."
version_added: "2.3"
networks:
description:
- Network to use should include C(name) or C(vlan) entry
- Add an optional C(ip) and C(netmask) for network configuration
- Add an optional C(gateway) entry to configure a gateway
- Add an optional C(mac) entry to customize mac address
- Add an optional C(dns_servers) or C(domain) entry per interface (Windows)
- Add an optional C(device_type) to configure the virtual NIC (pcnet32, vmxnet2, vmxnet3, e1000, e1000e)
version_added: "2.3"
customization:
description:
- "Parameters to customize template"
- "Common parameters (Linux/Windows):"
- " C(dns_servers) (list): List of DNS servers to configure"
- " C(dns_suffix) (list): List of domain suffixes, aka DNS search path (default: C(domain) parameter)"
- " C(domain) (string): DNS domain name to use"
- " C(hostname) (string): Computer hostname (default: C(name) parameter)"
- "Parameters related to windows customization:"
- " C(autologon) (bool): Auto logon after VM customization (default: False)"
- " C(autologoncount) (int): Number of autologon after reboot (default: 1)"
- " C(domainadmin) (string): User used to join in AD domain (mandatory with joindomain)"
- " C(domainadminpassword) (string): Password used to join in AD domain (mandatory with joindomain)"
- " C(fullname) (string): Server owner name (default: Administrator)"
- " C(joindomain) (string): AD domain to join (Not compatible with C(joinworkgroup))"
- " C(joinworkgroup) (string): Workgroup to join (Not compatible with C(joindomain), default: WORKGROUP)"
- " C(orgname) (string): Organisation name (default: ACME)"
- " C(password) (string): Local administrator password (mandatory)"
- " C(productid) (string): Product ID"
- " C(runonce) (list): List of commands to run at first user logon"
- " C(timezone) (int): Timezone (default: 85) See U(https://msdn.microsoft.com/en-us/library/ms912391(v=winembedded.11).aspx)"
version_added: "2.3"
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
# Create a VM from a template
- name: create the VM
vmware_guest:
hostname: 192.0.2.44
username: administrator@vsphere.local
password: vmware
validate_certs: no
esxi_hostname: 192.0.2.117
datacenter: datacenter1
folder: testvms
name: testvm_2
state: poweredon
guest_id: centos64guest
disk:
- size_gb: 10
type: thin
datastore: g73_datastore
hardware:
memory_mb: 512
num_cpus: 1
scsi: paravirtual
networks:
- name: VM Network
ip: 192.168.1.100
netmask: 255.255.255.0
mac: 'aa:bb:dd:aa:00:14'
template: template_el7
wait_for_ip_address: yes
delegate_to: localhost
register: deploy
# Clone a VM from Template and customize
- name: Clone template and customize
vmware_guest:
hostname: 192.168.1.209
username: administrator@vsphere.local
password: vmware
validate_certs: no
datacenter: datacenter1
cluster: cluster
name: testvm-2
template: template_windows
networks:
- name: VM Network
ip: 192.168.1.100
netmask: 255.255.255.0
gateway: 192.168.1.1
mac: 'aa:bb:dd:aa:00:14'
domain: my_domain
dns_servers:
- 192.168.1.1
- 192.168.1.2
customization:
autologon: True
dns_servers:
- 192.168.1.1
- 192.168.1.2
domain: my_domain
password: new_vm_password
runonce:
- powershell.exe -ExecutionPolicy Unrestricted -File C:\Windows\Temp\Enable-WinRM.ps1 -ForceNewSSLCert
delegate_to: localhost
# Create a VM template
- name: create a VM template
vmware_guest:
hostname: 192.0.2.88
username: administrator@vsphere.local
password: vmware
validate_certs: no
datacenter: datacenter1
cluster: vmware_cluster_esx
resource_pool: highperformance_pool
folder: testvms
name: testvm_6
is_template: yes
guest_id: debian6_64Guest
disk:
- size_gb: 10
type: thin
datastore: g73_datastore
hardware:
memory_mb: 512
num_cpus: 1
scsi: lsilogic
wait_for_ip_address: yes
delegate_to: localhost
register: deploy
# Rename a VM (requires the VM's uuid)
- vmware_guest:
hostname: 192.168.1.209
username: administrator@vsphere.local
password: vmware
uuid: 421e4592-c069-924d-ce20-7e7533fab926
name: new_name
state: present
delegate_to: localhost
# Remove a VM by uuid
- vmware_guest:
hostname: 192.168.1.209
username: administrator@vsphere.local
password: vmware
uuid: 421e4592-c069-924d-ce20-7e7533fab926
state: absent
delegate_to: localhost
'''
RETURN = """
instance:
description: metadata about the new virtual machine
returned: always
type: dict
sample: None
"""
import os
import time
# import module snippets
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.six import iteritems
from ansible.module_utils.urls import fetch_url
from ansible.module_utils.vmware import get_all_objs, connect_to_api, gather_vm_facts
try:
import json
except ImportError:
import simplejson as json
HAS_PYVMOMI = False
try:
import pyVmomi
from pyVmomi import vim
HAS_PYVMOMI = True
except ImportError:
pass
class PyVmomiDeviceHelper(object):
""" This class is a helper to create easily VMWare Objects for PyVmomiHelper """
def __init__(self, module):
self.module = module
self.next_disk_unit_number = 0
@staticmethod
def create_scsi_controller(scsi_type):
scsi_ctl = vim.vm.device.VirtualDeviceSpec()
scsi_ctl.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
if scsi_type == 'lsilogic':
scsi_ctl.device = vim.vm.device.VirtualLsiLogicController()
elif scsi_type == 'paravirtual':
scsi_ctl.device = vim.vm.device.ParaVirtualSCSIController()
elif scsi_type == 'buslogic':
scsi_ctl.device = vim.vm.device.VirtualBusLogicController()
elif scsi_type == 'lsilogicsas':
scsi_ctl.device = vim.vm.device.VirtualLsiLogicSASController()
scsi_ctl.device.deviceInfo = vim.Description()
scsi_ctl.device.slotInfo = vim.vm.device.VirtualDevice.PciBusSlotInfo()
scsi_ctl.device.slotInfo.pciSlotNumber = 16
scsi_ctl.device.controllerKey = 100
scsi_ctl.device.unitNumber = 3
scsi_ctl.device.busNumber = 0
scsi_ctl.device.hotAddRemove = True
scsi_ctl.device.sharedBus = 'noSharing'
scsi_ctl.device.scsiCtlrUnitNumber = 7
return scsi_ctl
@staticmethod
def is_scsi_controller(device):
return isinstance(device, vim.vm.device.VirtualLsiLogicController) or \
isinstance(device, vim.vm.device.ParaVirtualSCSIController) or \
isinstance(device, vim.vm.device.VirtualBusLogicController) or \
isinstance(device, vim.vm.device.VirtualLsiLogicSASController)
def create_scsi_disk(self, scsi_ctl, disk_index=None):
diskspec = vim.vm.device.VirtualDeviceSpec()
diskspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
diskspec.fileOperation = vim.vm.device.VirtualDeviceSpec.FileOperation.create
diskspec.device = vim.vm.device.VirtualDisk()
diskspec.device.backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
diskspec.device.backing.diskMode = 'persistent'
diskspec.device.controllerKey = scsi_ctl.device.key
assert self.next_disk_unit_number != 7
assert disk_index != 7
"""
Configure disk unit number.
"""
if disk_index is not None:
diskspec.device.unitNumber = disk_index
self.next_disk_unit_number = disk_index + 1
else:
diskspec.device.unitNumber = self.next_disk_unit_number
self.next_disk_unit_number += 1
# unit number 7 is reserved to SCSI controller, increase next index
if self.next_disk_unit_number == 7:
self.next_disk_unit_number += 1
return diskspec
def create_nic(self, device_type, device_label, device_infos):
nic = vim.vm.device.VirtualDeviceSpec()
if device_type == 'pcnet32':
nic.device = vim.vm.device.VirtualPCNet32()
elif device_type == 'vmxnet2':
nic.device = vim.vm.device.VirtualVmxnet2()
elif device_type == 'vmxnet3':
nic.device = vim.vm.device.VirtualVmxnet3()
elif device_type == 'e1000':
nic.device = vim.vm.device.VirtualE1000()
elif device_type == 'e1000e':
nic.device = vim.vm.device.VirtualE1000e()
elif device_type == 'sriov':
nic.device = vim.vm.device.VirtualSriovEthernetCard()
else:
self.module.fail_json(msg="Invalid device_type '%s' for network %s" %
(device_type, device_infos['name']))
nic.device.wakeOnLanEnabled = True
nic.device.addressType = 'assigned'
nic.device.deviceInfo = vim.Description()
nic.device.deviceInfo.label = device_label
nic.device.deviceInfo.summary = device_infos['name']
nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
nic.device.connectable.startConnected = True
nic.device.connectable.allowGuestControl = True
nic.device.connectable.connected = True
if 'mac' in device_infos:
nic.device.macAddress = device_infos['mac']
return nic
class PyVmomiCache(object):
""" This class caches references to objects which are requested multiples times but not modified """
def __init__(self, content):
self.content = content
self.networks = {}
self.clusters = {}
self.esx_hosts = {}
def get_network(self, network):
if network not in self.networks:
self.networks[network] = get_obj(self.content, [vim.Network], network)
return self.networks[network]
def get_cluster(self, cluster):
if cluster not in self.clusters:
self.clusters[cluster] = get_obj(self.content, [vim.ClusterComputeResource], cluster)
return self.clusters[cluster]
def get_esx_host(self, host):
if host not in self.esx_hosts:
self.esx_hosts[host] = get_obj(self.content, [vim.HostSystem], host)
return self.esx_hosts[host]
class PyVmomiHelper(object):
def __init__(self, module):
if not HAS_PYVMOMI:
module.fail_json(msg='pyvmomi module required')
self.module = module
self.device_helper = PyVmomiDeviceHelper(self.module)
self.params = module.params
self.si = None
self.content = connect_to_api(self.module)
self.configspec = None
self.change_detected = False
self.customspec = None
self.current_vm_obj = None
self.cache = PyVmomiCache(self.content)
def should_deploy_from_template(self):
return self.params.get('template') is not None
def getvm(self, name=None, uuid=None, folder=None):
# https://www.vmware.com/support/developer/vc-sdk/visdk2xpubs/ReferenceGuide/vim.SearchIndex.html
# self.si.content.searchIndex.FindByInventoryPath('DC1/vm/test_folder')
vm = None
searchpath = None
if uuid:
vm = self.content.searchIndex.FindByUuid(uuid=uuid, vmSearch=True)
elif folder:
# Build the absolute folder path to pass into the search method
if not self.params['folder'].startswith('/'):
self.module.fail_json(msg="Folder %(folder)s needs to be an absolute path, starting with '/'." % self.params)
searchpath = '%(datacenter)s%(folder)s' % self.params
# get all objects for this path ...
f_obj = self.content.searchIndex.FindByInventoryPath(searchpath)
if f_obj:
if isinstance(f_obj, vim.Datacenter):
f_obj = f_obj.vmFolder
for c_obj in f_obj.childEntity:
if not isinstance(c_obj, vim.VirtualMachine):
continue
if c_obj.name == name:
vm = c_obj
if self.params['name_match'] == 'first':
break
if vm:
self.current_vm_obj = vm
return vm
def set_powerstate(self, vm, state, force):
"""
Set the power status for a VM determined by the current and
requested states. If force is set, the transition is attempted even when
the VM is in an intermediate power state.
"""
facts = self.gather_facts(vm)
expected_state = state.replace('_', '').lower()
current_state = facts['hw_power_status'].lower()
result = dict(
changed=False,
failed=False,
)
# Need Force
if not force and current_state not in ['poweredon', 'poweredoff']:
result['failed'] = True
result['msg'] = "VM is in %s power state. Force is required!" % current_state
return result
# State is not already true
if current_state != expected_state:
task = None
try:
if expected_state == 'poweredoff':
task = vm.PowerOff()
elif expected_state == 'poweredon':
task = vm.PowerOn()
elif expected_state == 'restarted':
if current_state in ('poweredon', 'poweringon', 'resetting', 'poweredoff'):
task = vm.Reset()
else:
result['failed'] = True
result['msg'] = "Cannot restart VM in the current state %s" % current_state
elif expected_state == 'suspended':
if current_state in ('poweredon', 'poweringon'):
task = vm.Suspend()
else:
result['failed'] = True
result['msg'] = 'Cannot suspend VM in the current state %s' % current_state
elif expected_state in ['shutdownguest', 'rebootguest']:
if current_state == 'poweredon' and vm.guest.toolsRunningStatus == 'guestToolsRunning':
if expected_state == 'shutdownguest':
task = vm.ShutdownGuest()
else:
task = vm.RebootGuest()
else:
result['failed'] = True
result['msg'] = "VM %s must be in poweredon state & tools should be installed for guest shutdown/reboot" % vm.name
except Exception:
e = get_exception()
result['failed'] = True
result['msg'] = str(e)
if task:
self.wait_for_task(task)
if task.info.state == 'error':
result['failed'] = True
result['msg'] = str(task.info.error.msg)
else:
result['changed'] = True
# need to get new metadata if changed
if result['changed']:
newvm = self.getvm(uuid=vm.config.uuid)
facts = self.gather_facts(newvm)
result['instance'] = facts
return result
def gather_facts(self, vm):
return gather_vm_facts(self.content, vm)
def remove_vm(self, vm):
# https://www.vmware.com/support/developer/converter-sdk/conv60_apireference/vim.ManagedEntity.html#destroy
task = vm.Destroy()
self.wait_for_task(task)
if task.info.state == 'error':
return {'changed': False, 'failed': True, 'msg': task.info.error.msg}
else:
return {'changed': True, 'failed': False}
def configure_guestid(self, vm_obj, vm_creation=False):
# guest_id is not required when using templates
if self.should_deploy_from_template() and self.params.get('guest_id') is None:
return
# guest_id is only mandatory on VM creation
if vm_creation and self.params['guest_id'] is None:
self.module.fail_json(msg="guest_id attribute is mandatory for VM creation")
if vm_obj is None or self.params['guest_id'] != vm_obj.summary.config.guestId:
self.change_detected = True
self.configspec.guestId = self.params['guest_id']
def configure_cpu_and_memory(self, vm_obj, vm_creation=False):
# set cpu/memory/etc
if 'hardware' in self.params:
if 'num_cpus' in self.params['hardware']:
self.configspec.numCPUs = int(self.params['hardware']['num_cpus'])
if vm_obj is None or self.configspec.numCPUs != vm_obj.config.hardware.numCPU:
self.change_detected = True
# num_cpu is mandatory for VM creation
elif vm_creation and not self.should_deploy_from_template():
self.module.fail_json(msg="hardware.num_cpus attribute is mandatory for VM creation")
if 'memory_mb' in self.params['hardware']:
self.configspec.memoryMB = int(self.params['hardware']['memory_mb'])
if vm_obj is None or self.configspec.memoryMB != vm_obj.config.hardware.memoryMB:
self.change_detected = True
# memory_mb is mandatory for VM creation
elif vm_creation and not self.should_deploy_from_template():
self.module.fail_json(msg="hardware.memory_mb attribute is mandatory for VM creation")
def get_vm_network_interfaces(self, vm=None):
if vm is None:
return []
device_list = []
for device in vm.config.hardware.device:
if isinstance(device, vim.vm.device.VirtualPCNet32) or \
isinstance(device, vim.vm.device.VirtualVmxnet2) or \
isinstance(device, vim.vm.device.VirtualVmxnet3) or \
isinstance(device, vim.vm.device.VirtualE1000) or \
isinstance(device, vim.vm.device.VirtualE1000e) or \
isinstance(device, vim.vm.device.VirtualSriovEthernetCard):
device_list.append(device)
return device_list
def configure_network(self, vm_obj):
# Ignore an empty networks list; this keeps the existing networks when deploying a template or cloning a VM
if len(self.params['networks']) == 0:
return
network_devices = list()
for network in self.params['networks']:
if 'ip' in network or 'netmask' in network:
if 'ip' not in network or 'netmask' not in network:
self.module.fail_json(msg="Both 'ip' and 'netmask' are required together.")
if 'name' in network:
if get_obj(self.content, [vim.Network], network['name']) is None:
self.module.fail_json(msg="Network '%(name)s' does not exists" % network)
elif 'vlan' in network:
dvps = get_all_objs(self.content, [vim.dvs.DistributedVirtualPortgroup])
for dvp in dvps:
if hasattr(dvp.config.defaultPortConfig, 'vlan') and dvp.config.defaultPortConfig.vlan.vlanId == network['vlan']:
network['name'] = dvp.config.name
break
if dvp.config.name == network['vlan']:
network['name'] = dvp.config.name
break
else:
self.module.fail_json(msg="VLAN '%(vlan)s' does not exist" % network)
else:
self.module.fail_json(msg="You need to define a network name or a vlan")
network_devices.append(network)
# List current device for Clone or Idempotency
current_net_devices = self.get_vm_network_interfaces(vm=vm_obj)
if len(network_devices) < len(current_net_devices):
self.module.fail_json(msg="given network device list is lesser than current VM device list (%d < %d). "
"Removing interfaces is not allowed"
% (len(network_devices), len(current_net_devices)))
for key in range(0, len(network_devices)):
# Default device type is vmxnet3, VMWare best practice
device_type = network_devices[key].get('device_type', 'vmxnet3')
nic = self.device_helper.create_nic(device_type,
'Network Adapter %s' % (key + 1),
network_devices[key])
nic_change_detected = False
if key < len(current_net_devices) and (vm_obj or self.should_deploy_from_template()):
nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
# Changing mac address has no effect when editing interface
if 'mac' in network_devices[key] and nic.device.macAddress != current_net_devices[key].macAddress:
self.module.fail_json(msg="Changing MAC address has not effect when interface is already present. "
"The failing new MAC address is %s" % nic.device.macAddress)
nic.device = current_net_devices[key]
nic.device.deviceInfo = vim.Description()
else:
nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
nic_change_detected = True
if hasattr(self.cache.get_network(network_devices[key]['name']), 'portKeys'):
# VDS switch
pg_obj = get_obj(self.content, [vim.dvs.DistributedVirtualPortgroup], network_devices[key]['name'])
if (nic.device.backing and
(nic.device.backing.port.portgroupKey != pg_obj.key or
nic.device.backing.port.switchUuid != pg_obj.config.distributedVirtualSwitch.uuid)):
nic_change_detected = True
dvs_port_connection = vim.dvs.PortConnection()
dvs_port_connection.portgroupKey = pg_obj.key
dvs_port_connection.switchUuid = pg_obj.config.distributedVirtualSwitch.uuid
nic.device.backing = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo()
nic.device.backing.port = dvs_port_connection
nic_change_detected = True
else:
# vSwitch
if not isinstance(nic.device.backing, vim.vm.device.VirtualEthernetCard.NetworkBackingInfo):
nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
nic_change_detected = True
net_obj = self.cache.get_network(network_devices[key]['name'])
if nic.device.backing.network != net_obj:
nic.device.backing.network = net_obj
nic_change_detected = True
if nic.device.backing.deviceName != network_devices[key]['name']:
nic.device.backing.deviceName = network_devices[key]['name']
nic_change_detected = True
if nic_change_detected:
self.configspec.deviceChange.append(nic)
self.change_detected = True
def customize_customvalues(self, vm_obj):
if len(self.params['customvalues']) == 0:
return
facts = self.gather_facts(vm_obj)
for kv in self.params['customvalues']:
if 'key' not in kv or 'value' not in kv:
self.module.exit_json(msg="customvalues items require both 'key' and 'value' fields.")
# If the current custom value differs from what the VM facts report, update it
if kv['key'] not in facts['customvalues'] or facts['customvalues'][kv['key']] != kv['value']:
try:
vm_obj.setCustomValue(key=kv['key'], value=kv['value'])
self.change_detected = True
except Exception:
e = get_exception()
self.module.fail_json(msg="Failed to set custom value for key='%s' and value='%s'. Error was: %s"
% (kv['key'], kv['value'], e))
def customize_vm(self, vm_obj):
# Network settings
adaptermaps = []
for network in self.params['networks']:
if 'ip' in network and 'netmask' in network:
guest_map = vim.vm.customization.AdapterMapping()
guest_map.adapter = vim.vm.customization.IPSettings()
guest_map.adapter.ip = vim.vm.customization.FixedIp()
guest_map.adapter.ip.ipAddress = str(network['ip'])
guest_map.adapter.subnetMask = str(network['netmask'])
if 'gateway' in network:
guest_map.adapter.gateway = network['gateway']
# On Windows, DNS domain and DNS servers can be set by network interface
# https://pubs.vmware.com/vi3/sdk/ReferenceGuide/vim.vm.customization.IPSettings.html
if 'domain' in network:
guest_map.adapter.dnsDomain = network['domain']
elif self.params['customization'].get('domain'):
guest_map.adapter.dnsDomain = self.params['customization']['domain']
if 'dns_servers' in network:
guest_map.adapter.dnsServerList = network['dns_servers']
elif self.params['customization'].get('dns_servers'):
guest_map.adapter.dnsServerList = self.params['customization']['dns_servers']
adaptermaps.append(guest_map)
# Global DNS settings
globalip = vim.vm.customization.GlobalIPSettings()
if 'dns_servers' in self.params['customization']:
globalip.dnsServerList = self.params['customization'].get('dns_servers')
# TODO: Maybe list the different domains from the interfaces here by default ?
if 'dns_suffix' in self.params['customization'] or 'domain' in self.params['customization']:
globalip.dnsSuffixList = self.params['customization'].get('dns_suffix', self.params['customization']['domain'])
if self.params['guest_id']:
guest_id = self.params['guest_id']
else:
guest_id = vm_obj.summary.config.guestId
        # If installing a Windows guest, use Sysprep
# https://pubs.vmware.com/vi3/sdk/ReferenceGuide/vim.vm.customization.Sysprep.html#field_detail
if 'win' in guest_id:
ident = vim.vm.customization.Sysprep()
ident.userData = vim.vm.customization.UserData()
ident.userData.computerName = vim.vm.customization.FixedName()
ident.userData.computerName.name = str(self.params['customization'].get('hostname', self.params['name']))
ident.userData.fullName = str(self.params['customization'].get('fullname', 'Administrator'))
ident.userData.orgName = str(self.params['customization'].get('orgname', 'ACME'))
ident.guiUnattended = vim.vm.customization.GuiUnattended()
ident.guiUnattended.autoLogon = self.params['customization'].get('autologon', False)
ident.guiUnattended.autoLogonCount = self.params['customization'].get('autologoncount', 1)
ident.guiUnattended.timeZone = self.params['customization'].get('timezone', 85)
ident.identification = vim.vm.customization.Identification()
if self.params['customization'].get('password', '') != '':
ident.guiUnattended.password = vim.vm.customization.Password()
ident.guiUnattended.password.value = str(self.params['customization']['password'])
ident.guiUnattended.password.plainText = True
else:
self.module.fail_json(msg="The 'customization' section requires a 'password' entry, which cannot be empty.")
if 'productid' in self.params['customization']:
                ident.userData.productId = str(self.params['customization']['productid'])
if 'joindomain' in self.params['customization']:
if 'domainadmin' not in self.params['customization'] or 'domainadminpassword' not in self.params['customization']:
self.module.fail_json(msg="'domainadmin' and 'domainadminpassword' entries are mandatory in 'customization' section to use "
"joindomain feature")
ident.identification.domainAdmin = str(self.params['customization'].get('domainadmin'))
ident.identification.joinDomain = str(self.params['customization'].get('joindomain'))
ident.identification.domainAdminPassword = vim.vm.customization.Password()
ident.identification.domainAdminPassword.value = str(self.params['customization'].get('domainadminpassword'))
ident.identification.domainAdminPassword.plainText = True
elif 'joinworkgroup' in self.params['customization']:
ident.identification.joinWorkgroup = str(self.params['customization'].get('joinworkgroup'))
if 'runonce' in self.params['customization']:
ident.guiRunOnce = vim.vm.customization.GuiRunOnce()
ident.guiRunOnce.commandList = self.params['customization']['runonce']
else:
# Else use LinuxPrep
# https://pubs.vmware.com/vi3/sdk/ReferenceGuide/vim.vm.customization.LinuxPrep.html
ident = vim.vm.customization.LinuxPrep()
# TODO: Maybe add domain from interface if missing ?
if 'domain' in self.params['customization']:
ident.domain = str(self.params['customization'].get('domain'))
ident.hostName = vim.vm.customization.FixedName()
ident.hostName.name = str(self.params['customization'].get('hostname', self.params['name']))
self.customspec = vim.vm.customization.Specification()
self.customspec.nicSettingMap = adaptermaps
self.customspec.globalIPSettings = globalip
self.customspec.identity = ident
def get_vm_scsi_controller(self, vm_obj):
        # If vm_obj doesn't exist, there is no SCSI controller to find
if vm_obj is None:
return None
for device in vm_obj.config.hardware.device:
if self.device_helper.is_scsi_controller(device):
scsi_ctl = vim.vm.device.VirtualDeviceSpec()
scsi_ctl.device = device
return scsi_ctl
return None
def get_configured_disk_size(self, expected_disk_spec):
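        # Return the requested disk size in kilobytes; accepts either a 'size' value with a unit
        # suffix (e.g. '40gb') or one of the size_tb/size_gb/size_mb/size_kb keys.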
# what size is it?
if [x for x in expected_disk_spec.keys() if x.startswith('size_') or x == 'size']:
# size_tb, size_gb, size_mb, size_kb, size_b ...?
if 'size' in expected_disk_spec:
expected = ''.join(c for c in expected_disk_spec['size'] if c.isdigit())
unit = expected_disk_spec['size'].replace(expected, '').lower()
expected = int(expected)
else:
param = [x for x in expected_disk_spec.keys() if x.startswith('size_')][0]
unit = param.split('_')[-1].lower()
expected = [x[1] for x in expected_disk_spec.items() if x[0].startswith('size_')][0]
expected = int(expected)
if unit == 'tb':
return expected * 1024 * 1024 * 1024
elif unit == 'gb':
return expected * 1024 * 1024
            elif unit == 'mb':
return expected * 1024
elif unit == 'kb':
return expected
self.module.fail_json(
msg='%s is not a supported unit for disk size. Supported units are kb, mb, gb or tb' % unit)
        # A disk entry was given but no size was found, fail
self.module.fail_json(
msg="No size, size_kb, size_mb, size_gb or size_tb attribute found into disk configuration")
def configure_disks(self, vm_obj):
        # Ignore an empty disk list; this permits keeping existing disks when deploying from a template or cloning a VM
if len(self.params['disk']) == 0:
return
scsi_ctl = self.get_vm_scsi_controller(vm_obj)
# Create scsi controller only if we are deploying a new VM, not a template or reconfiguring
if vm_obj is None or scsi_ctl is None:
scsi_ctl = self.device_helper.create_scsi_controller(self.get_scsi_type())
self.change_detected = True
self.configspec.deviceChange.append(scsi_ctl)
disks = [x for x in vm_obj.config.hardware.device if isinstance(x, vim.vm.device.VirtualDisk)] \
if vm_obj is not None else None
if disks is not None and self.params.get('disk') and len(self.params.get('disk')) < len(disks):
self.module.fail_json(msg="Provided disks configuration has less disks than "
"the target object (%d vs %d)" % (len(self.params.get('disk')), len(disks)))
disk_index = 0
for expected_disk_spec in self.params.get('disk'):
disk_modified = False
            # If we are manipulating an existing object which has disks and disk_index is within range
if vm_obj is not None and disks is not None and disk_index < len(disks):
diskspec = vim.vm.device.VirtualDeviceSpec()
# set the operation to edit so that it knows to keep other settings
diskspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
diskspec.device = disks[disk_index]
else:
diskspec = self.device_helper.create_scsi_disk(scsi_ctl, disk_index)
disk_modified = True
# is it thin?
if 'type' in expected_disk_spec:
if expected_disk_spec.get('type', '').lower() == 'thin':
diskspec.device.backing.thinProvisioned = True
# which datastore?
if expected_disk_spec.get('datastore'):
# TODO: This is already handled by the relocation spec,
# but it needs to eventually be handled for all the
# other disks defined
pass
# increment index for next disk search
disk_index += 1
            # index 7 is reserved for the SCSI controller
if disk_index == 7:
disk_index += 1
kb = self.get_configured_disk_size(expected_disk_spec)
            # VMware doesn't allow reducing disk sizes
if kb < diskspec.device.capacityInKB:
self.module.fail_json(
msg="Given disk size is lesser than found (%d < %d). Reducing disks is not allowed." %
(kb, diskspec.device.capacityInKB))
if kb != diskspec.device.capacityInKB or disk_modified:
diskspec.device.capacityInKB = kb
self.configspec.deviceChange.append(diskspec)
self.change_detected = True
def select_host(self):
# if the user wants a cluster, get the list of hosts for the cluster and use the first one
if self.params['cluster']:
cluster = self.cache.get_cluster(self.params['cluster'])
if not cluster:
self.module.fail_json(msg="Failed to find a cluster named %(cluster)s" % self.params)
hostsystems = [x for x in cluster.host]
# TODO: add a policy to select host
hostsystem = hostsystems[0]
else:
hostsystem = self.cache.get_esx_host(self.params['esxi_hostname'])
if not hostsystem:
self.module.fail_json(msg="Failed to find a host named %(esxi_hostname)s" % self.params)
return hostsystem
def select_datastore(self, vm_obj=None):
datastore = None
datastore_name = None
if len(self.params['disk']) != 0:
# TODO: really use the datastore for newly created disks
if 'autoselect_datastore' in self.params['disk'][0] and self.params['disk'][0]['autoselect_datastore']:
datastores = get_all_objs(self.content, [vim.Datastore])
if datastores is None or len(datastores) == 0:
self.module.fail_json(msg="Unable to find a datastore list when autoselecting")
datastore_freespace = 0
for ds in datastores:
if ds.summary.freeSpace > datastore_freespace:
# If datastore field is provided, filter destination datastores
if 'datastore' in self.params['disk'][0] and \
isinstance(self.params['disk'][0]['datastore'], str) and \
ds.name.find(self.params['disk'][0]['datastore']) < 0:
continue
datastore = ds
datastore_name = datastore.name
datastore_freespace = ds.summary.freeSpace
elif 'datastore' in self.params['disk'][0]:
datastore_name = self.params['disk'][0]['datastore']
datastore = get_obj(self.content, [vim.Datastore], datastore_name)
else:
self.module.fail_json(msg="Either datastore or autoselect_datastore "
"should be provided to select datastore")
if not datastore and self.should_deploy_from_template():
# use the template's existing DS
disks = [x for x in vm_obj.config.hardware.device if isinstance(x, vim.vm.device.VirtualDisk)]
datastore = disks[0].backing.datastore
datastore_name = datastore.name
if not datastore:
self.module.fail_json(msg="Failed to find a matching datastore")
return datastore, datastore_name
def obj_has_parent(self, obj, parent):
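        # Walk up the inventory tree from obj and return True if an ancestor (or obj itself)
        # carries the same name as parent.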
assert obj is not None and parent is not None
current_parent = obj
while True:
if current_parent.name == parent.name:
return True
current_parent = current_parent.parent
if current_parent is None:
return False
def select_resource_pool(self, host):
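        # Return the first resource pool whose parent chain includes the host's parent object,
        # honouring the optional resource_pool parameter; fail if no suitable pool is found.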
resource_pools = get_all_objs(self.content, [vim.ResourcePool])
for rp in resource_pools.items():
if not rp[0]:
continue
if not hasattr(rp[0], 'parent'):
continue
# Find resource pool on host
if self.obj_has_parent(rp[0].parent, host.parent):
# If no resource_pool selected or it's the selected pool, return it
if self.module.params['resource_pool'] is None or rp[0].name == self.module.params['resource_pool']:
return rp[0]
if self.module.params['resource_pool'] is not None:
self.module.fail_json(msg="Could not find resource_pool %s for selected host %s"
% (self.module.params['resource_pool'], host.name))
else:
self.module.fail_json(msg="Failed to find a resource group for %s" % host.name)
def get_scsi_type(self):
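        # Return the SCSI controller type to use, defaulting to 'paravirtual' unless
        # hardware.scsi specifies another supported type.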
disk_controller_type = "paravirtual"
        # get the disk controller type from the hardware parameters
if 'hardware' in self.params:
if 'scsi' in self.params['hardware']:
if self.params['hardware']['scsi'] in ['buslogic', 'paravirtual', 'lsilogic', 'lsilogicsas']:
disk_controller_type = self.params['hardware']['scsi']
else:
self.module.fail_json(msg="hardware.scsi attribute should be 'paravirtual' or 'lsilogic'")
return disk_controller_type
def deploy_vm(self):
# https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/clone_vm.py
# https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.vm.CloneSpec.html
# https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.vm.ConfigSpec.html
# https://www.vmware.com/support/developer/vc-sdk/visdk41pubs/ApiReference/vim.vm.RelocateSpec.html
# FIXME:
# - multiple datacenters
# - multiple templates by the same name
# - static IPs
#datacenters = get_all_objs(self.content, [vim.Datacenter])
datacenter = get_obj(self.content, [vim.Datacenter], self.params['datacenter'])
if not datacenter:
self.module.fail_json(msg='No datacenter named %(datacenter)s was found' % self.params)
destfolder = None
if not self.params['folder'].startswith('/'):
self.module.fail_json(msg="Folder %(folder)s needs to be an absolute path, starting with '/'." % self.params)
f_obj = self.content.searchIndex.FindByInventoryPath('/%(datacenter)s%(folder)s' % self.params)
if f_obj is None:
self.module.fail_json(msg='No folder matched the path: %(folder)s' % self.params)
destfolder = f_obj
hostsystem = self.select_host()
if self.should_deploy_from_template():
# FIXME: need to search for this in the same way as guests to ensure accuracy
vm_obj = get_obj(self.content, [vim.VirtualMachine], self.params['template'])
if not vm_obj:
self.module.fail_json(msg="Could not find a template named %(template)s" % self.params)
else:
vm_obj = None
# set the destination datastore for VM & disks
(datastore, datastore_name) = self.select_datastore(vm_obj)
resource_pool = self.select_resource_pool(hostsystem)
self.configspec = vim.vm.ConfigSpec(cpuHotAddEnabled=True, memoryHotAddEnabled=True)
self.configspec.deviceChange = []
self.configure_guestid(vm_obj=vm_obj, vm_creation=True)
self.configure_cpu_and_memory(vm_obj=vm_obj, vm_creation=True)
self.configure_disks(vm_obj=vm_obj)
self.configure_network(vm_obj=vm_obj)
if len(self.params['customization']) > 0 or len(self.params['networks']) > 0:
self.customize_vm(vm_obj=vm_obj)
try:
if self.should_deploy_from_template():
# create the relocation spec
relospec = vim.vm.RelocateSpec()
relospec.host = hostsystem
relospec.datastore = datastore
relospec.pool = resource_pool
clonespec = vim.vm.CloneSpec(template=self.params['is_template'], location=relospec)
if self.customspec:
clonespec.customization = self.customspec
clonespec.config = self.configspec
task = vm_obj.Clone(folder=destfolder, name=self.params['name'], spec=clonespec)
self.change_detected = True
else:
                # ConfigSpec requires a name for VM creation
self.configspec.name = self.params['name']
self.configspec.files = vim.vm.FileInfo(logDirectory=None,
snapshotDirectory=None,
suspendDirectory=None,
vmPathName="[" + datastore_name + "] " + self.params["name"])
task = destfolder.CreateVM_Task(config=self.configspec, pool=resource_pool)
self.change_detected = True
self.wait_for_task(task)
except TypeError:
self.module.fail_json(msg="TypeError was returned, please ensure to give correct inputs.")
if task.info.state == 'error':
# https://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=2021361
# https://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=2173
return {'changed': self.change_detected, 'failed': True, 'msg': task.info.error.msg}
else:
# set annotation
vm = task.info.result
if self.params['annotation']:
annotation_spec = vim.vm.ConfigSpec()
annotation_spec.annotation = str(self.params['annotation'])
task = vm.ReconfigVM_Task(annotation_spec)
self.wait_for_task(task)
self.customize_customvalues(vm_obj=vm)
if self.params['wait_for_ip_address'] or self.params['state'] in ['poweredon', 'restarted']:
self.set_powerstate(vm, 'poweredon', force=False)
if self.params['wait_for_ip_address']:
self.wait_for_vm_ip(vm)
vm_facts = self.gather_facts(vm)
return {'changed': self.change_detected, 'failed': False, 'instance': vm_facts}
def reconfigure_vm(self):
self.configspec = vim.vm.ConfigSpec()
self.configspec.deviceChange = []
self.configure_guestid(vm_obj=self.current_vm_obj)
self.configure_cpu_and_memory(vm_obj=self.current_vm_obj)
self.configure_disks(vm_obj=self.current_vm_obj)
self.configure_network(vm_obj=self.current_vm_obj)
self.customize_customvalues(vm_obj=self.current_vm_obj)
if self.params['annotation'] and self.current_vm_obj.config.annotation != self.params['annotation']:
self.configspec.annotation = str(self.params['annotation'])
self.change_detected = True
relospec = vim.vm.RelocateSpec()
hostsystem = self.select_host()
relospec.pool = self.select_resource_pool(hostsystem)
change_applied = False
if relospec.pool != self.current_vm_obj.resourcePool:
task = self.current_vm_obj.RelocateVM_Task(spec=relospec)
self.wait_for_task(task)
change_applied = True
        # Only send a VMware reconfigure task if we detected a modification
if self.change_detected:
task = self.current_vm_obj.ReconfigVM_Task(spec=self.configspec)
self.wait_for_task(task)
change_applied = True
if task.info.state == 'error':
# https://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=2021361
# https://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=2173
return {'changed': change_applied, 'failed': True, 'msg': task.info.error.msg}
# Rename VM
if self.params['uuid'] and self.params['name'] and self.params['name'] != self.current_vm_obj.config.name:
task = self.current_vm_obj.Rename_Task(self.params['name'])
self.wait_for_task(task)
change_applied = True
if task.info.state == 'error':
return {'changed': change_applied, 'failed': True, 'msg': task.info.error.msg}
# Mark VM as Template
if self.params['is_template']:
self.current_vm_obj.MarkAsTemplate()
change_applied = True
vm_facts = self.gather_facts(self.current_vm_obj)
return {'changed': change_applied, 'failed': False, 'instance': vm_facts}
@staticmethod
def wait_for_task(task):
# https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.Task.html
# https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.TaskInfo.html
# https://github.com/virtdevninja/pyvmomi-community-samples/blob/master/samples/tools/tasks.py
while task.info.state not in ['success', 'error']:
time.sleep(1)
def wait_for_vm_ip(self, vm, poll=100, sleep=5):
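        # Poll the gathered guest facts (at most poll times, sleeping 'sleep' seconds in between)
        # until an IPv4 or IPv6 address shows up, then return the facts.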
ips = None
facts = {}
thispoll = 0
while not ips and thispoll <= poll:
newvm = self.getvm(uuid=vm.config.uuid)
facts = self.gather_facts(newvm)
if facts['ipv4'] or facts['ipv6']:
ips = True
else:
time.sleep(sleep)
thispoll += 1
return facts
def get_obj(content, vimtype, name):
"""
    Return an object by name; if name is None, the first object found is returned
"""
obj = None
container = content.viewManager.CreateContainerView(
content.rootFolder, vimtype, True)
for c in container.view:
if name:
if c.name == name:
obj = c
break
else:
obj = c
break
container.Destroy()
return obj
def main():
module = AnsibleModule(
argument_spec=dict(
hostname=dict(
type='str',
default=os.environ.get('VMWARE_HOST')
),
username=dict(
type='str',
default=os.environ.get('VMWARE_USER')
),
password=dict(
type='str', no_log=True,
default=os.environ.get('VMWARE_PASSWORD')
),
state=dict(
required=False,
choices=[
'poweredon',
'poweredoff',
'present',
'absent',
'restarted',
'suspended',
'shutdownguest',
'rebootguest'
],
default='present'),
validate_certs=dict(type='bool', default=True),
template_src=dict(type='str', aliases=['template']),
is_template=dict(type='bool', default=False),
annotation=dict(type='str', aliases=['notes']),
customvalues=dict(type='list', default=[]),
name=dict(required=True, type='str'),
name_match=dict(type='str', default='first'),
uuid=dict(type='str'),
folder=dict(type='str', default='/vm'),
guest_id=dict(type='str'),
disk=dict(type='list', default=[]),
hardware=dict(type='dict', default={}),
force=dict(type='bool', default=False),
datacenter=dict(type='str', default='ha-datacenter'),
esxi_hostname=dict(type='str'),
cluster=dict(type='str'),
wait_for_ip_address=dict(type='bool', default=False),
networks=dict(type='list', default=[]),
resource_pool=dict(type='str'),
customization=dict(type='dict', no_log=True, default={}),
),
supports_check_mode=True,
mutually_exclusive=[
['esxi_hostname', 'cluster'],
],
required_together=[
['state', 'force'],
['template'],
],
)
result = {'failed': False, 'changed': False}
# Prepend /vm if it was missing from the folder path, also strip trailing slashes
if not module.params['folder'].startswith('/vm') and module.params['folder'].startswith('/'):
module.params['folder'] = '/vm%(folder)s' % module.params
module.params['folder'] = module.params['folder'].rstrip('/')
pyv = PyVmomiHelper(module)
# Check if the VM exists before continuing
vm = pyv.getvm(name=module.params['name'],
folder=module.params['folder'],
uuid=module.params['uuid'])
# VM already exists
if vm:
if module.params['state'] == 'absent':
# destroy it
if module.params['force']:
# has to be poweredoff first
pyv.set_powerstate(vm, 'poweredoff', module.params['force'])
result = pyv.remove_vm(vm)
elif module.params['state'] == 'present':
result = pyv.reconfigure_vm()
elif module.params['state'] in ['poweredon', 'poweredoff', 'restarted', 'suspended', 'shutdownguest', 'rebootguest']:
# set powerstate
tmp_result = pyv.set_powerstate(vm, module.params['state'], module.params['force'])
if tmp_result['changed']:
result["changed"] = True
if not tmp_result["failed"]:
result["failed"] = False
else:
# This should not happen
assert False
# VM doesn't exist
else:
if module.params['state'] in ['poweredon', 'poweredoff', 'present', 'restarted', 'suspended']:
# Create it ...
result = pyv.deploy_vm()
if 'failed' not in result:
result['failed'] = False
if result['failed']:
module.fail_json(**result)
else:
module.exit_json(**result)
if __name__ == '__main__':
main()
|
andip71/boeffla-kernel-samsung-s5
|
refs/heads/boeffla
|
tools/perf/scripts/python/sched-migration.py
|
11215
|
#!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <fweisbec@gmail.com>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
def sched_switch(self, prev, prev_state, next):
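		# Return a new runqueue snapshot reflecting a context switch from prev to next,
		# or self when nothing changed on this runqueue.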
event = RunqueueEventUnknown()
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count idle"""
return len(self.tasks) - 1
def __repr__(self):
ret = self.tasks.__repr__()
ret += self.origin_tostring()
return ret
class TimeSlice:
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
def __init__(self, arg = []):
self.data = arg
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
def find_time_slice(self, ts):
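		# Binary search for the time slice containing timestamp ts; return its index or -1.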
start = 0
end = len(self.data)
found = -1
searching = True
while searching:
if start == end or start == end - 1:
searching = False
i = (end + start) / 2
if self.data[i].start <= ts and self.data[i].end >= ts:
found = i
end = i
continue
if self.data[i].end < ts:
start = i
elif self.data[i].start > ts:
end = i
return found
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in xrange(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
class SchedEventProxy:
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid):
pass
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
pass
|
odlgroup/odl
|
refs/heads/master
|
odl/contrib/solvers/spdhg/examples/ROF_1k2_primal.py
|
2
|
# Copyright 2014-2019 The ODL contributors
#
# This file is part of ODL.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
"""An example of using the SPDHG algorithm to solve a TV denoising problem
with Gaussian noise. We exploit the strong convexity of the data term to get
1/k^2 convergence on the primal part. We compare different algorithms for this
problem and visualize the results as in [CERS2017].
Reference
---------
[CERS2017] A. Chambolle, M. J. Ehrhardt, P. Richtarik and C.-B. Schoenlieb,
*Stochastic Primal-Dual Hybrid Gradient Algorithm with Arbitrary Sampling
and Imaging Applications*. ArXiv: http://arxiv.org/abs/1706.04957 (2017).
"""
from __future__ import division, print_function
import os
import odl.contrib.solvers.spdhg as spdhg
import odl.contrib.datasets.images as images
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import odl
import brewer2mpl
# create folder structure and set parameters
folder_out = '.' # to be changed
filename = 'ROF_1k2_primal'
nepoch = 300
niter_target = 2000
subfolder = '{}epochs'.format(nepoch)
folder_main = '{}/{}'.format(folder_out, filename)
if not os.path.exists(folder_main):
os.makedirs(folder_main)
folder_today = '{}/{}'.format(folder_main, subfolder)
if not os.path.exists(folder_today):
os.makedirs(folder_today)
folder_npy = '{}/npy'.format(folder_today)
if not os.path.exists(folder_npy):
os.makedirs(folder_npy)
# create ground truth
image_gray = images.building(gray=True)
X = odl.uniform_discr([0, 0], image_gray.shape, image_gray.shape)
groundtruth = X.element(image_gray)
clim = [0, 1]
# create data
data = odl.phantom.white_noise(X, mean=groundtruth, stddev=0.1, seed=1807)
# save images and data
if not os.path.exists('{}/groundtruth.png'.format(folder_main)):
spdhg.save_image(groundtruth, 'groundtruth', folder_main, 1, clim=clim)
spdhg.save_image(data, 'data', folder_main, 2, clim=clim)
alpha = .12 # set regularisation parameter
gamma = 0.99 # gamma^2 is upper bound of step size constraint
# create forward operators
Dx = odl.PartialDerivative(X, 0, pad_mode='symmetric')
Dy = odl.PartialDerivative(X, 1, pad_mode='symmetric')
A = odl.BroadcastOperator(Dx, Dy)
Y = A.range
# set up functional f
f = odl.solvers.SeparableSum(*[odl.solvers.L1Norm(Yi) for Yi in Y])
# set up functional g
g = 1 / (2 * alpha) * odl.solvers.L2NormSquared(X).translated(data)
obj_fun = f * A + g # define objective function
mu_g = 1 / alpha # define strong convexity constants
# create target / compute a saddle point
file_target = '{}/target.npy'.format(folder_main)
if not os.path.exists(file_target):
# compute a saddle point with PDHG and time the reconstruction
callback = (odl.solvers.CallbackPrintIteration(step=10, end=', ') &
odl.solvers.CallbackPrintTiming(step=10, cumulative=True))
x_opt, y_opt = X.zero(), Y.zero() # initialise variables
normA = np.sqrt(8) # compute norm of operator
sigma, tau = (gamma / normA,) * 2 # set step size parameters
# compute a saddle point with PDHG and time the reconstruction
odl.solvers.pdhg(x_opt, f, g, A, tau, sigma, niter_target, y=y_opt,
callback=callback)
# subgradients at saddle
subx_opt = -A.adjoint(y_opt)
suby_opt = A(x_opt)
obj_opt = obj_fun(x_opt) # objective value at saddle
# save saddle point
np.save(file_target, (x_opt, y_opt, subx_opt, suby_opt, obj_opt, normA))
# show saddle point and subgradients
spdhg.save_image(x_opt, 'x_saddle', folder_main, 1, clim=clim)
spdhg.save_image(y_opt[0], 'y_saddle[0]', folder_main, 2)
spdhg.save_image(subx_opt, 'subx_saddle', folder_main, 3)
spdhg.save_image(suby_opt[0], 'suby_saddle[0]', folder_main, 4)
else:
(x_opt, y_opt, subx_opt, suby_opt, obj_opt, normA) = np.load(file_target)
# set norms of the primal and dual variable
dist_x = odl.solvers.L2NormSquared(X).translated(x_opt)
dist_y = odl.solvers.L2NormSquared(Y).translated(y_opt)
# create Bregman distances for f and g
bregman_g = spdhg.bregman(g, x_opt, subx_opt)
# define Bregman distance for f and f_p
bregman_f = odl.solvers.SeparableSum(
*[spdhg.bregman(fi.convex_conj, yi, ri)
for fi, yi, ri in zip(f, y_opt, suby_opt)])
class CallbackStore(odl.solvers.util.callback.Callback):
"""Callback to store function values"""
def __init__(self, alg, iter_save, iter_plot):
self.iter_save = iter_save
self.iter_plot = iter_plot
self.iter = 0
self.alg = alg
self.ex, self.ey = X.zero(), Y.zero()
self.out = []
def __call__(self, w):
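        # w = [primal iterate, dual iterate]; keep running (ergodic) averages and record
        # convergence measures and images at the selected iterations.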
if self.iter > 0:
k = self.iter
self.ex = 1 / k * ((k - 1) * self.ex + w[0])
self.ey = 1 / k * ((k - 1) * self.ey + w[1])
if self.iter in self.iter_save:
obj = obj_fun(w[0])
breg_x = bregman_g(w[0])
breg_y = bregman_f(w[1])
breg = breg_x + breg_y
breg_ex = bregman_g(self.ex)
breg_ey = bregman_f(self.ey)
breg_erg = breg_ex + breg_ey
dx = dist_x(w[0])
dy = dist_y(w[1])
dist = dx + dy
dex = dist_x(self.ex)
dey = dist_y(self.ey)
dist_erg = dex + dey
self.out.append({'obj': obj, 'breg': breg, 'breg_x': breg_x,
'breg_y': breg_y, 'breg_erg': breg_erg,
'breg_ex': breg_ex, 'breg_ey': breg_ey,
'dist': dist, 'dist_x': dx, 'dist_y': dy,
'dist_erg': dist_erg, 'dist_ex': dex,
'dist_ey': dey, 'iter': self.iter})
if self.iter in self.iter_plot:
fname = '{}_{}'.format(self.alg, self.iter)
spdhg.save_image(w[0], fname, folder_today, 1, clim=clim)
self.iter += 1
# number of subsets for each algorithm
nsub = {'pdhg': 1, 'pa_pdhg': 1, 'pesquet_uni2': 2, 'spdhg_uni2': 2,
'pa_spdhg_uni2': 2, 'odl': 1, 'pa_odl': 1}
# number of iterations for each algorithm
niter, iter_save, iter_plot = {}, {}, {}
for alg in nsub.keys():
niter[alg] = nepoch * nsub[alg]
iter_save[alg] = range(0, niter[alg] + 1, nsub[alg])
iter_plot[alg] = list(np.array([10, 20, 30, 40, 100, 300]) * nsub[alg])
# %% --- Run algorithms ---
# TODO: ODL version to be included once the callback includes dual iterates
# for alg in ['pdhg', 'pesquet_uni2', 'pa_pdhg', 'spdhg_uni2', 'pa_spdhg_uni2',
# 'odl', 'pa_odl']:
for alg in ['pdhg', 'pesquet_uni2', 'pa_pdhg', 'spdhg_uni2', 'pa_spdhg_uni2']:
print('======= ' + alg + ' =======')
# clear variables in order not to use previous instances
prob, sigma, tau, theta = [None] * 4
# create lists for subset division
n = nsub[alg]
(sub2ind, ind2sub) = spdhg.divide_1Darray_equally(range(2), n)
    # set random seed so that results are reproducible
np.random.seed(1807)
# choose parameters for algorithm
if alg == 'pdhg' or alg == 'pa_pdhg':
prob_subset = [1] * n
prob = [1] * len(Y)
sigma = [gamma / normA] * len(Y)
tau = gamma / normA
elif alg == 'odl' or alg == 'pa_odl':
sigma = gamma / normA
tau = gamma / normA
elif alg == 'pesquet_uni2':
prob_subset = [1 / n] * n
prob = [1 / n] * len(Y)
sigma = [gamma / normA] * len(Y)
tau = gamma / normA
elif alg in ['spdhg_uni2'] or alg in ['pa_spdhg_uni2']:
normAi = [2] * n
prob_subset = [1 / n] * n
prob = [1 / n] * len(Y)
sigma = [gamma / nA for nA in normAi]
tau = gamma / (n * max(normAi))
else:
assert False, "Parameters not defined"
# function that selects the indices every iteration
def fun_select(k):
return sub2ind[int(np.random.choice(n, 1, p=prob_subset))]
# output function to be used within the iterations
callback = (odl.solvers.CallbackPrintIteration(fmt='iter:{:4d}', step=n,
end=', ') &
odl.solvers.CallbackPrintTiming(fmt='time/iter: {:5.2f} s',
step=n, end=', ') &
odl.solvers.CallbackPrintTiming(fmt='time: {:5.2f} s',
cumulative=True, step=n) &
CallbackStore(alg, iter_save[alg], iter_plot[alg]))
x, y = X.zero(), Y.zero() # initialise variables
callback([x, y])
if alg.startswith('pdhg') or alg.startswith('spdhg'):
spdhg.spdhg(x, f, g, A, tau, sigma, niter[alg], prob=prob, y=y,
fun_select=fun_select, callback=callback)
elif alg.startswith('pa_pdhg') or alg.startswith('pa_spdhg'):
spdhg.pa_spdhg(x, f, g, A, tau, sigma, niter[alg], mu_g, prob=prob,
y=y, fun_select=fun_select, callback=callback)
elif alg.startswith('odl'):
odl.solvers.pdhg(x, f, g, A, tau, sigma, niter[alg], y=y,
callback=callback)
elif alg.startswith('pa_odl'):
odl.solvers.pdhg(x, f, g, A, tau, sigma, niter[alg], y=y,
callback=callback, gamma_primal=mu_g)
elif alg.startswith('pesquet'):
spdhg.spdhg_pesquet(x, f, g, A, tau, sigma, niter[alg],
fun_select=fun_select, y=y, callback=callback)
else:
assert False, "Algorithm not defined"
out = callback.callbacks[1].out
np.save('{}/{}_output'.format(folder_npy, alg), (iter_save[alg],
niter[alg], x, out, nsub[alg]))
# %% --- Analyse and visualise the output ---
algs = ['pdhg', 'pesquet_uni2', 'pa_pdhg', 'spdhg_uni2', 'pa_spdhg_uni2']
iter_save_v, niter_v, image_v, out_v, nsub_v = {}, {}, {}, {}, {}
for a in algs:
(iter_save_v[a], niter_v[a], image_v[a], out_v[a], nsub_v[a]) = np.load(
'{}/{}_output.npy'.format(folder_npy, a))
epochs_save = {a: np.array(iter_save_v[a]) / np.float(nsub_v[a]) for a in algs}
out_resorted = {}
for a in algs:
print('==== ' + a)
out_resorted[a] = {}
K = len(iter_save_v[a])
for meas in out_v[a][0].keys(): # quality measures
print(' ==== ' + meas)
out_resorted[a][meas] = np.nan * np.ones(K)
for k in range(K): # iterations
out_resorted[a][meas][k] = out_v[a][k][meas]
meas = 'obj_rel'
print(' ==== ' + meas)
out_resorted[a][meas] = np.nan * np.ones(K)
for k in range(K): # iterations
out_resorted[a][meas][k] = ((out_v[a][k]['obj'] - obj_opt) /
(out_v[a][0]['obj'] - obj_opt))
for a in algs: # algorithms
for meas in out_resorted[a].keys(): # quality measures
for k in range(K): # iterations
if out_resorted[a][meas][k] <= 0:
out_resorted[a][meas][k] = np.nan
fig = plt.figure()
markers = plt.Line2D.filled_markers
all_plots = out_resorted[algs[0]].keys()
logy_plot = ['obj', 'obj_rel', 'dist_x', 'dist_y', 'breg', 'breg_y', 'breg_x',
'ebreg', 'ebreg_x', 'ebreg_y']
for plotx in ['linx', 'logx']:
for meas in all_plots:
print('============ ' + plotx + ' === ' + meas + ' ============')
fig = plt.figure(1)
plt.clf()
if plotx == 'linx':
if meas in logy_plot:
for a in algs:
x = epochs_save[a]
y = out_resorted[a][meas]
plt.semilogy(x, y, linewidth=3, label=a)
else:
for j, a in enumerate(algs):
x = epochs_save[a]
y = out_resorted[a][meas]
plt.plot(x, y, linewidth=3, marker=markers[j],
markersize=7, markevery=.1, label=a)
elif plotx == 'logx':
if meas in logy_plot:
for a in algs:
x = epochs_save[a][1:]
y = out_resorted[a][meas][1:]
plt.loglog(x, y, linewidth=3, label=a)
else:
for j, a in enumerate(algs):
x = epochs_save[a][1:]
y = out_resorted[a][meas][1:]
plt.semilogx(x, y, linewidth=3, marker=markers[j],
markersize=7, markevery=.1, label=a)
plt.title('{} v iteration'.format(meas))
h = plt.gca()
h.set_xlabel('epochs')
plt.legend(loc='best')
fig.savefig('{}/{}_{}.png'.format(folder_today, plotx, meas),
bbox_inches='tight')
# %% --- Prepare visual output as in [CERS2017] ---
# set line width and style
lwidth = 2
lwidth_help = 2
lstyle = '-'
lstyle_help = '--'
# set colors using colorbrewer
bmap = brewer2mpl.get_map('Paired', 'Qualitative', 5)
colors = bmap.mpl_colors
# set latex options
matplotlib.rc('text', usetex=True)
matplotlib.rcParams['text.latex.preamble'] = [r"\usepackage{amsmath}"]
# set font
fsize = 15
font = {'family': 'serif', 'size': fsize}
matplotlib.rc('font', **font)
matplotlib.rc('axes', labelsize=fsize) # fontsize of x and y labels
matplotlib.rc('xtick', labelsize=fsize) # fontsize of xtick labels
matplotlib.rc('ytick', labelsize=fsize) # fontsize of ytick labels
matplotlib.rc('legend', fontsize=fsize) # legend fontsize
# markers
marker = ('o', 'v', 's', 'p', 'd') # set markers
mevery = [(i / 30., .1) for i in range(20)] # how many markers to draw
msize = 9 # marker size
algs = ['pdhg', 'pa_pdhg', 'spdhg_uni2', 'pa_spdhg_uni2', 'pesquet_uni2']
label = ['PDHG', 'PA-PDHG', 'SPDHG', 'PA-SPDHG', 'Pesquet\\&Repetti']
fig = []
# draw first figure
fig.append(plt.figure(1))
plt.clf()
xlim = [1, 300]
ylim = [2e-1, 1e+3]
meas = 'dist_x'
alg_i = [0, 1, 3]
for j in alg_i:
a = algs[j]
x = epochs_save[a]
y = out_resorted[a][meas]
i = (np.less_equal(x, xlim[1]) & np.greater_equal(x, xlim[0]) &
np.less_equal(y, ylim[1]) & np.greater_equal(y, ylim[0]))
plt.loglog(x[i], y[i], color=colors[j], linestyle=lstyle, linewidth=lwidth,
marker=marker[j], markersize=msize, markevery=mevery[j],
label=label[j])
y = 5e+4 / np.array(iter_save_v[alg])**2
plt.loglog(x[i], y[i], color='gray', linestyle=lstyle_help,
linewidth=lwidth_help, label=r'$\mathcal O(1/K^2)$')
plt.gca().set_xlabel('iterations [epochs]')
plt.gca().set_ylabel('primal distance')
plt.gca().yaxis.set_ticks(np.logspace(0, 2, 3))
plt.ylim((5e-1, 1e+3))
plt.legend(ncol=1, frameon=False)
# ### next figure
fig.append(plt.figure(2))
plt.clf()
ylim = [1e-5, 100]
meas = 'obj_rel'
alg_i = [0, 1, 2, 3, 4]
for j in alg_i:
a = algs[j]
x = epochs_save[a]
y = out_resorted[a][meas]
i = (np.less_equal(x, xlim[1]) & np.greater_equal(x, xlim[0]) &
np.less_equal(y, ylim[1]) & np.greater_equal(y, ylim[0]))
plt.loglog(x[i], y[i], color=colors[j], linestyle=lstyle, linewidth=lwidth,
marker=marker[j], markersize=msize, markevery=mevery[j],
label=label[j])
plt.gca().set_xlabel('iterations [epochs]')
plt.gca().set_ylabel('relative objective')
plt.gca().yaxis.set_ticks(np.logspace(-5, -1, 3))
plt.legend(frameon=False)
# %%
for i, fi in enumerate(fig):
fi.savefig('{}/output{}.png'.format(folder_today, i), bbox_inches='tight')
|
baffolobill/mb_test_1
|
refs/heads/master
|
src/mbtest1/erp_client/__init__.py
|
12133432
| |
frappe/erpnext
|
refs/heads/develop
|
erpnext/accounts/print_format/gst_purchase_invoice/__init__.py
|
12133432
| |
vanda/DigitalLabels
|
refs/heads/master
|
labels/__init__.py
|
12133432
| |
HyperBaton/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/netscaler/__init__.py
|
12133432
| |
google-research/tapas
|
refs/heads/master
|
tapas/retrieval/tfidf_baseline.py
|
1
|
# coding=utf-8
# Copyright 2019 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""A simple TF-IDF model for table retrieval."""
from typing import Iterable, List, Text
from absl import app
from absl import flags
from absl import logging
import pandas as pd
from tapas.protos import interaction_pb2
from tapas.retrieval import tfidf_baseline_utils
from tapas.scripts import prediction_utils
FLAGS = flags.FLAGS
flags.DEFINE_list("interaction_files", None,
"Interaction protos in tfrecord format.")
flags.DEFINE_string("table_file", None, "Table protos in tfrecord format.")
flags.DEFINE_integer("max_table_rank", 50, "Max number of tables to retrieve.")
flags.DEFINE_integer("min_term_rank", 100,
"Min term frequency rank to consider.")
flags.DEFINE_boolean("drop_term_frequency", True,
"If True, ignore term frequency term.")
def _print(message):
logging.info(message)
print(message)
def evaluate(index, max_table_rank,
thresholds,
interactions,
rows):
"""Evaluates index against interactions."""
ranks = []
for nr, interaction in enumerate(interactions):
for question in interaction.questions:
scored_hits = index.retrieve(question.original_text)
reference_table_id = interaction.table.table_id
for rank, (table_id, _) in enumerate(scored_hits[:max_table_rank]):
if table_id == reference_table_id:
ranks.append(rank)
break
if nr % (len(interactions) // 10) == 0:
_print(f"Processed {nr:5d} / {len(interactions):5d}.")
def precision_at_th(threshold):
return sum(1 for rank in ranks if rank < threshold) / len(interactions)
values = [f"{precision_at_th(threshold):.4}" for threshold in thresholds]
rows.append(values)
def create_index(tables,
title_multiplicator, use_bm25):
if use_bm25:
return tfidf_baseline_utils.create_bm25_index(
tables,
title_multiplicator=title_multiplicator,
)
return tfidf_baseline_utils.create_inverted_index(
tables=tables,
min_rank=FLAGS.min_term_rank,
drop_term_frequency=FLAGS.drop_term_frequency,
title_multiplicator=title_multiplicator,
)
def get_hparams():
hparams = []
for multiplier in [1, 2]:
hparams.append({"multiplier": multiplier, "use_bm25": False})
for multiplier in [10, 15]:
hparams.append({"multiplier": multiplier, "use_bm25": True})
return hparams
def main(_):
max_table_rank = FLAGS.max_table_rank
thresholds = [1, 5, 10, 15, max_table_rank]
for interaction_file in FLAGS.interaction_files:
_print(f"Test set: {interaction_file}")
interactions = list(prediction_utils.iterate_interactions(interaction_file))
for use_local_index in [True, False]:
rows = []
row_names = []
for hparams in get_hparams():
name = "local" if use_local_index else "global"
name += "_bm25" if hparams["use_bm25"] else "_tfidf"
name += f'_tm{hparams["multiplier"]}'
_print(name)
if use_local_index:
index = create_index(
tables=(i.table for i in interactions),
title_multiplicator=hparams["multiplier"],
use_bm25=hparams["use_bm25"],
)
else:
index = create_index(
tables=tfidf_baseline_utils.iterate_tables(FLAGS.table_file),
title_multiplicator=hparams["multiplier"],
use_bm25=hparams["use_bm25"],
)
_print("... index created.")
evaluate(index, max_table_rank, thresholds, interactions, rows)
row_names.append(name)
df = pd.DataFrame(rows, columns=thresholds, index=row_names)
_print(df.to_string())
if __name__ == "__main__":
flags.mark_flag_as_required("interaction_files")
flags.mark_flag_as_required("table_file")
app.run(main)
|
mbauskar/sapphire-erpnext
|
refs/heads/master
|
erpnext/support/report/__init__.py
|
12133432
| |
coala/coala
|
refs/heads/master
|
coalib/bears/__init__.py
|
12133432
| |
mvaled/sentry
|
refs/heads/master
|
tests/sentry/utils/test_datastructures.py
|
3
|
from __future__ import absolute_import
import pytest
from sentry.utils.datastructures import BidirectionalMapping
def test_bidirectional_mapping():
value = BidirectionalMapping({"a": 1, "b": 2})
assert value["a"] == 1
assert value["b"] == 2
assert value.get_key(1) == "a"
assert value.get_key(2) == "b"
assert value.inverse() == {1: "a", 2: "b"}
value["c"] = 3
assert value["c"] == 3
assert value.get_key(3) == "c"
with pytest.raises(KeyError):
value["d"]
with pytest.raises(KeyError):
value.get_key(4)
with pytest.raises(TypeError):
value["d"] = [1, 2, 3] # not hashable
assert len(value) == len(value.inverse()) == 3
del value["c"]
assert len(value) == len(value.inverse()) == 2
|
StefanRijnhart/odoomrp-wip
|
refs/heads/8.0
|
mrp_subcontracting/models/purchase_order.py
|
3
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from openerp import models, fields, api
class PurchaseOrder(models.Model):
_inherit = 'purchase.order'
mrp_operation = fields.Many2one(
        'mrp.production.workcenter.line', 'MRP Operation')
mrp_production = fields.Many2one(
'mrp.production', string='MRP Production', store=True,
related="mrp_operation.production_id")
@api.one
def wkf_confirm_order(self):
picking_obj = self.env['stock.picking']
result = super(PurchaseOrder, self).wkf_confirm_order()
picking = False
if self.mrp_operation:
for move in self.mrp_operation.production_id.move_lines:
if move.work_order.id == self.mrp_operation.id:
if not picking:
wc_line = self.mrp_operation.routing_wc_line
vals = {'origin': self.mrp_operation.name,
'picking_type_id': wc_line.picking_type_id.id,
'invoice_state': 'none',
'mrp_production':
self.mrp_operation.production_id.id
}
picking = picking_obj.create(vals)
vals = {'out_picking': picking.id}
self.mrp_operation.write(vals)
vals = {'picking_id': picking.id}
move.write(vals)
return result
@api.one
def action_picking_create(self):
picking_obj = self.env['stock.picking']
result = super(PurchaseOrder, self).action_picking_create()
if self.mrp_operation:
cond = [('origin', '=', self.name)]
picking = picking_obj.search(cond, limit=1)
self.mrp_operation.in_picking = picking.id
picking.mrp_production = self.mrp_operation.production_id.id
return result
|
jbremer/androguard
|
refs/heads/master
|
androguard/core/api_specific_resources/api_permission_mappings/api_permission_mappings.py
|
16
|
import androguard.core.api_specific_resources.api_permission_mappings.api_permission_mappings_api9 as api9
import androguard.core.api_specific_resources.api_permission_mappings.api_permission_mappings_api10 as api10
import androguard.core.api_specific_resources.api_permission_mappings.api_permission_mappings_api14 as api14
import androguard.core.api_specific_resources.api_permission_mappings.api_permission_mappings_api15 as api15
import androguard.core.api_specific_resources.api_permission_mappings.api_permission_mappings_api16 as api16
import androguard.core.api_specific_resources.api_permission_mappings.api_permission_mappings_api17 as api17
import androguard.core.api_specific_resources.api_permission_mappings.api_permission_mappings_api18 as api18
import androguard.core.api_specific_resources.api_permission_mappings.api_permission_mappings_api19 as api19
import androguard.core.api_specific_resources.api_permission_mappings.api_permission_mappings_api21 as api21
import androguard.core.api_specific_resources.api_permission_mappings.api_permission_mappings_api22 as api22
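# Map Android API levels (as strings) to their per-level permission mappings,
# keyed by methods and by fields.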
AOSP_PERMISSIONS_MAPPINGS = {
"9" : {"AOSP_PERMISSIONS_BY_METHODS" : api9.AOSP_PERMISSIONS_BY_METHODS, "AOSP_PERMISSIONS_BY_FIELDS" : api9.AOSP_PERMISSIONS_BY_FIELDS},
"10" : {"AOSP_PERMISSIONS_BY_METHODS" : api10.AOSP_PERMISSIONS_BY_METHODS, "AOSP_PERMISSIONS_BY_FIELDS" : api10.AOSP_PERMISSIONS_BY_FIELDS},
"14" : {"AOSP_PERMISSIONS_BY_METHODS" : api14.AOSP_PERMISSIONS_BY_METHODS, "AOSP_PERMISSIONS_BY_FIELDS" : api14.AOSP_PERMISSIONS_BY_FIELDS},
"15" : {"AOSP_PERMISSIONS_BY_METHODS" : api15.AOSP_PERMISSIONS_BY_METHODS, "AOSP_PERMISSIONS_BY_FIELDS" : api15.AOSP_PERMISSIONS_BY_FIELDS},
"16" : {"AOSP_PERMISSIONS_BY_METHODS" : api16.AOSP_PERMISSIONS_BY_METHODS, "AOSP_PERMISSIONS_BY_FIELDS" : api16.AOSP_PERMISSIONS_BY_FIELDS},
"17" : {"AOSP_PERMISSIONS_BY_METHODS" : api17.AOSP_PERMISSIONS_BY_METHODS, "AOSP_PERMISSIONS_BY_FIELDS" : api17.AOSP_PERMISSIONS_BY_FIELDS},
"18" : {"AOSP_PERMISSIONS_BY_METHODS" : api18.AOSP_PERMISSIONS_BY_METHODS, "AOSP_PERMISSIONS_BY_FIELDS" : api18.AOSP_PERMISSIONS_BY_FIELDS},
"19" : {"AOSP_PERMISSIONS_BY_METHODS" : api19.AOSP_PERMISSIONS_BY_METHODS, "AOSP_PERMISSIONS_BY_FIELDS" : api19.AOSP_PERMISSIONS_BY_FIELDS},
"21" : {"AOSP_PERMISSIONS_BY_METHODS" : api21.AOSP_PERMISSIONS_BY_METHODS, "AOSP_PERMISSIONS_BY_FIELDS" : api21.AOSP_PERMISSIONS_BY_FIELDS},
"22" : {"AOSP_PERMISSIONS_BY_METHODS" : api22.AOSP_PERMISSIONS_BY_METHODS, "AOSP_PERMISSIONS_BY_FIELDS" : api22.AOSP_PERMISSIONS_BY_FIELDS},
}
|
MediaKraken/MediaKraken_Deployment
|
refs/heads/master
|
source/common/common_metadata_anime_scudlee.py
|
1
|
"""
Copyright (C) 2015 Quinn D Granfor <spootdev@gmail.com>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
version 2, as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License version 2 for more details.
You should have received a copy of the GNU General Public License
version 2 along with this program; if not, write to the Free
Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.
"""
import os
import time
from xml.dom import minidom
import xmltodict
from common import common_logging_elasticsearch_httpx
from . import common_file
from . import common_network
def mk_scudlee_fetch_xml():
"""
    Fetch the anime list by scudlee for TheTVDB cross-referencing
"""
# grab from github via direct raw link
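    # refresh the cached copies when they are missing or older than 7 days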
if not os.path.isfile('./cache/anime-list.xml') \
or common_file.com_file_modification_timestamp('./cache/anime-list.xml') \
< (time.time() - (7 * 86400)):
common_network.mk_network_fetch_from_url(
'https://github.com/ScudLee/anime-lists/raw/master/anime-list.xml',
'./cache/anime-list.xml')
if not os.path.isfile('./cache/anime-movieset-list.xml') \
or common_file.com_file_modification_timestamp('./cache/anime-movieset-list.xml') \
< (time.time() - (7 * 86400)):
common_network.mk_network_fetch_from_url(
'https://github.com/ScudLee/anime-lists/raw/master/anime-movieset-list.xml',
'./cache/anime-movieset-list.xml')
def mk_scudlee_anime_list_parse(file_name='./cache/anime-list.xml'):
"""
Parse the anime list
"""
anime_cross_reference = []
file_handle = open(file_name, 'r')
itemlist = xmltodict.parse(file_handle.read())
file_handle.close()
for anime_data in itemlist['anime-list']['anime']:
common_logging_elasticsearch_httpx.com_es_httpx_post(message_type='info',
message_text={'data': anime_data})
common_logging_elasticsearch_httpx.com_es_httpx_post(message_type='info', message_text={
'key': list(anime_data.keys())})
        try:
            # cast to int so non-numeric ids (web entries, etc.) are skipped
            tvdbid = str(int(anime_data['@tvdbid']))
        except (KeyError, ValueError, TypeError):
            tvdbid = None
try:
imdbid = anime_data['@imdbid']
if imdbid == 'unknown':
imdbid = None
except KeyError:
imdbid = None
try:
default_tvseason = anime_data['@defaulttvdbseason']
except KeyError:
default_tvseason = None
try:
mapping_data = anime_data['mapping-list']
except KeyError:
mapping_data = None
try:
before_data = anime_data['before']
except KeyError:
before_data = None
anime_cross_reference.append((anime_data['@anidbid'], tvdbid, imdbid,
default_tvseason, mapping_data, before_data))
return anime_cross_reference
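# Illustrative note (not part of the original module): each tuple appended above
# has the shape (anidbid, tvdbid, imdbid, defaulttvdbseason, mapping-list, before);
# tvdbid and imdbid are None whenever the source list has no usable cross-reference.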
def mk_scudlee_anime_set_parse(file_name='./cache/anime-movieset-list.xml'):
"""
Parse the movieset list
"""
itemlist = minidom.parse(file_name).getElementsByTagName('set')
collection_list = []
for set_data in itemlist:
indiv_collection_list = []
for anime_data in set_data.getElementsByTagName('anime'):
indiv_collection_list.append(
anime_data.attributes['anidbid'].value)
indiv_titles_list = []
for anime_data in set_data.getElementsByTagName('title'):
indiv_titles_list.append(anime_data.firstChild.nodeValue)
collection_list.append((indiv_collection_list, indiv_titles_list))
return collection_list
|
lucciano/bigcouch
|
refs/heads/master
|
couchjs/scons/scons-local-2.0.1/SCons/Action.py
|
61
|
"""SCons.Action
This encapsulates information about executing any sort of action that
can build one or more target Nodes (typically files) from one or more
source Nodes (also typically files) given a specific Environment.
The base class here is ActionBase. The base class supplies just a few
OO utility methods and some generic methods for displaying information
about an Action in response to the various commands that control printing.
A second-level base class is _ActionAction. This extends ActionBase
by providing the methods that can be used to show and perform an
action. True Action objects will subclass _ActionAction; Action
factory class objects will subclass ActionBase.
The heavy lifting is handled by subclasses for the different types of
actions we might execute:
CommandAction
CommandGeneratorAction
FunctionAction
ListAction
The subclasses supply the following public interface methods used by
other modules:
__call__()
THE public interface, "calling" an Action object executes the
command or Python function. This also takes care of printing
a pre-substitution command for debugging purposes.
get_contents()
Fetches the "contents" of an Action for signature calculation
plus the varlist. This is what gets MD5 checksummed to decide
if a target needs to be rebuilt because its action changed.
genstring()
Returns a string representation of the Action *without*
command substitution, but allows a CommandGeneratorAction to
generate the right action based on the specified target,
source and env. This is used by the Signature subsystem
(through the Executor) to obtain an (imprecise) representation
of the Action operation for informative purposes.
Subclasses also supply the following methods for internal use within
this module:
__str__()
Returns a string approximation of the Action; no variable
substitution is performed.
execute()
The internal method that really, truly, actually handles the
execution of a command or Python function. This is used so
that the __call__() methods can take care of displaying any
pre-substitution representations, and *then* execute an action
without worrying about the specific Actions involved.
get_presig()
Fetches the "contents" of a subclass for signature calculation.
The varlist is added to this to produce the Action's contents.
strfunction()
Returns a substituted string representation of the Action.
This is used by the _ActionAction.show() command to display the
command/function that will be executed to generate the target(s).
There is a related independent ActionCaller class that looks like a
regular Action, and which serves as a wrapper for arbitrary functions
that we want to let the user specify the arguments to now, but actually
execute later (when an out-of-date check determines that it needs to
be executed, for example). Objects of this class are returned by an
ActionFactory class that provides a __call__() method as a convenient
way for wrapping up the functions.
"""
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Action.py 5134 2010/08/16 23:02:40 bdeegan"
import SCons.compat
import dis
import os
# compat layer imports "cPickle" for us if it's available.
import pickle
import re
import sys
import subprocess
from SCons.Debug import logInstanceCreation
import SCons.Errors
import SCons.Executor
import SCons.Util
import SCons.Subst
# we use these a lot, so try to optimize them
is_String = SCons.Util.is_String
is_List = SCons.Util.is_List
class _null(object):
pass
print_actions = 1
execute_actions = 1
print_actions_presub = 0
def rfile(n):
try:
return n.rfile()
except AttributeError:
return n
def default_exitstatfunc(s):
return s
try:
SET_LINENO = dis.SET_LINENO
HAVE_ARGUMENT = dis.HAVE_ARGUMENT
except AttributeError:
remove_set_lineno_codes = lambda x: x
else:
def remove_set_lineno_codes(code):
result = []
n = len(code)
i = 0
while i < n:
c = code[i]
op = ord(c)
if op >= HAVE_ARGUMENT:
if op != SET_LINENO:
result.append(code[i:i+3])
i = i+3
else:
result.append(c)
i = i+1
return ''.join(result)
strip_quotes = re.compile('^[\'"](.*)[\'"]$')
def _callable_contents(obj):
"""Return the signature contents of a callable Python object.
"""
try:
# Test if obj is a method.
return _function_contents(obj.im_func)
except AttributeError:
try:
# Test if obj is a callable object.
return _function_contents(obj.__call__.im_func)
except AttributeError:
try:
# Test if obj is a code object.
return _code_contents(obj)
except AttributeError:
# Test if obj is a function object.
return _function_contents(obj)
def _object_contents(obj):
"""Return the signature contents of any Python object.
We have to handle the case where object contains a code object
since it can be pickled directly.
"""
try:
# Test if obj is a method.
return _function_contents(obj.im_func)
except AttributeError:
try:
# Test if obj is a callable object.
return _function_contents(obj.__call__.im_func)
except AttributeError:
try:
# Test if obj is a code object.
return _code_contents(obj)
except AttributeError:
try:
# Test if obj is a function object.
return _function_contents(obj)
except AttributeError:
                    # Should be a picklable Python object.
try:
return pickle.dumps(obj)
except (pickle.PicklingError, TypeError):
# This is weird, but it seems that nested classes
                        # are unpicklable. The Python docs say it should
# always be a PicklingError, but some Python
# versions seem to return TypeError. Just do
# the best we can.
return str(obj)
def _code_contents(code):
"""Return the signature contents of a code object.
By providing direct access to the code object of the
function, Python makes this extremely easy. Hooray!
Unfortunately, older versions of Python include line
number indications in the compiled byte code. Boo!
So we remove the line number byte codes to prevent
recompilations from moving a Python function.
"""
contents = []
# The code contents depends on the number of local variables
# but not their actual names.
contents.append("%s,%s" % (code.co_argcount, len(code.co_varnames)))
try:
contents.append(",%s,%s" % (len(code.co_cellvars), len(code.co_freevars)))
except AttributeError:
# Older versions of Python do not support closures.
contents.append(",0,0")
# The code contents depends on any constants accessed by the
# function. Note that we have to call _object_contents on each
# constants because the code object of nested functions can
# show-up among the constants.
#
# Note that we also always ignore the first entry of co_consts
# which contains the function doc string. We assume that the
# function does not access its doc string.
contents.append(',(' + ','.join(map(_object_contents,code.co_consts[1:])) + ')')
    # The code contents depends on the variable names used to
    # access global variables, as changing the variable name changes
# the variable actually accessed and therefore changes the
# function result.
contents.append(',(' + ','.join(map(_object_contents,code.co_names)) + ')')
# The code contents depends on its actual code!!!
contents.append(',(' + str(remove_set_lineno_codes(code.co_code)) + ')')
return ''.join(contents)
def _function_contents(func):
"""Return the signature contents of a function."""
contents = [_code_contents(func.func_code)]
# The function contents depends on the value of defaults arguments
if func.func_defaults:
contents.append(',(' + ','.join(map(_object_contents,func.func_defaults)) + ')')
else:
contents.append(',()')
# The function contents depends on the closure captured cell values.
try:
closure = func.func_closure or []
except AttributeError:
# Older versions of Python do not support closures.
closure = []
#xxx = [_object_contents(x.cell_contents) for x in closure]
try:
xxx = [_object_contents(x.cell_contents) for x in closure]
except AttributeError:
xxx = []
contents.append(',(' + ','.join(xxx) + ')')
return ''.join(contents)
def _actionAppend(act1, act2):
# This function knows how to slap two actions together.
# Mainly, it handles ListActions by concatenating into
# a single ListAction.
a1 = Action(act1)
a2 = Action(act2)
if a1 is None or a2 is None:
raise TypeError("Cannot append %s to %s" % (type(act1), type(act2)))
if isinstance(a1, ListAction):
if isinstance(a2, ListAction):
return ListAction(a1.list + a2.list)
else:
return ListAction(a1.list + [ a2 ])
else:
if isinstance(a2, ListAction):
return ListAction([ a1 ] + a2.list)
else:
return ListAction([ a1, a2 ])
def _do_create_keywords(args, kw):
"""This converts any arguments after the action argument into
their equivalent keywords and adds them to the kw argument.
"""
v = kw.get('varlist', ())
# prevent varlist="FOO" from being interpreted as ['F', 'O', 'O']
if is_String(v): v = (v,)
kw['varlist'] = tuple(v)
if args:
# turn positional args into equivalent keywords
cmdstrfunc = args[0]
if cmdstrfunc is None or is_String(cmdstrfunc):
kw['cmdstr'] = cmdstrfunc
elif callable(cmdstrfunc):
kw['strfunction'] = cmdstrfunc
else:
raise SCons.Errors.UserError(
'Invalid command display variable type. '
'You must either pass a string or a callback which '
'accepts (target, source, env) as parameters.')
if len(args) > 1:
kw['varlist'] = args[1:] + kw['varlist']
if kw.get('strfunction', _null) is not _null \
and kw.get('cmdstr', _null) is not _null:
raise SCons.Errors.UserError(
'Cannot have both strfunction and cmdstr args to Action()')
def _do_create_action(act, kw):
"""This is the actual "implementation" for the
Action factory method, below. This handles the
fact that passing lists to Action() itself has
different semantics than passing lists as elements
of lists.
The former will create a ListAction, the latter
will create a CommandAction by converting the inner
list elements to strings."""
if isinstance(act, ActionBase):
return act
if is_List(act):
return CommandAction(act, **kw)
if callable(act):
try:
gen = kw['generator']
del kw['generator']
except KeyError:
gen = 0
if gen:
action_type = CommandGeneratorAction
else:
action_type = FunctionAction
return action_type(act, kw)
if is_String(act):
var=SCons.Util.get_environment_var(act)
if var:
# This looks like a string that is purely an Environment
# variable reference, like "$FOO" or "${FOO}". We do
# something special here...we lazily evaluate the contents
# of that Environment variable, so a user could put something
# like a function or a CommandGenerator in that variable
# instead of a string.
return LazyAction(var, kw)
commands = str(act).split('\n')
if len(commands) == 1:
return CommandAction(commands[0], **kw)
# The list of string commands may include a LazyAction, so we
# reprocess them via _do_create_list_action.
return _do_create_list_action(commands, kw)
return None
def _do_create_list_action(act, kw):
"""A factory for list actions. Convert the input list into Actions
and then wrap them in a ListAction."""
acts = []
for a in act:
aa = _do_create_action(a, kw)
if aa is not None: acts.append(aa)
if not acts:
return ListAction([])
elif len(acts) == 1:
return acts[0]
else:
return ListAction(acts)
def Action(act, *args, **kw):
"""A factory for action objects."""
# Really simple: the _do_create_* routines do the heavy lifting.
_do_create_keywords(args, kw)
if is_List(act):
return _do_create_list_action(act, kw)
return _do_create_action(act, kw)
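# Illustrative sketch of the list semantics documented in _do_create_action()
# (assumed commands, not part of the original source):
#   Action(['rm -f foo', 'touch foo'])   # -> ListAction of two CommandActions
#   Action([['rm', '-f', 'foo']])        # -> a single CommandAction whose words
#                                        #    are the elements of the inner list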
class ActionBase(object):
"""Base class for all types of action objects that can be held by
other objects (Builders, Executors, etc.) This provides the
common methods for manipulating and combining those actions."""
def __cmp__(self, other):
return cmp(self.__dict__, other)
def no_batch_key(self, env, target, source):
return None
batch_key = no_batch_key
def genstring(self, target, source, env):
return str(self)
def get_contents(self, target, source, env):
result = [ self.get_presig(target, source, env) ]
# This should never happen, as the Action() factory should wrap
# the varlist, but just in case an action is created directly,
# we duplicate this check here.
vl = self.get_varlist(target, source, env)
if is_String(vl): vl = (vl,)
for v in vl:
result.append(env.subst('${'+v+'}'))
return ''.join(result)
def __add__(self, other):
return _actionAppend(self, other)
def __radd__(self, other):
return _actionAppend(other, self)
def presub_lines(self, env):
# CommandGeneratorAction needs a real environment
# in order to return the proper string here, since
# it may call LazyAction, which looks up a key
# in that env. So we temporarily remember the env here,
# and CommandGeneratorAction will use this env
# when it calls its _generate method.
self.presub_env = env
lines = str(self).split('\n')
self.presub_env = None # don't need this any more
return lines
def get_varlist(self, target, source, env, executor=None):
return self.varlist
def get_targets(self, env, executor):
"""
Returns the type of targets ($TARGETS, $CHANGED_TARGETS) used
by this action.
"""
return self.targets
class _ActionAction(ActionBase):
"""Base class for actions that create output objects."""
def __init__(self, cmdstr=_null, strfunction=_null, varlist=(),
presub=_null, chdir=None, exitstatfunc=None,
batch_key=None, targets='$TARGETS',
**kw):
self.cmdstr = cmdstr
if strfunction is not _null:
if strfunction is None:
self.cmdstr = None
else:
self.strfunction = strfunction
self.varlist = varlist
self.presub = presub
self.chdir = chdir
if not exitstatfunc:
exitstatfunc = default_exitstatfunc
self.exitstatfunc = exitstatfunc
self.targets = targets
if batch_key:
if not callable(batch_key):
# They have set batch_key, but not to their own
# callable. The default behavior here will batch
# *all* targets+sources using this action, separated
# for each construction environment.
def default_batch_key(self, env, target, source):
return (id(self), id(env))
batch_key = default_batch_key
SCons.Util.AddMethod(self, batch_key, 'batch_key')
def print_cmd_line(self, s, target, source, env):
sys.stdout.write(s + u"\n")
def __call__(self, target, source, env,
exitstatfunc=_null,
presub=_null,
show=_null,
execute=_null,
chdir=_null,
executor=None):
if not is_List(target):
target = [target]
if not is_List(source):
source = [source]
if presub is _null:
presub = self.presub
if presub is _null:
presub = print_actions_presub
if exitstatfunc is _null: exitstatfunc = self.exitstatfunc
if show is _null: show = print_actions
if execute is _null: execute = execute_actions
if chdir is _null: chdir = self.chdir
save_cwd = None
if chdir:
save_cwd = os.getcwd()
try:
chdir = str(chdir.abspath)
except AttributeError:
if not is_String(chdir):
if executor:
chdir = str(executor.batches[0].targets[0].dir)
else:
chdir = str(target[0].dir)
if presub:
if executor:
target = executor.get_all_targets()
source = executor.get_all_sources()
t = ' and '.join(map(str, target))
l = '\n '.join(self.presub_lines(env))
out = u"Building %s with action:\n %s\n" % (t, l)
sys.stdout.write(out)
cmd = None
if show and self.strfunction:
if executor:
target = executor.get_all_targets()
source = executor.get_all_sources()
try:
cmd = self.strfunction(target, source, env, executor)
except TypeError:
cmd = self.strfunction(target, source, env)
if cmd:
if chdir:
cmd = ('os.chdir(%s)\n' % repr(chdir)) + cmd
try:
get = env.get
except AttributeError:
print_func = self.print_cmd_line
else:
print_func = get('PRINT_CMD_LINE_FUNC')
if not print_func:
print_func = self.print_cmd_line
print_func(cmd, target, source, env)
stat = 0
if execute:
if chdir:
os.chdir(chdir)
try:
stat = self.execute(target, source, env, executor=executor)
if isinstance(stat, SCons.Errors.BuildError):
s = exitstatfunc(stat.status)
if s:
stat.status = s
else:
stat = s
else:
stat = exitstatfunc(stat)
finally:
if save_cwd:
os.chdir(save_cwd)
if cmd and save_cwd:
print_func('os.chdir(%s)' % repr(save_cwd), target, source, env)
return stat
def _string_from_cmd_list(cmd_list):
"""Takes a list of command line arguments and returns a pretty
representation for printing."""
cl = []
for arg in map(str, cmd_list):
if ' ' in arg or '\t' in arg:
arg = '"' + arg + '"'
cl.append(arg)
return ' '.join(cl)
# A fiddlin' little function that has an 'import SCons.Environment' which
# can't be moved to the top level without creating an import loop. Since
# this import creates a local variable named 'SCons', it blocks access to
# the global variable, so we move it here to prevent complaints about local
# variables being used uninitialized.
default_ENV = None
def get_default_ENV(env):
global default_ENV
try:
return env['ENV']
except KeyError:
if not default_ENV:
import SCons.Environment
# This is a hideously expensive way to get a default shell
# environment. What it really should do is run the platform
# setup to get the default ENV. Fortunately, it's incredibly
# rare for an Environment not to have a shell environment, so
# we're not going to worry about it overmuch.
default_ENV = SCons.Environment.Environment()['ENV']
return default_ENV
# This function is still in draft mode. We're going to need something like
# it in the long run as more and more places use subprocess, but I'm sure
# it'll have to be tweaked to get the full desired functionality.
# one special arg (so far?), 'error', to tell what to do with exceptions.
def _subproc(scons_env, cmd, error = 'ignore', **kw):
"""Do common setup for a subprocess.Popen() call"""
# allow std{in,out,err} to be "'devnull'"
io = kw.get('stdin')
if is_String(io) and io == 'devnull':
kw['stdin'] = open(os.devnull)
io = kw.get('stdout')
if is_String(io) and io == 'devnull':
kw['stdout'] = open(os.devnull, 'w')
io = kw.get('stderr')
if is_String(io) and io == 'devnull':
kw['stderr'] = open(os.devnull, 'w')
# Figure out what shell environment to use
ENV = kw.get('env', None)
if ENV is None: ENV = get_default_ENV(scons_env)
# Ensure that the ENV values are all strings:
new_env = {}
for key, value in ENV.items():
if is_List(value):
# If the value is a list, then we assume it is a path list,
# because that's a pretty common list-like value to stick
# in an environment variable:
value = SCons.Util.flatten_sequence(value)
new_env[key] = os.pathsep.join(map(str, value))
else:
# It's either a string or something else. If it's a string,
# we still want to call str() because it might be a *Unicode*
# string, which makes subprocess.Popen() gag. If it isn't a
# string or a list, then we just coerce it to a string, which
# is the proper way to handle Dir and File instances and will
# produce something reasonable for just about everything else:
new_env[key] = str(value)
kw['env'] = new_env
try:
#FUTURE return subprocess.Popen(cmd, **kw)
return subprocess.Popen(cmd, **kw)
except EnvironmentError, e:
if error == 'raise': raise
# return a dummy Popen instance that only returns error
class dummyPopen(object):
def __init__(self, e): self.exception = e
def communicate(self): return ('','')
def wait(self): return -self.exception.errno
stdin = None
class f(object):
def read(self): return ''
def readline(self): return ''
stdout = stderr = f()
return dummyPopen(e)
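# Illustrative call sketch (assumption, not part of the original source): the
# 'devnull' convenience and the dummy Popen fallback let callers write, e.g.,
#   p = _subproc(env, ['true'], stdin='devnull', stdout=subprocess.PIPE)
#   out, err = p.communicate()
# and still get a usable object (with a nonzero wait() status) if spawning fails.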
class CommandAction(_ActionAction):
"""Class for command-execution actions."""
def __init__(self, cmd, **kw):
# Cmd can actually be a list or a single item; if it's a
# single item it should be the command string to execute; if a
# list then it should be the words of the command string to
# execute. Only a single command should be executed by this
# object; lists of commands should be handled by embedding
# these objects in a ListAction object (which the Action()
# factory above does). cmd will be passed to
# Environment.subst_list() for substituting environment
# variables.
if __debug__: logInstanceCreation(self, 'Action.CommandAction')
_ActionAction.__init__(self, **kw)
if is_List(cmd):
if list(filter(is_List, cmd)):
raise TypeError("CommandAction should be given only " \
"a single command")
self.cmd_list = cmd
def __str__(self):
if is_List(self.cmd_list):
return ' '.join(map(str, self.cmd_list))
return str(self.cmd_list)
def process(self, target, source, env, executor=None):
if executor:
result = env.subst_list(self.cmd_list, 0, executor=executor)
else:
result = env.subst_list(self.cmd_list, 0, target, source)
silent = None
ignore = None
while True:
try: c = result[0][0][0]
except IndexError: c = None
if c == '@': silent = 1
elif c == '-': ignore = 1
else: break
result[0][0] = result[0][0][1:]
try:
if not result[0][0]:
result[0] = result[0][1:]
except IndexError:
pass
return result, ignore, silent
def strfunction(self, target, source, env, executor=None):
if self.cmdstr is None:
return None
if self.cmdstr is not _null:
from SCons.Subst import SUBST_RAW
if executor:
c = env.subst(self.cmdstr, SUBST_RAW, executor=executor)
else:
c = env.subst(self.cmdstr, SUBST_RAW, target, source)
if c:
return c
cmd_list, ignore, silent = self.process(target, source, env, executor)
if silent:
return ''
return _string_from_cmd_list(cmd_list[0])
def execute(self, target, source, env, executor=None):
"""Execute a command action.
This will handle lists of commands as well as individual commands,
because construction variable substitution may turn a single
"command" into a list. This means that this class can actually
handle lists of commands, even though that's not how we use it
externally.
"""
escape_list = SCons.Subst.escape_list
flatten_sequence = SCons.Util.flatten_sequence
try:
shell = env['SHELL']
except KeyError:
raise SCons.Errors.UserError('Missing SHELL construction variable.')
try:
spawn = env['SPAWN']
except KeyError:
raise SCons.Errors.UserError('Missing SPAWN construction variable.')
else:
if is_String(spawn):
spawn = env.subst(spawn, raw=1, conv=lambda x: x)
escape = env.get('ESCAPE', lambda x: x)
ENV = get_default_ENV(env)
# Ensure that the ENV values are all strings:
for key, value in ENV.items():
if not is_String(value):
if is_List(value):
# If the value is a list, then we assume it is a
# path list, because that's a pretty common list-like
# value to stick in an environment variable:
value = flatten_sequence(value)
ENV[key] = os.pathsep.join(map(str, value))
else:
# If it isn't a string or a list, then we just coerce
# it to a string, which is the proper way to handle
# Dir and File instances and will produce something
# reasonable for just about everything else:
ENV[key] = str(value)
if executor:
target = executor.get_all_targets()
source = executor.get_all_sources()
cmd_list, ignore, silent = self.process(target, list(map(rfile, source)), env, executor)
# Use len() to filter out any "command" that's zero-length.
for cmd_line in filter(len, cmd_list):
# Escape the command line for the interpreter we are using.
cmd_line = escape_list(cmd_line, escape)
result = spawn(shell, escape, cmd_line[0], cmd_line, ENV)
if not ignore and result:
msg = "Error %s" % result
return SCons.Errors.BuildError(errstr=msg,
status=result,
action=self,
command=cmd_line)
return 0
def get_presig(self, target, source, env, executor=None):
"""Return the signature contents of this action's command line.
        This strips the $( ... $) markers and everything in between from
        the string, since those parts don't affect signatures.
"""
from SCons.Subst import SUBST_SIG
cmd = self.cmd_list
if is_List(cmd):
cmd = ' '.join(map(str, cmd))
else:
cmd = str(cmd)
if executor:
return env.subst_target_source(cmd, SUBST_SIG, executor=executor)
else:
return env.subst_target_source(cmd, SUBST_SIG, target, source)
def get_implicit_deps(self, target, source, env, executor=None):
icd = env.get('IMPLICIT_COMMAND_DEPENDENCIES', True)
if is_String(icd) and icd[:1] == '$':
icd = env.subst(icd)
if not icd or icd in ('0', 'None'):
return []
from SCons.Subst import SUBST_SIG
if executor:
cmd_list = env.subst_list(self.cmd_list, SUBST_SIG, executor=executor)
else:
cmd_list = env.subst_list(self.cmd_list, SUBST_SIG, target, source)
res = []
for cmd_line in cmd_list:
if cmd_line:
d = str(cmd_line[0])
m = strip_quotes.match(d)
if m:
d = m.group(1)
d = env.WhereIs(d)
if d:
res.append(env.fs.File(d))
return res
class CommandGeneratorAction(ActionBase):
"""Class for command-generator actions."""
def __init__(self, generator, kw):
if __debug__: logInstanceCreation(self, 'Action.CommandGeneratorAction')
self.generator = generator
self.gen_kw = kw
self.varlist = kw.get('varlist', ())
self.targets = kw.get('targets', '$TARGETS')
def _generate(self, target, source, env, for_signature, executor=None):
# ensure that target is a list, to make it easier to write
# generator functions:
if not is_List(target):
target = [target]
if executor:
target = executor.get_all_targets()
source = executor.get_all_sources()
ret = self.generator(target=target,
source=source,
env=env,
for_signature=for_signature)
gen_cmd = Action(ret, **self.gen_kw)
if not gen_cmd:
raise SCons.Errors.UserError("Object returned from command generator: %s cannot be used to create an Action." % repr(ret))
return gen_cmd
def __str__(self):
try:
env = self.presub_env
except AttributeError:
env = None
if env is None:
env = SCons.Defaults.DefaultEnvironment()
act = self._generate([], [], env, 1)
return str(act)
def batch_key(self, env, target, source):
return self._generate(target, source, env, 1).batch_key(env, target, source)
def genstring(self, target, source, env, executor=None):
return self._generate(target, source, env, 1, executor).genstring(target, source, env)
def __call__(self, target, source, env, exitstatfunc=_null, presub=_null,
show=_null, execute=_null, chdir=_null, executor=None):
act = self._generate(target, source, env, 0, executor)
if act is None:
            raise SCons.Errors.UserError("While building `%s': "
"Cannot deduce file extension from source files: %s"
% (repr(list(map(str, target))), repr(list(map(str, source)))))
return act(target, source, env, exitstatfunc, presub,
show, execute, chdir, executor)
def get_presig(self, target, source, env, executor=None):
"""Return the signature contents of this action's command line.
        This strips the $( ... $) markers and everything in between from
        the string, since those parts don't affect signatures.
"""
return self._generate(target, source, env, 1, executor).get_presig(target, source, env)
def get_implicit_deps(self, target, source, env, executor=None):
return self._generate(target, source, env, 1, executor).get_implicit_deps(target, source, env)
def get_varlist(self, target, source, env, executor=None):
return self._generate(target, source, env, 1, executor).get_varlist(target, source, env, executor)
def get_targets(self, env, executor):
return self._generate(None, None, env, 1, executor).get_targets(env, executor)
# A LazyAction is a kind of hybrid generator and command action for
# strings of the form "$VAR". These strings normally expand to other
# strings (think "$CCCOM" to "$CC -c -o $TARGET $SOURCE"), but we also
# want to be able to replace them with functions in the construction
# environment. Consequently, we want lazy evaluation and creation of
# an Action in the case of the function, but that's overkill in the more
# normal case of expansion to other strings.
#
# So we do this with a subclass that's both a generator *and*
# a command action. The overridden methods all do a quick check
# of the construction variable, and if it's a string we just call
# the corresponding CommandAction method to do the heavy lifting.
# If not, then we call the same-named CommandGeneratorAction method.
# The CommandGeneratorAction methods work by using the overridden
# _generate() method, that is, our own way of handling "generation" of
# an action based on what's in the construction variable.
class LazyAction(CommandGeneratorAction, CommandAction):
def __init__(self, var, kw):
if __debug__: logInstanceCreation(self, 'Action.LazyAction')
#FUTURE CommandAction.__init__(self, '${'+var+'}', **kw)
CommandAction.__init__(self, '${'+var+'}', **kw)
self.var = SCons.Util.to_String(var)
self.gen_kw = kw
def get_parent_class(self, env):
c = env.get(self.var)
if is_String(c) and not '\n' in c:
return CommandAction
return CommandGeneratorAction
def _generate_cache(self, env):
if env:
c = env.get(self.var, '')
else:
c = ''
gen_cmd = Action(c, **self.gen_kw)
if not gen_cmd:
raise SCons.Errors.UserError("$%s value %s cannot be used to create an Action." % (self.var, repr(c)))
return gen_cmd
def _generate(self, target, source, env, for_signature, executor=None):
return self._generate_cache(env)
def __call__(self, target, source, env, *args, **kw):
c = self.get_parent_class(env)
return c.__call__(self, target, source, env, *args, **kw)
def get_presig(self, target, source, env):
c = self.get_parent_class(env)
return c.get_presig(self, target, source, env)
def get_varlist(self, target, source, env, executor=None):
c = self.get_parent_class(env)
return c.get_varlist(self, target, source, env, executor)
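# Illustrative sketch (assumed variable name, not part of the original source):
# a string action that is purely a construction-variable reference, e.g.
#   env = Environment(MYCOM='$CC -c -o $TARGET $SOURCE')
#   a = Action('$MYCOM')        # becomes a LazyAction bound to the name MYCOM
# resolves the variable only when the action is used, so later assigning a
# Python function to env['MYCOM'] transparently switches to the generator path.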
class FunctionAction(_ActionAction):
"""Class for Python function actions."""
def __init__(self, execfunction, kw):
if __debug__: logInstanceCreation(self, 'Action.FunctionAction')
self.execfunction = execfunction
try:
self.funccontents = _callable_contents(execfunction)
except AttributeError:
try:
# See if execfunction will do the heavy lifting for us.
self.gc = execfunction.get_contents
except AttributeError:
# This is weird, just do the best we can.
self.funccontents = _object_contents(execfunction)
_ActionAction.__init__(self, **kw)
def function_name(self):
try:
return self.execfunction.__name__
except AttributeError:
try:
return self.execfunction.__class__.__name__
except AttributeError:
return "unknown_python_function"
def strfunction(self, target, source, env, executor=None):
if self.cmdstr is None:
return None
if self.cmdstr is not _null:
from SCons.Subst import SUBST_RAW
if executor:
c = env.subst(self.cmdstr, SUBST_RAW, executor=executor)
else:
c = env.subst(self.cmdstr, SUBST_RAW, target, source)
if c:
return c
def array(a):
def quote(s):
try:
str_for_display = s.str_for_display
except AttributeError:
s = repr(s)
else:
s = str_for_display()
return s
return '[' + ", ".join(map(quote, a)) + ']'
try:
strfunc = self.execfunction.strfunction
except AttributeError:
pass
else:
if strfunc is None:
return None
if callable(strfunc):
return strfunc(target, source, env)
name = self.function_name()
tstr = array(target)
sstr = array(source)
return "%s(%s, %s)" % (name, tstr, sstr)
def __str__(self):
name = self.function_name()
if name == 'ActionCaller':
return str(self.execfunction)
return "%s(target, source, env)" % name
def execute(self, target, source, env, executor=None):
exc_info = (None,None,None)
try:
if executor:
target = executor.get_all_targets()
source = executor.get_all_sources()
rsources = list(map(rfile, source))
try:
result = self.execfunction(target=target, source=rsources, env=env)
except KeyboardInterrupt, e:
raise
except SystemExit, e:
raise
except Exception, e:
result = e
exc_info = sys.exc_info()
if result:
result = SCons.Errors.convert_to_BuildError(result, exc_info)
result.node=target
result.action=self
try:
result.command=self.strfunction(target, source, env, executor)
except TypeError:
result.command=self.strfunction(target, source, env)
# FIXME: This maintains backward compatibility with respect to
# which type of exceptions were returned by raising an
# exception and which ones were returned by value. It would
# probably be best to always return them by value here, but
# some codes do not check the return value of Actions and I do
# not have the time to modify them at this point.
if (exc_info[1] and
not isinstance(exc_info[1],EnvironmentError)):
raise result
return result
finally:
# Break the cycle between the traceback object and this
# function stack frame. See the sys.exc_info() doc info for
# more information about this issue.
del exc_info
def get_presig(self, target, source, env):
"""Return the signature contents of this callable action."""
try:
return self.gc(target, source, env)
except AttributeError:
return self.funccontents
def get_implicit_deps(self, target, source, env):
return []
class ListAction(ActionBase):
"""Class for lists of other actions."""
def __init__(self, actionlist):
if __debug__: logInstanceCreation(self, 'Action.ListAction')
def list_of_actions(x):
if isinstance(x, ActionBase):
return x
return Action(x)
self.list = list(map(list_of_actions, actionlist))
# our children will have had any varlist
# applied; we don't need to do it again
self.varlist = ()
self.targets = '$TARGETS'
def genstring(self, target, source, env):
return '\n'.join([a.genstring(target, source, env) for a in self.list])
def __str__(self):
return '\n'.join(map(str, self.list))
def presub_lines(self, env):
return SCons.Util.flatten_sequence(
[a.presub_lines(env) for a in self.list])
def get_presig(self, target, source, env):
"""Return the signature contents of this action list.
Simple concatenation of the signatures of the elements.
"""
return "".join([x.get_contents(target, source, env) for x in self.list])
def __call__(self, target, source, env, exitstatfunc=_null, presub=_null,
show=_null, execute=_null, chdir=_null, executor=None):
if executor:
target = executor.get_all_targets()
source = executor.get_all_sources()
for act in self.list:
stat = act(target, source, env, exitstatfunc, presub,
show, execute, chdir, executor)
if stat:
return stat
return 0
def get_implicit_deps(self, target, source, env):
result = []
for act in self.list:
result.extend(act.get_implicit_deps(target, source, env))
return result
def get_varlist(self, target, source, env, executor=None):
result = SCons.Util.OrderedDict()
for act in self.list:
for var in act.get_varlist(target, source, env, executor):
result[var] = True
return list(result.keys())
class ActionCaller(object):
"""A class for delaying calling an Action function with specific
(positional and keyword) arguments until the Action is actually
executed.
This class looks to the rest of the world like a normal Action object,
but what it's really doing is hanging on to the arguments until we
have a target, source and env to use for the expansion.
"""
def __init__(self, parent, args, kw):
self.parent = parent
self.args = args
self.kw = kw
def get_contents(self, target, source, env):
actfunc = self.parent.actfunc
try:
# "self.actfunc" is a function.
contents = str(actfunc.func_code.co_code)
except AttributeError:
# "self.actfunc" is a callable object.
try:
contents = str(actfunc.__call__.im_func.func_code.co_code)
except AttributeError:
# No __call__() method, so it might be a builtin
# or something like that. Do the best we can.
contents = str(actfunc)
contents = remove_set_lineno_codes(contents)
return contents
def subst(self, s, target, source, env):
# If s is a list, recursively apply subst()
# to every element in the list
if is_List(s):
result = []
for elem in s:
result.append(self.subst(elem, target, source, env))
return self.parent.convert(result)
# Special-case hack: Let a custom function wrapped in an
# ActionCaller get at the environment through which the action
# was called by using this hard-coded value as a special return.
if s == '$__env__':
return env
elif is_String(s):
return env.subst(s, 1, target, source)
return self.parent.convert(s)
def subst_args(self, target, source, env):
return [self.subst(x, target, source, env) for x in self.args]
def subst_kw(self, target, source, env):
kw = {}
for key in self.kw.keys():
kw[key] = self.subst(self.kw[key], target, source, env)
return kw
def __call__(self, target, source, env, executor=None):
args = self.subst_args(target, source, env)
kw = self.subst_kw(target, source, env)
return self.parent.actfunc(*args, **kw)
def strfunction(self, target, source, env):
args = self.subst_args(target, source, env)
kw = self.subst_kw(target, source, env)
return self.parent.strfunc(*args, **kw)
def __str__(self):
return self.parent.strfunc(*self.args, **self.kw)
class ActionFactory(object):
"""A factory class that will wrap up an arbitrary function
as an SCons-executable Action object.
The real heavy lifting here is done by the ActionCaller class.
We just collect the (positional and keyword) arguments that we're
called with and give them to the ActionCaller object we create,
so it can hang onto them until it needs them.
"""
def __init__(self, actfunc, strfunc, convert=lambda x: x):
self.actfunc = actfunc
self.strfunc = strfunc
self.convert = convert
def __call__(self, *args, **kw):
ac = ActionCaller(self, args, kw)
action = Action(ac, strfunction=ac.strfunction)
return action
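# Illustrative sketch (assumed helper names, not part of the original source;
# this mirrors how factories such as Copy/Delete are built elsewhere in SCons):
#   import shutil
#   def my_copy_func(dest, src): shutil.copy2(src, dest)
#   def my_copy_str(dest, src): return 'MyCopy("%s", "%s")' % (dest, src)
#   MyCopy = ActionFactory(my_copy_func, my_copy_str)
#   env.Command('out.txt', 'in.txt', MyCopy('$TARGET', '$SOURCE'))
# The arguments are substituted only when the action finally runs.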
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
ConnorGBrewster/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/websockets/handlers/protocol_array_wsh.py
|
265
|
#!/usr/bin/python
from mod_pywebsocket import msgutil, util
def web_socket_do_extra_handshake(request):
line = request.headers_in.get('Sec-WebSocket-Protocol')
request.ws_protocol = line.split(',', 1)[0]
#pass
def web_socket_transfer_data(request):
while True:
msgutil.send_message(request, request.ws_protocol)
return
|
mldbai/mldb
|
refs/heads/master
|
testing/ranking_test.py
|
1
|
#
# ranking_procedure_test.py
# Mich, 2016-01-11
# This file is part of MLDB. Copyright 2016 mldb.ai inc. All rights reserved.
#
from mldb import mldb, MldbUnitTest, ResponseException
class RankingTest(MldbUnitTest): # noqa
def test_base(self):
mldb.put('/v1/datasets/ds', {
'type' : 'sparse.mutable',
})
size = 123
for i in range(size):
mldb.post('/v1/datasets/ds/rows', {
'rowName' : 'row{}'.format(i),
'columns' : [['score', i, 1], ['index', i * 2, 2], ['prob', i * 3, 3]]
})
mldb.post('/v1/datasets/ds/commit')
mldb.post('/v1/procedures', {
'type' : 'ranking',
'params' : {
'inputData' : 'SELECT * FROM ds ORDER BY score',
'outputDataset' : 'out',
'rankingType' : 'index',
'runOnCreation' : True
}
})
# MLDB-1267
mldb.log(mldb.query("SELECT * FROM out"))
res = mldb.get("/v1/query", q="SELECT latest_timestamp({*}) FROM out",
format='table')
data = res.json()
self.assertEqual(data[1][1], '1970-01-01T00:00:01Z')
mldb.log(data[1])
mldb.put('/v1/datasets/result', {
'type' : 'merged',
'params' : {
'datasets' : [
{'id' : 'ds'},
{'id' : 'out'}
]
}
})
res = mldb.get('/v1/query',
q='SELECT score, rank FROM result ORDER BY rank',
format='table')
data = res.json()
self.assertEqual(data[1][1], 0, str(data[1]))
self.assertEqual(data[1][2], 0, str(data[1]))
self.assertEqual(data[2][1], 1, str(data[2]))
self.assertEqual(data[2][2], 1, str(data[2]))
self.assertEqual(data[size][1], size - 1, str(data[size]))
self.assertEqual(data[size][2], size - 1, str(data[size]))
if __name__ == '__main__':
mldb.run_tests()
|
ezyang/offlineimap
|
refs/heads/master
|
test/OLItest/TestRunner.py
|
8
|
# Copyright (C) 2012- Sebastian Spaeth & contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import imaplib
import unittest
import logging
import os
import re
import sys
import shutil
import subprocess
import tempfile
import random
random.seed()
from offlineimap.CustomConfig import CustomConfigParser
from . import default_conf
class OLITestLib():
cred_file = None
testdir = None
"""Absolute path of the current temporary test directory"""
cmd = None
"""command that will be executed to invoke offlineimap"""
def __init__(self, cred_file = None, cmd='offlineimap'):
"""
:param cred_file: file of the configuration
snippet for authenticating against the test IMAP server(s).
:param cmd: command that will be executed to invoke offlineimap"""
OLITestLib.cred_file = cred_file
if not os.path.isfile(cred_file):
raise UserWarning("Please copy 'credentials.conf.sample' to '%s' "
"and set your credentials there." % cred_file)
OLITestLib.cmd = cmd
@classmethod
def create_test_dir(cls, suffix=''):
"""Creates a test directory and places OLI config there
Note that this is a class method. There can only be one test
directory at a time. OLITestLib is not suited for running
several tests in parallel. The user is responsible for
cleaning that up herself."""
assert cls.cred_file != None
# creating temporary dir for testing in same dir as credentials.conf
cls.testdir = os.path.abspath(
tempfile.mkdtemp(prefix='tmp_%s_'%suffix,
dir=os.path.dirname(cls.cred_file)))
cls.write_config_file()
return cls.testdir
@classmethod
def get_default_config(cls):
"""Creates a default ConfigParser file and returns it
The returned config can be manipulated and then saved with
write_config_file()"""
#TODO, only do first time and cache then for subsequent calls?
assert cls.cred_file != None
assert cls.testdir != None
config = CustomConfigParser()
config.readfp(default_conf)
default_conf.seek(0) # rewind config_file to start
config.read(cls.cred_file)
config.set("general", "metadata", cls.testdir)
return config
@classmethod
def write_config_file(cls, config=None):
"""Creates a OLI configuration file
It is created in testdir (so create_test_dir has to be called
earlier) using the credentials information given (so they had
to be set earlier). Failure to do either of them will raise an
AssertionException. If config is None, a default one will be
used via get_default_config, otherwise it needs to be a config
object derived from that."""
if config is None:
config = cls.get_default_config()
localfolders = os.path.join(cls.testdir, 'mail')
config.set("Repository Maildir", "localfolders", localfolders)
with open(os.path.join(cls.testdir, 'offlineimap.conf'), "wt") as f:
config.write(f)
@classmethod
def delete_test_dir(cls):
"""Deletes the current test directory
        The user is responsible for cleaning that up herself."""
if os.path.isdir(cls.testdir):
shutil.rmtree(cls.testdir)
@classmethod
def run_OLI(cls):
"""Runs OfflineImap
:returns: (rescode, stdout (as unicode))
"""
try:
output = subprocess.check_output(
[cls.cmd,
"-c%s" % os.path.join(cls.testdir, 'offlineimap.conf')],
shell=False)
except subprocess.CalledProcessError as e:
return (e.returncode, e.output.decode('utf-8'))
return (0, output.decode('utf-8'))
@classmethod
def delete_remote_testfolders(cls, reponame=None):
"""Delete all INBOX.OLITEST* folders on the remote IMAP repository
reponame: All on `reponame` or all IMAP-type repositories if None"""
config = cls.get_default_config()
if reponame:
sections = ['Repository {0}'.format(reponame)]
else:
sections = [r for r in config.sections() \
if r.startswith('Repository')]
sections = filter(lambda s: \
config.get(s, 'Type').lower() == 'imap',
sections)
for sec in sections:
# Connect to each IMAP repo and delete all folders
# matching the folderfilter setting. We only allow basic
# settings and no fancy password getting here...
# 1) connect and get dir listing
host = config.get(sec, 'remotehost')
user = config.get(sec, 'remoteuser')
passwd = config.get(sec, 'remotepass')
imapobj = imaplib.IMAP4(host)
imapobj.login(user, passwd)
res_t, data = imapobj.list()
assert res_t == 'OK'
dirs = []
for d in data:
m = re.search(br''' # Find last quote
"((?: # Non-tripple quoted can contain...
[^"] | # a non-quote
\\" # a backslashded quote
)*)" # closing quote
[^"]*$ # followed by no more quotes
''', d, flags=re.VERBOSE)
folder = bytearray(m.group(1))
#folder = folder.replace(br'\"', b'"') # remove quoting
dirs.append(folder)
# 2) filter out those not starting with INBOX.OLItest and del...
dirs = [d for d in dirs if d.startswith(b'INBOX.OLItest')]
for folder in dirs:
res_t, data = imapobj.delete(b'\"'+folder+b'\"')
assert res_t == 'OK', "Folder deletion of {0} failed with error"\
":\n{1} {2}".format(folder.decode('utf-8'), res_t, data)
imapobj.logout()
@classmethod
def create_maildir(cls, folder):
"""Create empty maildir 'folder' in our test maildir
Does not fail if it already exists"""
assert cls.testdir != None
maildir = os.path.join(cls.testdir, 'mail', folder)
for subdir in ('','tmp','cur','new'):
try:
os.makedirs(os.path.join(maildir, subdir))
except OSError as e:
if e.errno != 17: # 'already exists' is ok.
raise
@classmethod
def delete_maildir(cls, folder):
"""Delete maildir 'folder' in our test maildir
Does not fail if not existing"""
assert cls.testdir != None
maildir = os.path.join(cls.testdir, 'mail', folder)
shutil.rmtree(maildir, ignore_errors=True)
@classmethod
def create_mail(cls, folder, mailfile=None, content=None):
"""Create a mail in maildir 'folder'/new
Use default mailfilename if not given.
Use some default content if not given"""
assert cls.testdir != None
while True: # Loop till we found a unique filename
mailfile = '{0}:2,'.format(random.randint(0,999999999))
mailfilepath = os.path.join(cls.testdir, 'mail',
folder, 'new', mailfile)
if not os.path.isfile(mailfilepath):
break
with open(mailfilepath,"wb") as mailf:
mailf.write(b'''From: test <test@offlineimap.org>
Subject: Boo
Date: 1 Jan 1980
To: test@offlineimap.org

Content here.''')
@classmethod
def count_maildir_mails(cls, folder):
"""Returns the number of mails in maildir 'folder'
Counting only those in cur&new (ignoring tmp)."""
assert cls.testdir != None
maildir = os.path.join(cls.testdir, 'mail', folder)
boxes, mails = 0, 0
for dirpath, dirs, files in os.walk(maildir, False):
if set(dirs) == set(['cur', 'new', 'tmp']):
# New maildir folder
boxes += 1
#raise RuntimeError("%s is not Maildir" % maildir)
if dirpath.endswith(('/cur', '/new')):
mails += len(files)
return boxes, mails
# find UID in a maildir filename
    re_uidmatch = re.compile(r',U=(\d+)')
@classmethod
def get_maildir_uids(cls, folder):
"""Returns a list of maildir mail uids, 'None' if no valid uid"""
assert cls.testdir != None
mailfilepath = os.path.join(cls.testdir, 'mail', folder)
assert os.path.isdir(mailfilepath)
ret = []
for dirpath, dirs, files in os.walk(mailfilepath):
if not dirpath.endswith((os.path.sep + 'new', os.path.sep + 'cur')):
continue # only /new /cur are interesting
for file in files:
m = cls.re_uidmatch.search(file)
uid = m.group(1) if m else None
ret.append(uid)
return ret
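# Illustrative usage sketch (assumed file names, not part of the original module):
#   lib = OLITestLib(cred_file='./credentials.conf', cmd='./offlineimap.py')
#   OLITestLib.create_test_dir(suffix='basic')
#   code, output = OLITestLib.run_OLI()
#   boxes, mails = OLITestLib.count_maildir_mails('INBOX.OLItest')
#   OLITestLib.delete_test_dir()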
|
agrista/odoo-saas
|
refs/heads/master
|
addons/calendar/calendar.py
|
11
|
# -*- coding: utf-8 -*-
import pytz
import re
import time
import openerp
import openerp.service.report
import uuid
import collections
from werkzeug.exceptions import BadRequest
from datetime import datetime, timedelta
from dateutil import parser
from dateutil import rrule
from dateutil.relativedelta import relativedelta
from openerp import api
from openerp import tools, SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools.translate import _
from openerp.http import request
from operator import itemgetter
import logging
_logger = logging.getLogger(__name__)
def calendar_id2real_id(calendar_id=None, with_date=False):
"""
Convert a "virtual/recurring event id" (type string) into a real event id (type int).
E.g. virtual/recurring event id is 4-20091201100000, so it will return 4.
@param calendar_id: id of calendar
@param with_date: if a value is passed to this param it will return dates based on value of withdate + calendar_id
@return: real event id
"""
if calendar_id and isinstance(calendar_id, (basestring)):
res = calendar_id.split('-')
if len(res) >= 2:
real_id = res[0]
if with_date:
real_date = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT, time.strptime(res[1], "%Y%m%d%H%M%S"))
start = datetime.strptime(real_date, DEFAULT_SERVER_DATETIME_FORMAT)
end = start + timedelta(hours=with_date)
return (int(real_id), real_date, end.strftime(DEFAULT_SERVER_DATETIME_FORMAT))
return int(real_id)
return calendar_id and int(calendar_id) or calendar_id
def get_real_ids(ids):
if isinstance(ids, (basestring, int, long)):
return calendar_id2real_id(ids)
if isinstance(ids, (list, tuple)):
return [calendar_id2real_id(id) for id in ids]
class calendar_attendee(osv.Model):
"""
Calendar Attendee Information
"""
_name = 'calendar.attendee'
_rec_name = 'cn'
_description = 'Attendee information'
def _compute_data(self, cr, uid, ids, name, arg, context=None):
"""
Compute data on function fields for attendee values.
@param ids: list of calendar attendee's IDs
@param name: name of field
@return: dictionary of form {id: {'field Name': value'}}
"""
name = name[0]
result = {}
for attdata in self.browse(cr, uid, ids, context=context):
id = attdata.id
result[id] = {}
if name == 'cn':
if attdata.partner_id:
result[id][name] = attdata.partner_id.name or False
else:
result[id][name] = attdata.email or ''
return result
STATE_SELECTION = [
('needsAction', 'Needs Action'),
('tentative', 'Uncertain'),
('declined', 'Declined'),
('accepted', 'Accepted'),
]
_columns = {
'state': fields.selection(STATE_SELECTION, 'Status', readonly=True, help="Status of the attendee's participation"),
'cn': fields.function(_compute_data, string='Common name', type="char", multi='cn', store=True),
'partner_id': fields.many2one('res.partner', 'Contact', readonly="True"),
'email': fields.char('Email', help="Email of Invited Person"),
'availability': fields.selection([('free', 'Free'), ('busy', 'Busy')], 'Free/Busy', readonly="True"),
'access_token': fields.char('Invitation Token'),
'event_id': fields.many2one('calendar.event', 'Meeting linked'),
}
_defaults = {
'state': 'needsAction',
}
def copy(self, cr, uid, id, default=None, context=None):
raise osv.except_osv(_('Warning!'), _('You cannot duplicate a calendar attendee.'))
def onchange_partner_id(self, cr, uid, ids, partner_id, context=None):
"""
Make entry on email and availability on change of partner_id field.
@param partner_id: changed value of partner id
"""
if not partner_id:
return {'value': {'email': ''}}
partner = self.pool['res.partner'].browse(cr, uid, partner_id, context=context)
return {'value': {'email': partner.email}}
def get_ics_file(self, cr, uid, event_obj, context=None):
"""
Returns iCalendar file for the event invitation.
@param event_obj: event object (browse record)
@return: .ics file content
"""
res = None
def ics_datetime(idate, allday=False):
if idate:
if allday:
return datetime.strptime(idate.split(' ')[0], DEFAULT_SERVER_DATE_FORMAT).replace(tzinfo=pytz.timezone('UTC'))
else:
return datetime.strptime(idate.split('.')[0], DEFAULT_SERVER_DATETIME_FORMAT).replace(tzinfo=pytz.timezone('UTC'))
return False
try:
# FIXME: why isn't this in CalDAV?
import vobject
except ImportError:
return res
cal = vobject.iCalendar()
event = cal.add('vevent')
if not event_obj.start or not event_obj.stop:
raise osv.except_osv(_('Warning!'), _("First you have to specify the date of the invitation."))
event.add('created').value = ics_datetime(time.strftime(DEFAULT_SERVER_DATETIME_FORMAT))
event.add('dtstart').value = ics_datetime(event_obj.start, event_obj.allday)
event.add('dtend').value = ics_datetime(event_obj.stop, event_obj.allday)
event.add('summary').value = event_obj.name
if event_obj.description:
event.add('description').value = event_obj.description
if event_obj.location:
event.add('location').value = event_obj.location
if event_obj.rrule:
event.add('rrule').value = event_obj.rrule
if event_obj.alarm_ids:
for alarm in event_obj.alarm_ids:
valarm = event.add('valarm')
interval = alarm.interval
duration = alarm.duration
trigger = valarm.add('TRIGGER')
trigger.params['related'] = ["START"]
if interval == 'days':
delta = timedelta(days=duration)
elif interval == 'hours':
delta = timedelta(hours=duration)
elif interval == 'minutes':
delta = timedelta(minutes=duration)
trigger.value = delta
valarm.add('DESCRIPTION').value = alarm.name or 'Odoo'
for attendee in event_obj.attendee_ids:
attendee_add = event.add('attendee')
attendee_add.value = 'MAILTO:' + (attendee.email or '')
res = cal.serialize()
return res
def _send_mail_to_attendees(self, cr, uid, ids, email_from=tools.config.get('email_from', False),
template_xmlid='calendar_template_meeting_invitation', context=None):
"""
Send mail for event invitation to event attendees.
@param email_from: email address for user sending the mail
"""
res = False
if self.pool['ir.config_parameter'].get_param(cr, uid, 'calendar.block_mail', default=False) or context.get("no_mail_to_attendees"):
return res
mail_ids = []
data_pool = self.pool['ir.model.data']
mailmess_pool = self.pool['mail.message']
mail_pool = self.pool['mail.mail']
template_pool = self.pool['email.template']
local_context = context.copy()
color = {
'needsAction': 'grey',
'accepted': 'green',
'tentative': '#FFFF00',
'declined': 'red'
}
if not isinstance(ids, (tuple, list)):
ids = [ids]
dummy, template_id = data_pool.get_object_reference(cr, uid, 'calendar', template_xmlid)
dummy, act_id = data_pool.get_object_reference(cr, uid, 'calendar', "view_calendar_event_calendar")
local_context.update({
'color': color,
'action_id': self.pool['ir.actions.act_window'].search(cr, uid, [('view_id', '=', act_id)], context=context)[0],
'dbname': cr.dbname,
'base_url': self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url', default='http://localhost:8069', context=context)
})
for attendee in self.browse(cr, uid, ids, context=context):
if attendee.email and email_from and attendee.email != email_from:
ics_file = self.get_ics_file(cr, uid, attendee.event_id, context=context)
mail_id = template_pool.send_mail(cr, uid, template_id, attendee.id, context=local_context)
vals = {}
if ics_file:
vals['attachment_ids'] = [(0, 0, {'name': 'invitation.ics',
'datas_fname': 'invitation.ics',
'datas': str(ics_file).encode('base64')})]
vals['model'] = None # We don't want to have the mail in the tchatter while in queue!
the_mailmess = mail_pool.browse(cr, uid, mail_id, context=context).mail_message_id
mailmess_pool.write(cr, uid, [the_mailmess.id], vals, context=context)
mail_ids.append(mail_id)
if mail_ids:
res = mail_pool.send(cr, uid, mail_ids, context=context)
return res
def onchange_user_id(self, cr, uid, ids, user_id, *args, **argv):
"""
Make entry on email and availability on change of user_id field.
@param ids: list of attendee's IDs
@param user_id: changed value of User id
@return: dictionary of values which put value in email and availability fields
"""
if not user_id:
return {'value': {'email': ''}}
user = self.pool['res.users'].browse(cr, uid, user_id, *args)
return {'value': {'email': user.email, 'availability': user.availability}}
def do_tentative(self, cr, uid, ids, context=None, *args):
"""
Makes event invitation as Tentative.
@param ids: list of attendee's IDs
"""
return self.write(cr, uid, ids, {'state': 'tentative'}, context)
def do_accept(self, cr, uid, ids, context=None, *args):
"""
Marks event invitation as Accepted.
@param ids: list of attendee's IDs
"""
if context is None:
context = {}
meeting_obj = self.pool['calendar.event']
res = self.write(cr, uid, ids, {'state': 'accepted'}, context)
for attendee in self.browse(cr, uid, ids, context=context):
meeting_obj.message_post(cr, uid, attendee.event_id.id, body=_("%s has accepted the invitation") % (attendee.cn),
subtype="calendar.subtype_invitation", context=context)
return res
def do_decline(self, cr, uid, ids, context=None, *args):
"""
Marks event invitation as Declined.
@param ids: list of calendar attendee's IDs
"""
if context is None:
context = {}
meeting_obj = self.pool['calendar.event']
res = self.write(cr, uid, ids, {'state': 'declined'}, context)
for attendee in self.browse(cr, uid, ids, context=context):
meeting_obj.message_post(cr, uid, attendee.event_id.id, body=_("%s has declined the invitation") % (attendee.cn), subtype="calendar.subtype_invitation", context=context)
return res
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
if not vals.get("email") and vals.get("cn"):
cnval = vals.get("cn").split(':')
email = filter(lambda x: x.__contains__('@'), cnval)
vals['email'] = email and email[0] or ''
vals['cn'] = vals.get("cn")
res = super(calendar_attendee, self).create(cr, uid, vals, context=context)
return res
class res_partner(osv.Model):
_inherit = 'res.partner'
_columns = {
'calendar_last_notif_ack': fields.datetime('Last notification marked as read from base Calendar'),
}
def get_attendee_detail(self, cr, uid, ids, meeting_id, context=None):
"""
Return a list of tuple (id, name, status)
Used by web_calendar.js : Many2ManyAttendee
"""
datas = []
meeting = None
if meeting_id:
meeting = self.pool['calendar.event'].browse(cr, uid, get_real_ids(meeting_id), context=context)
for partner in self.browse(cr, uid, ids, context=context):
data = self.name_get(cr, uid, [partner.id], context)[0]
if meeting:
for attendee in meeting.attendee_ids:
if attendee.partner_id.id == partner.id:
data = (data[0], data[1], attendee.state)
datas.append(data)
return datas
def _set_calendar_last_notif_ack(self, cr, uid, context=None):
partner = self.pool['res.users'].browse(cr, uid, uid, context=context).partner_id
self.write(cr, uid, partner.id, {'calendar_last_notif_ack': datetime.now()}, context=context)
return
class calendar_alarm_manager(osv.AbstractModel):
_name = 'calendar.alarm_manager'
def get_next_potential_limit_alarm(self, cr, uid, seconds, notif=True, mail=True, partner_id=None, context=None):
res = {}
base_request = """
SELECT
cal.id,
cal.start - interval '1' minute * calcul_delta.max_delta AS first_alarm,
CASE
WHEN cal.recurrency THEN cal.final_date - interval '1' minute * calcul_delta.min_delta
ELSE cal.stop - interval '1' minute * calcul_delta.min_delta
END as last_alarm,
cal.start as first_event_date,
CASE
WHEN cal.recurrency THEN cal.final_date
ELSE cal.stop
END as last_event_date,
calcul_delta.min_delta,
calcul_delta.max_delta,
cal.rrule AS rule
FROM
calendar_event AS cal
RIGHT JOIN
(
SELECT
rel.calendar_event_id, max(alarm.duration_minutes) AS max_delta,min(alarm.duration_minutes) AS min_delta
FROM
calendar_alarm_calendar_event_rel AS rel
LEFT JOIN calendar_alarm AS alarm ON alarm.id = rel.calendar_alarm_id
WHERE alarm.type in %s
GROUP BY rel.calendar_event_id
) AS calcul_delta ON calcul_delta.calendar_event_id = cal.id
"""
filter_user = """
RIGHT JOIN calendar_event_res_partner_rel AS part_rel ON part_rel.calendar_event_id = cal.id
AND part_rel.res_partner_id = %s
"""
#Add filter on type
type_to_read = ()
if notif:
type_to_read += ('notification',)
if mail:
type_to_read += ('email',)
tuple_params = (type_to_read,)
# Add filter on partner_id
if partner_id:
base_request += filter_user
tuple_params += (partner_id, )
# Add filter on the time window (in seconds)
tuple_params += (seconds, seconds,)
cr.execute("""SELECT *
FROM ( %s WHERE cal.active = True ) AS ALL_EVENTS
WHERE ALL_EVENTS.first_alarm < (now() at time zone 'utc' + interval '%%s' second )
AND ALL_EVENTS.last_alarm > (now() at time zone 'utc' - interval '%%s' second )
""" % base_request, tuple_params)
for event_id, first_alarm, last_alarm, first_meeting, last_meeting, min_duration, max_duration, rule in cr.fetchall():
res[event_id] = {
'event_id': event_id,
'first_alarm': first_alarm,
'last_alarm': last_alarm,
'first_meeting': first_meeting,
'last_meeting': last_meeting,
'min_duration': min_duration,
'max_duration': max_duration,
'rrule': rule
}
return res
def do_check_alarm_for_one_date(self, cr, uid, one_date, event, event_maxdelta, in_the_next_X_seconds, after=False, notif=True, mail=True, context=None):
res = []
alarm_type = []
if notif:
alarm_type.append('notification')
if mail:
alarm_type.append('email')
if one_date - timedelta(minutes=event_maxdelta) < datetime.now() + timedelta(seconds=in_the_next_X_seconds): # if an alarm is possible for this date
for alarm in event.alarm_ids:
if alarm.type in alarm_type and \
one_date - timedelta(minutes=alarm.duration_minutes) < datetime.now() + timedelta(seconds=in_the_next_X_seconds) and \
(not after or one_date - timedelta(minutes=alarm.duration_minutes) > datetime.strptime(after.split('.')[0], DEFAULT_SERVER_DATETIME_FORMAT)):
alert = {
'alarm_id': alarm.id,
'event_id': event.id,
'notify_at': one_date - timedelta(minutes=alarm.duration_minutes),
}
res.append(alert)
return res
def get_next_mail(self, cr, uid, context=None):
try:
cron = self.pool['ir.model.data'].get_object(
cr, uid, 'calendar', 'ir_cron_scheduler_alarm', context=context)
except ValueError:
_logger.error("Cron for " + self._name + " can not be identified !")
return False
if cron.interval_type == "weeks":
cron_interval = cron.interval_number * 7 * 24 * 60 * 60
elif cron.interval_type == "days":
cron_interval = cron.interval_number * 24 * 60 * 60
elif cron.interval_type == "hours":
cron_interval = cron.interval_number * 60 * 60
elif cron.interval_type == "minutes":
cron_interval = cron.interval_number * 60
elif cron.interval_type == "seconds":
cron_interval = cron.interval_number
else:
cron_interval = False
if not cron_interval:
_logger.error("Cron delay can not be computed !")
return False
all_events = self.get_next_potential_limit_alarm(cr, uid, cron_interval, notif=False, context=context)
for event in all_events: # .values()
max_delta = all_events[event]['max_duration']
curEvent = self.pool.get('calendar.event').browse(cr, uid, event, context=context)
if curEvent.recurrency:
bFound = False
LastFound = False
for one_date in self.pool.get('calendar.event').get_recurrent_date_by_event(cr, uid, curEvent, context=context):
in_date_format = one_date.replace(tzinfo=None)
LastFound = self.do_check_alarm_for_one_date(cr, uid, in_date_format, curEvent, max_delta, cron_interval, notif=False, context=context)
if LastFound:
for alert in LastFound:
self.do_mail_reminder(cr, uid, alert, context=context)
if not bFound: # if it's the first alarm for this recurrent event
bFound = True
if bFound and not LastFound:  # if the previous occurrence had an alarm but this one does not, we can stop the search for this event
break
else:
in_date_format = datetime.strptime(curEvent.start, DEFAULT_SERVER_DATETIME_FORMAT)
LastFound = self.do_check_alarm_for_one_date(cr, uid, in_date_format, curEvent, max_delta, cron_interval, notif=False, context=context)
if LastFound:
for alert in LastFound:
self.do_mail_reminder(cr, uid, alert, context=context)
def get_next_notif(self, cr, uid, context=None):
ajax_check_every_seconds = 300
partner = self.pool.get('res.users').browse(cr, uid, uid, context=context).partner_id
all_notif = []
if not partner:
return []
all_events = self.get_next_potential_limit_alarm(cr, uid, ajax_check_every_seconds, partner_id=partner.id, mail=False, context=context)
for event in all_events: # .values()
max_delta = all_events[event]['max_duration']
curEvent = self.pool.get('calendar.event').browse(cr, uid, event, context=context)
if curEvent.recurrency:
bFound = False
LastFound = False
for one_date in self.pool.get("calendar.event").get_recurrent_date_by_event(cr, uid, curEvent, context=context):
in_date_format = one_date.replace(tzinfo=None)
LastFound = self.do_check_alarm_for_one_date(cr, uid, in_date_format, curEvent, max_delta, ajax_check_every_seconds, after=partner.calendar_last_notif_ack, mail=False, context=context)
if LastFound:
for alert in LastFound:
all_notif.append(self.do_notif_reminder(cr, uid, alert, context=context))
if not bFound: # if it's the first alarm for this recurrent event
bFound = True
if bFound and not LastFound:  # if the previous occurrence had an alarm but this one does not, we can stop the search for this event
break
else:
in_date_format = datetime.strptime(curEvent.start, DEFAULT_SERVER_DATETIME_FORMAT)
LastFound = self.do_check_alarm_for_one_date(cr, uid, in_date_format, curEvent, max_delta, ajax_check_every_seconds, partner.calendar_last_notif_ack, mail=False, context=context)
if LastFound:
for alert in LastFound:
all_notif.append(self.do_notif_reminder(cr, uid, alert, context=context))
return all_notif
def do_mail_reminder(self, cr, uid, alert, context=None):
if context is None:
context = {}
res = False
event = self.pool['calendar.event'].browse(cr, uid, alert['event_id'], context=context)
alarm = self.pool['calendar.alarm'].browse(cr, uid, alert['alarm_id'], context=context)
if alarm.type == 'email':
res = self.pool['calendar.attendee']._send_mail_to_attendees(cr, uid, [att.id for att in event.attendee_ids], template_xmlid='calendar_template_meeting_reminder', context=context)
return res
def do_notif_reminder(self, cr, uid, alert, context=None):
alarm = self.pool['calendar.alarm'].browse(cr, uid, alert['alarm_id'], context=context)
event = self.pool['calendar.event'].browse(cr, uid, alert['event_id'], context=context)
if alarm.type == 'notification':
message = event.display_time
delta = alert['notify_at'] - datetime.now()
delta = delta.seconds + delta.days * 3600 * 24
return {
'event_id': event.id,
'title': event.name,
'message': message,
'timer': delta,
'notify_at': alert['notify_at'].strftime(DEFAULT_SERVER_DATETIME_FORMAT),
}
class calendar_alarm(osv.Model):
_name = 'calendar.alarm'
_description = 'Event alarm'
def _get_duration(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for alarm in self.browse(cr, uid, ids, context=context):
if alarm.interval == "minutes":
res[alarm.id] = alarm.duration
elif alarm.interval == "hours":
res[alarm.id] = alarm.duration * 60
elif alarm.interval == "days":
res[alarm.id] = alarm.duration * 60 * 24
else:
res[alarm.id] = 0
return res
_columns = {
'name': fields.char('Name', required=True),
'type': fields.selection([('notification', 'Notification'), ('email', 'Email')], 'Type', required=True),
'duration': fields.integer('Amount', required=True),
'interval': fields.selection([('minutes', 'Minutes'), ('hours', 'Hours'), ('days', 'Days')], 'Unit', required=True),
'duration_minutes': fields.function(_get_duration, type='integer', string='duration_minutes', store=True),
}
_defaults = {
'type': 'notification',
'duration': 1,
'interval': 'hours',
}
def _update_cron(self, cr, uid, context=None):
try:
cron = self.pool['ir.model.data'].get_object(
cr, uid, 'calendar', 'ir_cron_scheduler_alarm', context=context)
except ValueError:
return False
return cron.toggle(model=self._name, domain=[('type', '=', 'email')])
def create(self, cr, uid, values, context=None):
res = super(calendar_alarm, self).create(cr, uid, values, context=context)
self._update_cron(cr, uid, context=context)
return res
def write(self, cr, uid, ids, values, context=None):
res = super(calendar_alarm, self).write(cr, uid, ids, values, context=context)
self._update_cron(cr, uid, context=context)
return res
def unlink(self, cr, uid, ids, context=None):
res = super(calendar_alarm, self).unlink(cr, uid, ids, context=context)
self._update_cron(cr, uid, context=context)
return res
class ir_values(osv.Model):
_inherit = 'ir.values'
def set(self, cr, uid, key, key2, name, models, value, replace=True, isobject=False, meta=False, preserve_user=False, company=False):
new_model = []
for data in models:
if type(data) in (list, tuple):
new_model.append((data[0], calendar_id2real_id(data[1])))
else:
new_model.append(data)
return super(ir_values, self).set(cr, uid, key, key2, name, new_model,
value, replace, isobject, meta, preserve_user, company)
def get(self, cr, uid, key, key2, models, meta=False, context=None, res_id_req=False, without_user=True, key2_req=True):
if context is None:
context = {}
new_model = []
for data in models:
if type(data) in (list, tuple):
new_model.append((data[0], calendar_id2real_id(data[1])))
else:
new_model.append(data)
return super(ir_values, self).get(cr, uid, key, key2, new_model,
meta, context, res_id_req, without_user, key2_req)
class ir_model(osv.Model):
_inherit = 'ir.model'
def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
new_ids = isinstance(ids, (basestring, int, long)) and [ids] or ids
if context is None:
context = {}
data = super(ir_model, self).read(cr, uid, new_ids, fields=fields, context=context, load=load)
if data:
for val in data:
val['id'] = calendar_id2real_id(val['id'])
return isinstance(ids, (basestring, int, long)) and data[0] or data
original_exp_report = openerp.service.report.exp_report
def exp_report(db, uid, object, ids, data=None, context=None):
"""
Export Report
"""
if object == 'printscreen.list':
return original_exp_report(db, uid, object, ids, data, context)
new_ids = []
for id in ids:
new_ids.append(calendar_id2real_id(id))
if data.get('id', False):
data['id'] = calendar_id2real_id(data['id'])
return original_exp_report(db, uid, object, new_ids, data, context)
openerp.service.report.exp_report = exp_report
class calendar_event_type(osv.Model):
_name = 'calendar.event.type'
_description = 'Meeting Type'
_columns = {
'name': fields.char('Name', required=True, translate=True),
}
class calendar_event(osv.Model):
""" Model for Calendar Event """
_name = 'calendar.event'
_description = "Event"
_order = "id desc"
_inherit = ["mail.thread", "ir.needaction_mixin"]
def do_run_scheduler(self, cr, uid, id, context=None):
self.pool['calendar.alarm_manager'].get_next_mail(cr, uid, context=context)
def get_recurrent_date_by_event(self, cr, uid, event, context=None):
"""Get recurrent dates based on Rule string and all event where recurrent_id is child
"""
def todate(date):
val = parser.parse(''.join((re.compile('\d')).findall(date)))
## Dates are localized to saved timezone if any, else current timezone.
if not val.tzinfo:
val = pytz.UTC.localize(val)
return val.astimezone(timezone)
timezone = pytz.timezone(context.get('tz') or 'UTC')
startdate = pytz.UTC.localize(datetime.strptime(event.start, DEFAULT_SERVER_DATETIME_FORMAT)) # Add "+hh:mm" timezone
if not startdate:
startdate = datetime.now()
## Convert the start date to saved timezone (or context tz) as it'll
## define the correct hour/day asked by the user to repeat for recurrence.
startdate = startdate.astimezone(timezone) # transform "+hh:mm" timezone
rset1 = rrule.rrulestr(str(event.rrule), dtstart=startdate, forceset=True)
ids_depending = self.search(cr, uid, [('recurrent_id', '=', event.id), '|', ('active', '=', False), ('active', '=', True)], context=context)
all_events = self.browse(cr, uid, ids_depending, context=context)
for ev in all_events:
rset1._exdate.append(todate(ev.recurrent_id_date))
return [d.astimezone(pytz.UTC) for d in rset1]
def _get_recurrency_end_date(self, cr, uid, id, context=None):
data = self.read(cr, uid, id, ['final_date', 'recurrency', 'rrule_type', 'count', 'end_type', 'stop'], context=context)
if not data.get('recurrency'):
return False
end_type = data.get('end_type')
final_date = data.get('final_date')
if end_type == 'count' and all(data.get(key) for key in ['count', 'rrule_type', 'stop']):
count = data['count'] + 1
delay, mult = {
'daily': ('days', 1),
'weekly': ('days', 7),
'monthly': ('months', 1),
'yearly': ('years', 1),
}[data['rrule_type']]
deadline = datetime.strptime(data['stop'], tools.DEFAULT_SERVER_DATETIME_FORMAT)
return deadline + relativedelta(**{delay: count * mult})
return final_date
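# Worked example (hypothetical values, added for clarity): for a weekly
# recurrence with count=3 and stop='2014-01-06 10:00:00', the code above uses
# count = 3 + 1 = 4 and (delay, mult) = ('days', 7), so the returned date is
# 2014-01-06 + 4 * 7 days = 2014-02-03. For 'end_date' recurrences the stored
# final_date is returned unchanged.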
def _find_my_attendee(self, cr, uid, meeting_ids, context=None):
"""
Return the first attendee record for which the connected user is invited, among all the meeting_ids given as parameters
"""
user = self.pool['res.users'].browse(cr, uid, uid, context=context)
for meeting_id in meeting_ids:
for attendee in self.browse(cr, uid, meeting_id, context).attendee_ids:
if user.partner_id.id == attendee.partner_id.id:
return attendee
return False
def get_date_formats(self, cr, uid, context):
lang = context.get("lang")
res_lang = self.pool.get('res.lang')
lang_params = {}
if lang:
ids = res_lang.search(cr, uid, [("code", "=", lang)])
if ids:
lang_params = res_lang.read(cr, uid, ids[0], ["date_format", "time_format"])
# formats will be used for str{f,p}time() which do not support unicode in Python 2, coerce to str
format_date = lang_params.get("date_format", '%B-%d-%Y').encode('utf-8')
format_time = lang_params.get("time_format", '%I-%M %p').encode('utf-8')
return (format_date, format_time)
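# Example (hypothetical values, added for clarity): with the fallback formats
# above, datetime(2013, 8, 23, 16, 30) renders as:
#   date.strftime('%B-%d-%Y')  -> 'August-23-2013'
#   date.strftime('%I-%M %p')  -> '04-30 PM'
# A language record with its own date_format/time_format overrides these.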
def get_display_time_tz(self, cr, uid, ids, tz=False, context=None):
context = dict(context or {})
if tz:
context["tz"] = tz
ev = self.browse(cr, uid, ids, context=context)[0]
return self._get_display_time(cr, uid, ev.start, ev.stop, ev.duration, ev.allday, context=context)
def _get_display_time(self, cr, uid, start, stop, zduration, zallday, context=None):
"""
Return the date and time range (from - to) based on duration, with timezone, as a string:
eg.
1) if user add duration for 2 hours, return : August-23-2013 at (04-30 To 06-30) (Europe/Brussels)
2) if event all day ,return : AllDay, July-31-2013
"""
context = dict(context or {})
tz = context.get('tz', False)
if not tz:  # tz can be False, so don't rely on the default value of get()!
context['tz'] = self.pool.get('res.users').read(cr, SUPERUSER_ID, uid, ['tz'])['tz']
tz = context['tz']
tz = tools.ustr(tz).encode('utf-8') # make safe for str{p,f}time()
format_date, format_time = self.get_date_formats(cr, uid, context=context)
date = fields.datetime.context_timestamp(cr, uid, datetime.strptime(start, tools.DEFAULT_SERVER_DATETIME_FORMAT), context=context)
date_deadline = fields.datetime.context_timestamp(cr, uid, datetime.strptime(stop, tools.DEFAULT_SERVER_DATETIME_FORMAT), context=context)
event_date = date.strftime(format_date)
display_time = date.strftime(format_time)
if zallday:
time = _("AllDay , %s") % (event_date)
elif zduration < 24:
duration = date + timedelta(hours=zduration)
time = ("%s at (%s To %s) (%s)") % (event_date, display_time, duration.strftime(format_time), tz)
else:
time = ("%s at %s To\n %s at %s (%s)") % (event_date, display_time, date_deadline.strftime(format_date), date_deadline.strftime(format_time), tz)
return time
def _compute(self, cr, uid, ids, fields, arg, context=None):
res = {}
if not isinstance(fields, list):
fields = [fields]
for meeting in self.browse(cr, uid, ids, context=context):
meeting_data = {}
res[meeting.id] = meeting_data
attendee = self._find_my_attendee(cr, uid, [meeting.id], context)
for field in fields:
if field == 'is_attendee':
meeting_data[field] = bool(attendee)
elif field == 'attendee_status':
meeting_data[field] = attendee.state if attendee else 'needsAction'
elif field == 'display_time':
meeting_data[field] = self._get_display_time(cr, uid, meeting.start, meeting.stop, meeting.duration, meeting.allday, context=context)
elif field == "display_start":
meeting_data[field] = meeting.start_date if meeting.allday else meeting.start_datetime
elif field == 'start':
meeting_data[field] = meeting.start_date if meeting.allday else meeting.start_datetime
elif field == 'stop':
meeting_data[field] = meeting.stop_date if meeting.allday else meeting.stop_datetime
return res
def _get_rulestring(self, cr, uid, ids, name, arg, context=None):
"""
Gets Recurrence rule string according to value type RECUR of iCalendar from the values given.
@return: dictionary of rrule value.
"""
result = {}
if not isinstance(ids, list):
ids = [ids]
#read these fields as SUPERUSER because if the record is private a normal search could raise an error
events = self.read(cr, SUPERUSER_ID, ids,
['id', 'byday', 'recurrency', 'final_date', 'rrule_type', 'month_by',
'interval', 'count', 'end_type', 'mo', 'tu', 'we', 'th', 'fr', 'sa',
'su', 'day', 'week_list'], context=context)
for event in events:
if event['recurrency']:
result[event['id']] = self.compute_rule_string(event)
else:
result[event['id']] = ''
return result
# retro compatibility function
def _rrule_write(self, cr, uid, ids, field_name, field_value, args, context=None):
return self._set_rulestring(cr, uid, ids, field_name, field_value, args, context=context)
def _set_rulestring(self, cr, uid, ids, field_name, field_value, args, context=None):
if not isinstance(ids, list):
ids = [ids]
data = self._get_empty_rrule_data()
if field_value:
data['recurrency'] = True
for event in self.browse(cr, uid, ids, context=context):
rdate = event.start
update_data = self._parse_rrule(field_value, dict(data), rdate)
data.update(update_data)
self.write(cr, uid, ids, data, context=context)
return True
def _set_date(self, cr, uid, values, id=False, context=None):
if context is None:
context = {}
if values.get('start_datetime') or values.get('start_date') or values.get('start') \
or values.get('stop_datetime') or values.get('stop_date') or values.get('stop'):
allday = values.get("allday", None)
event = self.browse(cr, uid, id, context=context)
if allday is None:
if id:
allday = event.allday
else:
allday = False
_logger.warning("Calendar - All day is not specified, arbitrarily set to False")
#raise osv.except_osv(_('Error!'), ("Need to know if it's an allday or not..."))
key = "date" if allday else "datetime"
notkey = "datetime" if allday else "date"
for fld in ('start', 'stop'):
if values.get('%s_%s' % (fld, key)) or values.get(fld):
values['%s_%s' % (fld, key)] = values.get('%s_%s' % (fld, key)) or values.get(fld)
values['%s_%s' % (fld, notkey)] = None
if fld not in values.keys():
values[fld] = values['%s_%s' % (fld, key)]
diff = False
if allday and (values.get('stop_date') or values.get('start_date')):
stop_date = values.get('stop_date') or event.stop_date
start_date = values.get('start_date') or event.start_date
if stop_date and start_date:
diff = datetime.strptime(stop_date.split(' ')[0], DEFAULT_SERVER_DATE_FORMAT) - datetime.strptime(start_date.split(' ')[0], DEFAULT_SERVER_DATE_FORMAT)
elif values.get('stop_datetime') or values.get('start_datetime'):
stop_datetime = values.get('stop_datetime') or event.stop_datetime
start_datetime = values.get('start_datetime') or event.start_datetime
if stop_datetime and start_datetime:
diff = datetime.strptime(stop_datetime.split('.')[0], DEFAULT_SERVER_DATETIME_FORMAT) - datetime.strptime(start_datetime.split('.')[0], DEFAULT_SERVER_DATETIME_FORMAT)
if diff:
duration = float(diff.days) * 24 + (float(diff.seconds) / 3600)
values['duration'] = round(duration, 2)
_track = {
'location': {
'calendar.subtype_invitation': lambda self, cr, uid, obj, ctx=None: True,
},
'start': {
'calendar.subtype_invitation': lambda self, cr, uid, obj, ctx=None: True,
},
}
_columns = {
'id': fields.integer('ID', readonly=True),
'state': fields.selection([('draft', 'Unconfirmed'), ('open', 'Confirmed')], string='Status', readonly=True, track_visibility='onchange'),
'name': fields.char('Meeting Subject', required=True, states={'done': [('readonly', True)]}),
'is_attendee': fields.function(_compute, string='Attendee', type="boolean", multi='attendee'),
'attendee_status': fields.function(_compute, string='Attendee Status', type="selection", selection=calendar_attendee.STATE_SELECTION, multi='attendee'),
'display_time': fields.function(_compute, string='Event Time', type="char", multi='attendee'),
'display_start': fields.function(_compute, string='Date', type="char", multi='attendee', store=True),
'allday': fields.boolean('All Day', states={'done': [('readonly', True)]}),
'start': fields.function(_compute, string='Calculated start', type="datetime", multi='attendee', store=True, required=True),
'stop': fields.function(_compute, string='Calculated stop', type="datetime", multi='attendee', store=True, required=True),
'start_date': fields.date('Start Date', states={'done': [('readonly', True)]}, track_visibility='onchange'),
'start_datetime': fields.datetime('Start DateTime', states={'done': [('readonly', True)]}, track_visibility='onchange'),
'stop_date': fields.date('End Date', states={'done': [('readonly', True)]}, track_visibility='onchange'),
'stop_datetime': fields.datetime('End Datetime', states={'done': [('readonly', True)]}, track_visibility='onchange'), # old date_deadline
'duration': fields.float('Duration', states={'done': [('readonly', True)]}),
'description': fields.text('Description', states={'done': [('readonly', True)]}),
'class': fields.selection([('public', 'Public'), ('private', 'Private'), ('confidential', 'Public for Employees')], 'Privacy', states={'done': [('readonly', True)]}),
'location': fields.char('Location', help="Location of Event", track_visibility='onchange', states={'done': [('readonly', True)]}),
'show_as': fields.selection([('free', 'Free'), ('busy', 'Busy')], 'Show Time as', states={'done': [('readonly', True)]}),
# RECURRENCE FIELD
'rrule': fields.function(_get_rulestring, type='char', fnct_inv=_set_rulestring, store=True, string='Recurrent Rule'),
'rrule_type': fields.selection([('daily', 'Day(s)'), ('weekly', 'Week(s)'), ('monthly', 'Month(s)'), ('yearly', 'Year(s)')], 'Recurrency', states={'done': [('readonly', True)]}, help="Let the event automatically repeat at that interval"),
'recurrency': fields.boolean('Recurrent', help="Recurrent Meeting"),
'recurrent_id': fields.integer('Recurrent ID'),
'recurrent_id_date': fields.datetime('Recurrent ID date'),
'end_type': fields.selection([('count', 'Number of repetitions'), ('end_date', 'End date')], 'Recurrence Termination'),
'interval': fields.integer('Repeat Every', help="Repeat every (Days/Week/Month/Year)"),
'count': fields.integer('Repeat', help="Repeat x times"),
'mo': fields.boolean('Mon'),
'tu': fields.boolean('Tue'),
'we': fields.boolean('Wed'),
'th': fields.boolean('Thu'),
'fr': fields.boolean('Fri'),
'sa': fields.boolean('Sat'),
'su': fields.boolean('Sun'),
'month_by': fields.selection([('date', 'Date of month'), ('day', 'Day of month')], 'Option', oldname='select1'),
'day': fields.integer('Date of month'),
'week_list': fields.selection([('MO', 'Monday'), ('TU', 'Tuesday'), ('WE', 'Wednesday'), ('TH', 'Thursday'), ('FR', 'Friday'), ('SA', 'Saturday'), ('SU', 'Sunday')], 'Weekday'),
'byday': fields.selection([('1', 'First'), ('2', 'Second'), ('3', 'Third'), ('4', 'Fourth'), ('5', 'Fifth'), ('-1', 'Last')], 'By day'),
'final_date': fields.date('Repeat Until'), # The last event of a recurrence
'user_id': fields.many2one('res.users', 'Responsible', states={'done': [('readonly', True)]}),
'color_partner_id': fields.related('user_id', 'partner_id', 'id', type="integer", string="colorize", store=False), # Color of creator
'active': fields.boolean('Active', help="If the active field is set to true, it will allow you to hide the event alarm information without removing it."),
'categ_ids': fields.many2many('calendar.event.type', 'meeting_category_rel', 'event_id', 'type_id', 'Tags'),
'attendee_ids': fields.one2many('calendar.attendee', 'event_id', 'Attendees', ondelete='cascade'),
'partner_ids': fields.many2many('res.partner', 'calendar_event_res_partner_rel', string='Attendees', states={'done': [('readonly', True)]}),
'alarm_ids': fields.many2many('calendar.alarm', 'calendar_alarm_calendar_event_rel', string='Reminders', ondelete="restrict", copy=False),
}
def _get_default_partners(self, cr, uid, ctx=None):
ret = [self.pool['res.users'].browse(cr, uid, uid, context=ctx).partner_id.id]
active_id = ctx.get('active_id')
if ctx.get('active_model') == 'res.partner' and active_id:
if active_id not in ret:
ret.append(active_id)
return ret
_defaults = {
'end_type': 'count',
'count': 1,
'rrule_type': False,
'allday': False,
'state': 'draft',
'class': 'public',
'show_as': 'busy',
'month_by': 'date',
'interval': 1,
'active': 1,
'user_id': lambda self, cr, uid, ctx: uid,
'partner_ids': _get_default_partners,
}
def _check_closing_date(self, cr, uid, ids, context=None):
for event in self.browse(cr, uid, ids, context=context):
if event.start_datetime and event.stop_datetime < event.start_datetime:
return False
if event.start_date and event.stop_date < event.start_date:
return False
return True
_constraints = [
(_check_closing_date, 'Error ! End date cannot be set before start date.', ['start_datetime', 'stop_datetime', 'start_date', 'stop_date'])
]
def onchange_allday(self, cr, uid, ids, start=False, end=False, starttime=False, endtime=False, startdatetime=False, enddatetime=False, checkallday=False, context=None):
value = {}
if not ((starttime and endtime) or (start and end)): # At first intialize, we have not datetime
return value
if checkallday: # from datetime to date
startdatetime = startdatetime or start
if startdatetime:
start = datetime.strptime(startdatetime, DEFAULT_SERVER_DATETIME_FORMAT)
value['start_date'] = datetime.strftime(start, DEFAULT_SERVER_DATE_FORMAT)
enddatetime = enddatetime or end
if enddatetime:
end = datetime.strptime(enddatetime, DEFAULT_SERVER_DATETIME_FORMAT)
value['stop_date'] = datetime.strftime(end, DEFAULT_SERVER_DATE_FORMAT)
else: # from date to datetime
user = self.pool['res.users'].browse(cr, uid, uid, context)
tz = pytz.timezone(user.tz) if user.tz else pytz.utc
if starttime:
start = datetime.strptime(starttime.split(' ')[0], DEFAULT_SERVER_DATE_FORMAT)
startdate = tz.localize(start) # Add "+hh:mm" timezone
startdate = startdate.replace(hour=8) # Set 8 AM in localtime
startdate = startdate.astimezone(pytz.utc) # Convert to UTC
value['start_datetime'] = datetime.strftime(startdate, DEFAULT_SERVER_DATETIME_FORMAT)
elif start:
value['start_datetime'] = start
if endtime:
end = datetime.strptime(endtime.split(' ')[0], DEFAULT_SERVER_DATE_FORMAT)
enddate = tz.localize(end).replace(hour=18).astimezone(pytz.utc)
value['stop_datetime'] = datetime.strftime(enddate, DEFAULT_SERVER_DATETIME_FORMAT)
elif end:
value['stop_datetime'] = end
return {'value': value}
def onchange_dates(self, cr, uid, ids, fromtype, start=False, end=False, checkallday=False, allday=False, context=None):
"""Returns duration and end date based on values passed
@param ids: List of calendar event's IDs.
"""
value = {}
if checkallday != allday:
return value
value['allday'] = checkallday  # Force the value to be rewritten
if allday:
if fromtype == 'start' and start:
start = datetime.strptime(start, DEFAULT_SERVER_DATE_FORMAT)
value['start_datetime'] = datetime.strftime(start, DEFAULT_SERVER_DATETIME_FORMAT)
value['start'] = datetime.strftime(start, DEFAULT_SERVER_DATETIME_FORMAT)
if fromtype == 'stop' and end:
end = datetime.strptime(end, DEFAULT_SERVER_DATE_FORMAT)
value['stop_datetime'] = datetime.strftime(end, DEFAULT_SERVER_DATETIME_FORMAT)
value['stop'] = datetime.strftime(end, DEFAULT_SERVER_DATETIME_FORMAT)
else:
if fromtype == 'start' and start:
start = datetime.strptime(start, DEFAULT_SERVER_DATETIME_FORMAT)
value['start_date'] = datetime.strftime(start, DEFAULT_SERVER_DATE_FORMAT)
value['start'] = datetime.strftime(start, DEFAULT_SERVER_DATETIME_FORMAT)
if fromtype == 'stop' and end:
end = datetime.strptime(end, DEFAULT_SERVER_DATETIME_FORMAT)
value['stop_date'] = datetime.strftime(end, DEFAULT_SERVER_DATE_FORMAT)
value['stop'] = datetime.strftime(end, DEFAULT_SERVER_DATETIME_FORMAT)
return {'value': value}
def new_invitation_token(self, cr, uid, record, partner_id):
return uuid.uuid4().hex
def create_attendees(self, cr, uid, ids, context=None):
if context is None:
context = {}
user_obj = self.pool['res.users']
current_user = user_obj.browse(cr, uid, uid, context=context)
res = {}
for event in self.browse(cr, uid, ids, context):
attendees = {}
for att in event.attendee_ids:
attendees[att.partner_id.id] = True
new_attendees = []
new_att_partner_ids = []
for partner in event.partner_ids:
if partner.id in attendees:
continue
access_token = self.new_invitation_token(cr, uid, event, partner.id)
values = {
'partner_id': partner.id,
'event_id': event.id,
'access_token': access_token,
'email': partner.email,
}
if partner.id == current_user.partner_id.id:
values['state'] = 'accepted'
att_id = self.pool['calendar.attendee'].create(cr, uid, values, context=context)
new_attendees.append(att_id)
new_att_partner_ids.append(partner.id)
if not current_user.email or current_user.email != partner.email:
mail_from = current_user.email or tools.config.get('email_from', False)
if not context.get('no_email'):
if self.pool['calendar.attendee']._send_mail_to_attendees(cr, uid, att_id, email_from=mail_from, context=context):
self.message_post(cr, uid, event.id, body=_("An invitation email has been sent to attendee %s") % (partner.name,), subtype="calendar.subtype_invitation", context=context)
if new_attendees:
self.write(cr, uid, [event.id], {'attendee_ids': [(4, att) for att in new_attendees]}, context=context)
if new_att_partner_ids:
self.message_subscribe(cr, uid, [event.id], new_att_partner_ids, context=context)
# We remove old attendees who are not in partner_ids now.
all_partner_ids = [part.id for part in event.partner_ids]
all_part_attendee_ids = [att.partner_id.id for att in event.attendee_ids]
all_attendee_ids = [att.id for att in event.attendee_ids]
partner_ids_to_remove = list(set(all_part_attendee_ids + new_att_partner_ids) - set(all_partner_ids))
attendee_ids_to_remove = []
if partner_ids_to_remove:
attendee_ids_to_remove = self.pool["calendar.attendee"].search(cr, uid, [('partner_id.id', 'in', partner_ids_to_remove), ('event_id.id', '=', event.id)], context=context)
if attendee_ids_to_remove:
self.pool['calendar.attendee'].unlink(cr, uid, attendee_ids_to_remove, context)
res[event.id] = {
'new_attendee_ids': new_attendees,
'old_attendee_ids': all_attendee_ids,
'removed_attendee_ids': attendee_ids_to_remove
}
return res
def get_search_fields(self, browse_event, order_fields, r_date=None):
sort_fields = {}
for ord in order_fields:
if ord == 'id' and r_date:
sort_fields[ord] = '%s-%s' % (browse_event[ord], r_date.strftime("%Y%m%d%H%M%S"))
else:
sort_fields[ord] = browse_event[ord]
if type(browse_event[ord]) is openerp.osv.orm.browse_record:
name_get = browse_event[ord].name_get()
if len(name_get) and len(name_get[0]) >= 2:
sort_fields[ord] = name_get[0][1]
if r_date:
sort_fields['sort_start'] = r_date.strftime("%Y%m%d%H%M%S")
else:
sort_fields['sort_start'] = browse_event['display_start'].replace(' ', '').replace('-', '')
return sort_fields
def get_recurrent_ids(self, cr, uid, event_id, domain, order=None, context=None):
"""Gives virtual event ids for recurring events
This method gives the ids of the occurrences that fall between the start date and end date of the calendar views
@param order: The fields (comma separated, format "FIELD {DESC|ASC}") on which the events should be sorted
"""
if not context:
context = {}
if isinstance(event_id, (basestring, int, long)):
ids_to_browse = [event_id] # keep select for return
else:
ids_to_browse = event_id
if order:
order_fields = [field.split()[0] for field in order.split(',')]
else:
# fallback on self._order defined on the model
order_fields = [field.split()[0] for field in self._order.split(',')]
if 'id' not in order_fields:
order_fields.append('id')
result_data = []
result = []
for ev in self.browse(cr, uid, ids_to_browse, context=context):
if not ev.recurrency or not ev.rrule:
result.append(ev.id)
result_data.append(self.get_search_fields(ev, order_fields))
continue
rdates = self.get_recurrent_date_by_event(cr, uid, ev, context=context)
for r_date in rdates:
# fix domain evaluation
# step 1: check each date condition and replace it by True or False; replace every other expression by True
# step 2: evaluate the & and | operators
# then check whether at least one term evaluated to False
pile = []
ok = True
for arg in domain:
if str(arg[0]) in ('start', 'stop', 'final_date'):
if (arg[1] == '='):
ok = r_date.strftime('%Y-%m-%d') == arg[2]
if (arg[1] == '>'):
ok = r_date.strftime('%Y-%m-%d') > arg[2]
if (arg[1] == '<'):
ok = r_date.strftime('%Y-%m-%d') < arg[2]
if (arg[1] == '>='):
ok = r_date.strftime('%Y-%m-%d') >= arg[2]
if (arg[1] == '<='):
ok = r_date.strftime('%Y-%m-%d') <= arg[2]
pile.append(ok)
elif str(arg) == str('&') or str(arg) == str('|'):
pile.append(arg)
else:
pile.append(True)
pile.reverse()
new_pile = []
for item in pile:
if not isinstance(item, basestring):
res = item
elif str(item) == str('&'):
first = new_pile.pop()
second = new_pile.pop()
res = first and second
elif str(item) == str('|'):
first = new_pile.pop()
second = new_pile.pop()
res = first or second
new_pile.append(res)
if [True for item in new_pile if not item]:
continue
result_data.append(self.get_search_fields(ev, order_fields, r_date=r_date))
if order_fields:
uniq = lambda it: collections.OrderedDict((id(x), x) for x in it).values()
def comparer(left, right):
for fn, mult in comparers:
result = cmp(fn(left), fn(right))
if result:
return mult * result
return 0
sort_params = [key.split()[0] if key[-4:].lower() != 'desc' else '-%s' % key.split()[0] for key in (order or self._order).split(',')]
sort_params = uniq([comp if comp not in ['start', 'start_date', 'start_datetime'] else 'sort_start' for comp in sort_params])
sort_params = uniq([comp if comp not in ['-start', '-start_date', '-start_datetime'] else '-sort_start' for comp in sort_params])
comparers = [((itemgetter(col[1:]), -1) if col[0] == '-' else (itemgetter(col), 1)) for col in sort_params]
ids = [r['id'] for r in sorted(result_data, cmp=comparer)]
if isinstance(event_id, (basestring, int, long)):
return ids and ids[0] or False
else:
return ids
def compute_rule_string(self, data):
"""
Compute rule string according to value type RECUR of iCalendar from the values given.
@param self: the object pointer
@param data: dictionary of freq and interval value
@return: string containing recurring rule (empty if no rule)
"""
if data['interval'] and data['interval'] < 0:
raise osv.except_osv(_('Warning!'), _('Interval cannot be negative.'))
if data['count'] and data['count'] <= 0:
raise osv.except_osv(_('Warning!'), _('Count cannot be negative or 0.'))
def get_week_string(freq, data):
weekdays = ['mo', 'tu', 'we', 'th', 'fr', 'sa', 'su']
if freq == 'weekly':
byday = map(lambda x: x.upper(), filter(lambda x: data.get(x) and x in weekdays, data))
if byday:
return ';BYDAY=' + ','.join(byday)
return ''
def get_month_string(freq, data):
if freq == 'monthly':
if data.get('month_by') == 'date' and (data.get('day') < 1 or data.get('day') > 31):
raise osv.except_osv(_('Error!'), ("Please select a proper day of the month."))
if data.get('month_by') == 'day': # Eg : Second Monday of the month
return ';BYDAY=' + data.get('byday') + data.get('week_list')
elif data.get('month_by') == 'date': # Eg : 16th of the month
return ';BYMONTHDAY=' + str(data.get('day'))
return ''
def get_end_date(data):
if data.get('final_date'):
data['end_date_new'] = ''.join((re.compile('\d')).findall(data.get('final_date'))) + 'T235959Z'
return (data.get('end_type') == 'count' and (';COUNT=' + str(data.get('count'))) or '') +\
((data.get('end_date_new') and data.get('end_type') == 'end_date' and (';UNTIL=' + data.get('end_date_new'))) or '')
freq = data.get('rrule_type', False) # day/week/month/year
res = ''
if freq:
interval_string = data.get('interval') and (';INTERVAL=' + str(data.get('interval'))) or ''
res = 'FREQ=' + freq.upper() + get_week_string(freq, data) + interval_string + get_end_date(data) + get_month_string(freq, data)
return res
def _get_empty_rrule_data(self):
return {
'byday': False,
'recurrency': False,
'final_date': False,
'rrule_type': False,
'month_by': False,
'interval': 0,
'count': False,
'end_type': False,
'mo': False,
'tu': False,
'we': False,
'th': False,
'fr': False,
'sa': False,
'su': False,
'day': False,
'week_list': False
}
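# Illustrative note for _parse_rrule() below (hypothetical rule, added for
# clarity): it is the inverse of compute_rule_string(). For instance, parsing
#   'FREQ=MONTHLY;INTERVAL=1;COUNT=5;BYDAY=2TU'
# fills the dict with roughly {'rrule_type': 'monthly', 'month_by': 'day',
# 'byday': '2', 'week_list': 'TU', 'interval': 1, 'count': 5,
# 'end_type': 'count'} so the form widgets can be repopulated.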
def _parse_rrule(self, rule, data, date_start):
day_list = ['mo', 'tu', 'we', 'th', 'fr', 'sa', 'su']
rrule_type = ['yearly', 'monthly', 'weekly', 'daily']
r = rrule.rrulestr(rule, dtstart=datetime.strptime(date_start, DEFAULT_SERVER_DATETIME_FORMAT))
if r._freq > 0 and r._freq < 4:
data['rrule_type'] = rrule_type[r._freq]
data['count'] = r._count
data['interval'] = r._interval
data['final_date'] = r._until and r._until.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
#repeat weekly
if r._byweekday:
for i in xrange(0, 7):
if i in r._byweekday:
data[day_list[i]] = True
data['rrule_type'] = 'weekly'
#repeat monthly by nweekday ((weekday, weeknumber), )
if r._bynweekday:
data['week_list'] = day_list[r._bynweekday[0][0]].upper()
data['byday'] = str(r._bynweekday[0][1])
data['month_by'] = 'day'
data['rrule_type'] = 'monthly'
if r._bymonthday:
data['day'] = r._bymonthday[0]
data['month_by'] = 'date'
data['rrule_type'] = 'monthly'
# repeat yearly: OpenERP stores it as a monthly rule, keeping the same information as monthly but multiplying the interval by 12
if r._bymonth:
data['interval'] = data['interval'] * 12
# FIXME: handle the "repeat forever" case
# end of recurrence: repeating forever is not supported right now,
# so fall back to a fixed count below
if not (data.get('count') or data.get('final_date')):
data['count'] = 100
if data.get('count'):
data['end_type'] = 'count'
else:
data['end_type'] = 'end_date'
return data
def message_get_subscription_data(self, cr, uid, ids, user_pid=None, context=None):
res = {}
for virtual_id in ids:
real_id = calendar_id2real_id(virtual_id)
result = super(calendar_event, self).message_get_subscription_data(cr, uid, [real_id], user_pid=None, context=context)
res[virtual_id] = result[real_id]
return res
def onchange_partner_ids(self, cr, uid, ids, value, context=None):
""" The basic purpose of this method is to check that destination partners
effectively have email addresses. Otherwise a warning is thrown.
:param value: value format: [[6, 0, [3, 4]]]
"""
res = {'value': {}}
if not value or not value[0] or not value[0][0] == 6:
return res
res.update(self.check_partners_email(cr, uid, value[0][2], context=context))
return res
def check_partners_email(self, cr, uid, partner_ids, context=None):
""" Verify that selected partner_ids have an email_address defined.
Otherwise throw a warning. """
partner_wo_email_lst = []
for partner in self.pool['res.partner'].browse(cr, uid, partner_ids, context=context):
if not partner.email:
partner_wo_email_lst.append(partner)
if not partner_wo_email_lst:
return {}
warning_msg = _('The following contacts have no email address :')
for partner in partner_wo_email_lst:
warning_msg += '\n- %s' % (partner.name)
return {'warning': {
'title': _('Email addresses not found'),
'message': warning_msg,
}}
# shows events of the day for this user
def _needaction_domain_get(self, cr, uid, context=None):
return [
('stop', '<=', time.strftime(DEFAULT_SERVER_DATE_FORMAT + ' 23:59:59')),
('start', '>=', time.strftime(DEFAULT_SERVER_DATE_FORMAT + ' 00:00:00')),
('user_id', '=', uid),
]
@api.cr_uid_ids_context
def message_post(self, cr, uid, thread_id, body='', subject=None, type='notification', subtype=None, parent_id=False, attachments=None, context=None, **kwargs):
if isinstance(thread_id, basestring):
thread_id = get_real_ids(thread_id)
if context and context.get('default_date'):
del context['default_date']
return super(calendar_event, self).message_post(cr, uid, thread_id, body=body, subject=subject, type=type, subtype=subtype, parent_id=parent_id, attachments=attachments, context=context, **kwargs)
def do_sendmail(self, cr, uid, ids, context=None):
for event in self.browse(cr, uid, ids, context):
current_user = self.pool['res.users'].browse(cr, uid, uid, context=context)
if current_user.email:
if self.pool['calendar.attendee']._send_mail_to_attendees(cr, uid, [att.id for att in event.attendee_ids], email_from=current_user.email, context=context):
self.message_post(cr, uid, event.id, body=_("An invitation email has been sent to attendee(s)"), subtype="calendar.subtype_invitation", context=context)
return
def get_attendee(self, cr, uid, meeting_id, context=None):
# Used for view in controller
invitation = {'meeting': {}, 'attendee': []}
meeting = self.browse(cr, uid, int(meeting_id), context=context)
invitation['meeting'] = {
'event': meeting.name,
'where': meeting.location,
'when': meeting.display_time
}
for attendee in meeting.attendee_ids:
invitation['attendee'].append({'name': attendee.cn, 'status': attendee.state})
return invitation
def get_interval(self, cr, uid, ids, date, interval, tz=None, context=None):
#Function used only in calendar_event_data.xml for email template
date = datetime.strptime(date.split('.')[0], DEFAULT_SERVER_DATETIME_FORMAT)
if tz:
timezone = pytz.timezone(tz or 'UTC')
date = date.replace(tzinfo=pytz.timezone('UTC')).astimezone(timezone)
if interval == 'day':
res = str(date.day)
elif interval == 'month':
res = date.strftime('%B') + " " + str(date.year)
elif interval == 'dayname':
res = date.strftime('%A')
elif interval == 'time':
dummy, format_time = self.get_date_formats(cr, uid, context=context)
res = date.strftime(format_time + " %Z")
return res
def search(self, cr, uid, args, offset=0, limit=0, order=None, context=None, count=False):
if context is None:
context = {}
if context.get('mymeetings', False):
partner_id = self.pool['res.users'].browse(cr, uid, uid, context).partner_id.id
args += [('partner_ids', 'in', [partner_id])]
new_args = []
for arg in args:
new_arg = arg
if arg[0] in ('start_date', 'start_datetime', 'start',) and arg[1] == ">=":
if context.get('virtual_id', True):
new_args += ['|', '&', ('recurrency', '=', 1), ('final_date', arg[1], arg[2])]
elif arg[0] == "id":
new_id = get_real_ids(arg[2])
new_arg = (arg[0], arg[1], new_id)
new_args.append(new_arg)
if not context.get('virtual_id', True):
return super(calendar_event, self).search(cr, uid, new_args, offset=offset, limit=limit, order=order, count=count, context=context)
# offset, limit, order and count must be treated separately as we may need to deal with virtual ids
res = super(calendar_event, self).search(cr, uid, new_args, offset=0, limit=0, order=None, context=context, count=False)
res = self.get_recurrent_ids(cr, uid, res, args, order=order, context=context)
if count:
return len(res)
elif limit:
return res[offset: offset + limit]
return res
def copy(self, cr, uid, id, default=None, context=None):
default = default or {}
self._set_date(cr, uid, default, id=default.get('id'), context=context)
return super(calendar_event, self).copy(cr, uid, calendar_id2real_id(id), default, context)
def _detach_one_event(self, cr, uid, id, values=dict(), context=None):
real_event_id = calendar_id2real_id(id)
data = self.read(cr, uid, id, ['allday', 'start', 'stop', 'rrule', 'duration'])
data['start_date' if data['allday'] else 'start_datetime'] = data['start']
data['stop_date' if data['allday'] else 'stop_datetime'] = data['stop']
if data.get('rrule'):
data.update(
values,
recurrent_id=real_event_id,
recurrent_id_date=data.get('start'),
rrule_type=False,
rrule='',
recurrency=False,
final_date=datetime.strptime(data.get('start'), DEFAULT_SERVER_DATETIME_FORMAT if data['allday'] else DEFAULT_SERVER_DATETIME_FORMAT) + timedelta(hours=values.get('duration', False) or data.get('duration'))
)
#do not copy the id
if data.get('id'):
del(data['id'])
new_id = self.copy(cr, uid, real_event_id, default=data, context=context)
return new_id
def open_after_detach_event(self, cr, uid, ids, context=None):
if context is None:
context = {}
new_id = self._detach_one_event(cr, uid, ids[0], context=context)
return {
'type': 'ir.actions.act_window',
'res_model': 'calendar.event',
'view_mode': 'form',
'res_id': new_id,
'target': 'current',
'flags': {'form': {'action_buttons': True, 'options': {'mode': 'edit'}}}
}
def _name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
for arg in args:
if arg[0] == 'id':
for n, calendar_id in enumerate(arg[2]):
if isinstance(calendar_id, basestring):
arg[2][n] = calendar_id.split('-')[0]
return super(calendar_event, self)._name_search(cr, user, name=name, args=args, operator=operator, context=context, limit=limit, name_get_uid=name_get_uid)
def write(self, cr, uid, ids, values, context=None):
def _only_changes_to_apply_on_real_ids(field_names):
''' return True if changes are only to be made on the real ids'''
for field in field_names:
if field in ['start', 'start_date', 'start_datetime', 'stop', 'stop_date', 'stop_datetime', 'active']:
return True
return False
if not isinstance(ids, (tuple, list)):
ids = [ids]
context = context or {}
self._set_date(cr, uid, values, id=ids[0], context=context)
for one_ids in ids:
if isinstance(one_ids, (basestring, int, long)):
if len(str(one_ids).split('-')) == 1:
ids = [int(one_ids)]
else:
ids = [one_ids]
res = False
new_id = False
# Special write of complex IDS
for event_id in list(ids):
if len(str(event_id).split('-')) == 1:
continue
ids.remove(event_id)
real_event_id = calendar_id2real_id(event_id)
# if we are setting the recurrency flag to False or if we are only changing fields that
# should be only updated on the real ID and not on the virtual (like message_follower_ids):
# then set real ids to be updated.
if not values.get('recurrency', True) or not _only_changes_to_apply_on_real_ids(values.keys()):
ids.append(real_event_id)
continue
else:
data = self.read(cr, uid, event_id, ['start', 'stop', 'rrule', 'duration'])
if data.get('rrule'):
new_id = self._detach_one_event(cr, uid, event_id, values, context=None)
res = super(calendar_event, self).write(cr, uid, [int(event_id) for event_id in ids], values, context=context)
# set end_date for calendar searching
if values.get('recurrency', True) and values.get('end_type', 'count') in ('count', unicode('count')) and \
(values.get('rrule_type') or values.get('count') or values.get('start') or values.get('stop')):
for id in ids:
final_date = self._get_recurrency_end_date(cr, uid, id, context=context)
super(calendar_event, self).write(cr, uid, [id], {'final_date': final_date}, context=context)
attendees_create = False
if values.get('partner_ids', False):
attendees_create = self.create_attendees(cr, uid, ids, context)
if (values.get('start_date') or values.get('start_datetime', False)) and values.get('active', True):
the_id = new_id or (ids and int(ids[0]))
if the_id:
if attendees_create:
attendees_create = attendees_create[the_id]
mail_to_ids = list(set(attendees_create['old_attendee_ids']) - set(attendees_create['removed_attendee_ids']))
else:
mail_to_ids = [att.id for att in self.browse(cr, uid, the_id, context=context).attendee_ids]
if mail_to_ids:
current_user = self.pool['res.users'].browse(cr, uid, uid, context=context)
if self.pool['calendar.attendee']._send_mail_to_attendees(cr, uid, mail_to_ids, template_xmlid='calendar_template_meeting_changedate', email_from=current_user.email, context=context):
self.message_post(cr, uid, the_id, body=_("A email has been send to specify that the date has been changed !"), subtype="calendar.subtype_invitation", context=context)
return res or False
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
self._set_date(cr, uid, vals, id=False, context=context)
if 'user_id' not in vals:  # otherwise quick_create breaks when we are filtering on another user
vals['user_id'] = uid
res = super(calendar_event, self).create(cr, uid, vals, context=context)
final_date = self._get_recurrency_end_date(cr, uid, res, context=context)
self.write(cr, uid, [res], {'final_date': final_date}, context=context)
self.create_attendees(cr, uid, [res], context=context)
return res
def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
context = dict(context or {})
if 'date' in groupby:
raise osv.except_osv(_('Warning!'), _('Group by date is not supported, use the calendar view instead.'))
virtual_id = context.get('virtual_id', True)
context.update({'virtual_id': False})
res = super(calendar_event, self).read_group(cr, uid, domain, fields, groupby, offset=offset, limit=limit, context=context, orderby=orderby, lazy=lazy)
for result in res:
# remove the count, since the value is not consistent with the result of the search when expanding the group
for groupname in groupby:
if result.get(groupname + "_count"):
del result[groupname + "_count"]
result.get('__context', {}).update({'virtual_id': virtual_id})
return res
def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
if context is None:
context = {}
fields2 = fields and fields[:] or None
EXTRAFIELDS = ('class', 'user_id', 'duration', 'allday', 'start', 'start_date', 'start_datetime', 'rrule')
for f in EXTRAFIELDS:
if fields and (f not in fields):
fields2.append(f)
if isinstance(ids, (basestring, int, long)):
select = [ids]
else:
select = ids
select = map(lambda x: (x, calendar_id2real_id(x)), select)
result = []
real_data = super(calendar_event, self).read(cr, uid, [real_id for calendar_id, real_id in select], fields=fields2, context=context, load=load)
real_data = dict(zip([x['id'] for x in real_data], real_data))
for calendar_id, real_id in select:
res = real_data[real_id].copy()
ls = calendar_id2real_id(calendar_id, with_date=res and res.get('duration', 0) > 0 and res.get('duration') or 1)
if not isinstance(ls, (basestring, int, long)) and len(ls) >= 2:
res['start'] = ls[1]
res['stop'] = ls[2]
if res['allday']:
res['start_date'] = ls[1]
res['stop_date'] = ls[2]
else:
res['start_datetime'] = ls[1]
res['stop_datetime'] = ls[2]
if 'display_time' in fields:
res['display_time'] = self._get_display_time(cr, uid, ls[1], ls[2], res['duration'], res['allday'], context=context)
res['id'] = calendar_id
result.append(res)
for r in result:
if r['user_id']:
user_id = type(r['user_id']) in (tuple, list) and r['user_id'][0] or r['user_id']
if user_id == uid:
continue
if r['class'] == 'private':
for f in r.keys():
if f not in ('id', 'allday', 'start', 'stop', 'duration', 'user_id', 'state', 'interval', 'count', 'recurrent_id_date', 'rrule'):
if isinstance(r[f], list):
r[f] = []
else:
r[f] = False
if f == 'name':
r[f] = _('Busy')
for r in result:
for k in EXTRAFIELDS:
if (k in r) and (fields and (k not in fields)):
del r[k]
if isinstance(ids, (basestring, int, long)):
return result and result[0] or False
return result
def unlink(self, cr, uid, ids, can_be_deleted=True, context=None):
if not isinstance(ids, list):
ids = [ids]
res = False
ids_to_exclure = []
ids_to_unlink = []
for event_id in ids:
if can_be_deleted and len(str(event_id).split('-')) == 1: # if ID REAL
if self.browse(cr, uid, int(event_id), context).recurrent_id:
ids_to_exclure.append(event_id)
else:
ids_to_unlink.append(int(event_id))
else:
ids_to_exclure.append(event_id)
if ids_to_unlink:
res = super(calendar_event, self).unlink(cr, uid, ids_to_unlink, context=context)
if ids_to_exclure:
for id_to_exclure in ids_to_exclure:
res = self.write(cr, uid, id_to_exclure, {'active': False}, context=context)
return res
class mail_message(osv.Model):
_inherit = "mail.message"
def search(self, cr, uid, args, offset=0, limit=0, order=None, context=None, count=False):
'''
convert a search expressed on virtual ids into a search on the corresponding real ids, then call super()
'''
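# Example (hypothetical id, added for clarity): a domain such as
#   [('res_id', '=', '7-20140304090000')]
# coming from a virtual calendar occurrence is rewritten below to
#   [('res_id', '=', 7)]
# before delegating to the standard mail.message search.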
for index in range(len(args)):
if args[index][0] == "res_id" and isinstance(args[index][2], basestring):
args[index][2] = get_real_ids(args[index][2])
return super(mail_message, self).search(cr, uid, args, offset=offset, limit=limit, order=order, context=context, count=count)
def _find_allowed_model_wise(self, cr, uid, doc_model, doc_dict, context=None):
if context is None:
context = {}
if doc_model == 'calendar.event':
order = context.get('order', self._order)
for virtual_id in self.pool[doc_model].get_recurrent_ids(cr, uid, doc_dict.keys(), [], order=order, context=context):
doc_dict.setdefault(virtual_id, doc_dict[get_real_ids(virtual_id)])
return super(mail_message, self)._find_allowed_model_wise(cr, uid, doc_model, doc_dict, context=context)
class ir_attachment(osv.Model):
_inherit = "ir.attachment"
def search(self, cr, uid, args, offset=0, limit=0, order=None, context=None, count=False):
'''
convert a search expressed on virtual ids into a search on the corresponding real ids, then call super()
'''
for index in range(len(args)):
if args[index][0] == "res_id" and isinstance(args[index][2], basestring):
args[index][2] = get_real_ids(args[index][2])
return super(ir_attachment, self).search(cr, uid, args, offset=offset, limit=limit, order=order, context=context, count=count)
def write(self, cr, uid, ids, vals, context=None):
'''
when posting an attachment (new or not), convert the virtual ids into real ids.
'''
if isinstance(vals.get('res_id'), basestring):
vals['res_id'] = get_real_ids(vals.get('res_id'))
return super(ir_attachment, self).write(cr, uid, ids, vals, context=context)
class ir_http(osv.AbstractModel):
_inherit = 'ir.http'
def _auth_method_calendar(self):
token = request.params['token']
db = request.params['db']
registry = openerp.modules.registry.RegistryManager.get(db)
attendee_pool = registry.get('calendar.attendee')
error_message = False
with registry.cursor() as cr:
attendee_id = attendee_pool.search(cr, openerp.SUPERUSER_ID, [('access_token', '=', token)])
if not attendee_id:
error_message = """Invalid Invitation Token."""
elif request.session.uid and request.session.login != 'anonymous':
# if the session is valid but the logged-in user does not match the attendee
attendee = attendee_pool.browse(cr, openerp.SUPERUSER_ID, attendee_id[0])
user = registry.get('res.users').browse(cr, openerp.SUPERUSER_ID, request.session.uid)
if attendee.partner_id.id != user.partner_id.id:
error_message = """Invitation cannot be forwarded via email. This event/meeting belongs to %s and you are logged in as %s. Please ask organizer to add you.""" % (attendee.email, user.email)
if error_message:
raise BadRequest(error_message)
return True
class invite_wizard(osv.osv_memory):
_inherit = 'mail.wizard.invite'
def default_get(self, cr, uid, fields, context=None):
'''
in case someone clicked the 'invite others' wizard in the followers widget, transform virtual ids into real ids
'''
result = super(invite_wizard, self).default_get(cr, uid, fields, context=context)
if 'res_id' in result:
result['res_id'] = get_real_ids(result['res_id'])
return result
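# Hedged note (added for illustration, not part of the original module): the
# code above assumes recurrence "virtual" ids of the form
# '<real_id>-<YYYYMMDDHHMMSS>', so that, for a hypothetical value,
#     calendar_id2real_id('12-20140506070000')  ->  12
# which is why mail messages, attachments and the invite wizard all pass
# res_id through get_real_ids() before touching the database.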
|
cernops/cloudbase-init
|
refs/heads/master
|
cloudbaseinit/openstack/common/versionutils.py
|
5
|
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helpers for comparing version strings.
"""
import functools
import inspect
import pkg_resources
import six
from cloudbaseinit.openstack.common.gettextutils import _
from cloudbaseinit.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class deprecated(object):
"""A decorator to mark callables as deprecated.
This decorator logs a deprecation message when the callable it decorates is
used. The message will include the release where the callable was
deprecated, the release where it may be removed and possibly an optional
replacement.
Examples:
1. Specifying the required deprecated release
>>> @deprecated(as_of=deprecated.ICEHOUSE)
... def a(): pass
2. Specifying a replacement:
>>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()')
... def b(): pass
3. Specifying the release where the functionality may be removed:
>>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=+1)
... def c(): pass
4. Specifying the deprecated functionality will not be removed:
>>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=0)
... def d(): pass
5. Specifying a replacement, deprecated functionality will not be removed:
>>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()', remove_in=0)
... def e(): pass
"""
# NOTE(morganfainberg): Bexar is used for unit test purposes, it is
# expected we maintain a gap between Bexar and Folsom in this list.
BEXAR = 'B'
FOLSOM = 'F'
GRIZZLY = 'G'
HAVANA = 'H'
ICEHOUSE = 'I'
JUNO = 'J'
KILO = 'K'
_RELEASES = {
# NOTE(morganfainberg): Bexar is used for unit test purposes, it is
# expected we maintain a gap between Bexar and Folsom in this list.
'B': 'Bexar',
'F': 'Folsom',
'G': 'Grizzly',
'H': 'Havana',
'I': 'Icehouse',
'J': 'Juno',
'K': 'Kilo',
}
_deprecated_msg_with_alternative = _(
'%(what)s is deprecated as of %(as_of)s in favor of '
'%(in_favor_of)s and may be removed in %(remove_in)s.')
_deprecated_msg_no_alternative = _(
'%(what)s is deprecated as of %(as_of)s and may be '
'removed in %(remove_in)s. It will not be superseded.')
_deprecated_msg_with_alternative_no_removal = _(
'%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s.')
_deprecated_msg_with_no_alternative_no_removal = _(
'%(what)s is deprecated as of %(as_of)s. It will not be superseded.')
def __init__(self, as_of, in_favor_of=None, remove_in=2, what=None):
"""Initialize decorator
:param as_of: the release deprecating the callable. Constants
are defined in this class for convenience.
:param in_favor_of: the replacement for the callable (optional)
:param remove_in: an integer specifying how many releases to wait
before removing (default: 2)
:param what: name of the thing being deprecated (default: the
callable's name)
"""
self.as_of = as_of
self.in_favor_of = in_favor_of
self.remove_in = remove_in
self.what = what
def __call__(self, func_or_cls):
if not self.what:
self.what = func_or_cls.__name__ + '()'
msg, details = self._build_message()
if inspect.isfunction(func_or_cls):
@six.wraps(func_or_cls)
def wrapped(*args, **kwargs):
LOG.deprecated(msg, details)
return func_or_cls(*args, **kwargs)
return wrapped
elif inspect.isclass(func_or_cls):
orig_init = func_or_cls.__init__
# TODO(tsufiev): change `functools` module to `six` as
# soon as six 1.7.4 (with fix for passing `assigned`
# argument to underlying `functools.wraps`) is released
# and added to the cloudbaseinit-incubator requirements
@functools.wraps(orig_init, assigned=('__name__', '__doc__'))
def new_init(self, *args, **kwargs):
LOG.deprecated(msg, details)
orig_init(self, *args, **kwargs)
func_or_cls.__init__ = new_init
return func_or_cls
else:
raise TypeError('deprecated can be used only with functions or '
'classes')
def _get_safe_to_remove_release(self, release):
# TODO(dstanek): this method will have to be reimplemented once
# we get to the X release because once we get to the Y
# release, what is Y+2?
new_release = chr(ord(release) + self.remove_in)
if new_release in self._RELEASES:
return self._RELEASES[new_release]
else:
return new_release
def _build_message(self):
details = dict(what=self.what,
as_of=self._RELEASES[self.as_of],
remove_in=self._get_safe_to_remove_release(self.as_of))
if self.in_favor_of:
details['in_favor_of'] = self.in_favor_of
if self.remove_in > 0:
msg = self._deprecated_msg_with_alternative
else:
# There are no plans to remove this function, but it is
# now deprecated.
msg = self._deprecated_msg_with_alternative_no_removal
else:
if self.remove_in > 0:
msg = self._deprecated_msg_no_alternative
else:
# There are no plans to remove this function, but it is
# now deprecated.
msg = self._deprecated_msg_with_no_alternative_no_removal
return msg, details
def is_compatible(requested_version, current_version, same_major=True):
"""Determine whether `requested_version` is satisfied by
`current_version`; in other words, `current_version` is >=
`requested_version`.
:param requested_version: version to check for compatibility
:param current_version: version to check against
:param same_major: if True, the major version must be identical between
`requested_version` and `current_version`. This is used when a
major-version difference indicates incompatibility between the two
versions. Since this is the common-case in practice, the default is
True.
:returns: True if compatible, False if not
"""
requested_parts = pkg_resources.parse_version(requested_version)
current_parts = pkg_resources.parse_version(current_version)
if same_major and (requested_parts[0] != current_parts[0]):
return False
return current_parts >= requested_parts
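# Hedged usage sketch (added for illustration, not part of the original
# module). `old_helper` is a hypothetical name; the block only shows how the
# decorator and is_compatible() defined above are typically used.
if __name__ == '__main__':
    @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='new_helper()')
    def old_helper():
        # calling old_helper() would emit the deprecation message via LOG
        return 42

    print(is_compatible('1.0', '1.2'))  # True: same major version, 1.2 >= 1.0
    print(is_compatible('2.0', '1.2'))  # False: major versions differ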
|
nicescale/docker-registry
|
refs/heads/master
|
tests/test_s3.py
|
2
|
# -*- coding: utf-8 -*-
import sys
import StringIO
from nose import tools
from docker_registry.core import exceptions
import docker_registry.testing as testing
from docker_registry.testing import mock_boto # noqa
from . import mock_s3 # noqa
class StringIOWithError(StringIO.StringIO):
'''Throw IOError after reaching EOF.'''
def read(self, size):
if self.pos == self.len:
raise IOError('Reading beyond EOF')
return StringIO.StringIO.read(self, size)
class TestDriver(testing.Driver):
'''Extra tests for coverage completion.'''
def __init__(self):
self.scheme = 's3'
self.path = ''
self.config = testing.Config({})
def tearDown(self):
self._storage._boto_bucket.delete()
super(TestDriver, self).tearDown()
@tools.raises(exceptions.FileNotFoundError)
def test_list_bucket(self):
# Add a couple of bucket keys
filename1 = self.gen_random_string()
filename2 = self.gen_random_string()
content = self.gen_random_string()
self._storage.put_content(filename1, content)
# Check bucket key is stored in normalized form
self._storage.put_content(filename2 + '/', content)
# Check both keys are in the bucket
assert sorted([filename1, filename2]) == sorted(
list(self._storage.list_directory()))
# Check listing bucket raises exception after removing keys
self._storage.remove(filename1)
self._storage.remove(filename2)
s = self._storage.list_directory()
s.next()
def test_stream_write(self):
# Check stream write with buffer bigger than default 5MB
self._storage.buffer_size = 7 * 1024 * 1024
filename = self.gen_random_string()
# Test 8MB
content = self.gen_random_string(8 * 1024 * 1024)
io = StringIOWithError(content)
assert not self._storage.exists(filename)
try:
self._storage.stream_write(filename, io)
except IOError:
pass
assert self._storage.exists(filename)
# Test that EOFed io string throws IOError on lib/storage/s3
try:
self._storage.stream_write(filename, io)
except IOError:
pass
# Cleanup
io.close()
self._storage.remove(filename)
self._storage.buffer_size = 5 * 1024 * 1024
assert not self._storage.exists(filename)
def test_init_path(self):
# s3 storage _init_path result keys are relative (no / at start)
root_path = self._storage._root_path
if root_path.startswith('/'):
self._storage._root_path = root_path[1:]
assert not self._storage._init_path().startswith('/')
self._storage._root_path = root_path
def test_debug_key(self):
# Create a valid s3 key object to debug
filename = self.gen_random_string()
content = self.gen_random_string()
self._storage.put_content(filename, content)
# Get filename key path as stored
key_path = self._storage._init_path(filename)
key = self._storage._boto_bucket.lookup(key_path)
self._storage._debug_key(key)
# Capture debugged output
saved_stdout = sys.stdout
output = StringIO.StringIO()
sys.stdout = output
# As key is mocked for unittest purposes, we call make_request directly
dummy = "################\n('d', 1)\n{'v': 2}\n################\n"
# '{}\n{}\n{}\n{}\n'.format(
# '#' * 16, ('d', 1), {'v': 2}, '#' * 16)
result = self._storage._boto_bucket.connection.make_request(
'd', 1, v=2)
assert output.getvalue() == dummy
assert result == 'request result'
sys.stdout = saved_stdout
# We don't call self._storage.remove(filename) here so that tearDown
# cleans up properly and other tests keep running as expected.
|
rscnt/django-cms
|
refs/heads/develop
|
cms/migrations/0009_merge.py
|
60
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('cms', '0008_auto_20150121_0059'),
('cms', '0008_auto_20150208_2149'),
]
operations = [
]
|
yuxiang-zhou/menpo
|
refs/heads/master
|
menpo/feature/test/test_gradient.py
|
3
|
from nose.tools import raises
import numpy as np
from numpy.testing import assert_allclose
from menpo.image import Image
from menpo.feature import gradient
from menpo.feature.features import _np_gradient
import menpo.io as mio
takeo = mio.import_builtin_asset.takeo_ppm()
example_image = np.array([[1., 2., 6.], [3., 4., 5.]])
y_grad = np.array([[2., 2., -1.], [2., 2., -1.]])
x_grad = np.array([[1., 2.5, 4.], [1., 1., 1.]])
def test_gradient_float():
dtype = np.float32
p = example_image.astype(dtype)
image = Image(p)
grad_image = gradient(image)
_check_assertions(grad_image, image.shape, image.n_channels * 2,
dtype)
np_grad = np.gradient(p)
assert_allclose(grad_image.pixels[0], np_grad[0])
assert_allclose(grad_image.pixels[1], np_grad[1])
def test_gradient_takeo_float32():
dtype = np.float32
t = takeo.copy()
t.pixels = t.pixels.astype(dtype)
grad_image = gradient(t)
_check_assertions(grad_image, t.shape, t.n_channels * 2,
dtype)
np_grad = _np_gradient(t.pixels)
assert_allclose(grad_image.pixels, np_grad)
def test_gradient_double():
dtype = np.float64
p = example_image.astype(dtype)
image = Image(p)
grad_image = gradient(image)
_check_assertions(grad_image, image.shape, image.n_channels * 2,
dtype)
np_grad = np.gradient(p)
assert_allclose(grad_image.pixels[0], np_grad[0])
assert_allclose(grad_image.pixels[1], np_grad[1])
def test_gradient_takeo_double():
t = takeo.copy()
t.pixels = t.pixels.astype(np.float64)
grad_image = gradient(t)
np_grad = _np_gradient(t.pixels)
assert_allclose(grad_image.pixels, np_grad)
@raises(TypeError)
def test_gradient_uint8_exception():
image = Image(example_image.astype(np.uint8))
gradient(image)
def _check_assertions(actual_image, expected_shape, expected_n_channels,
expected_type):
assert (actual_image.pixels.dtype == expected_type)
assert (type(actual_image) == Image)
assert (actual_image.shape == expected_shape)
assert (actual_image.n_channels == expected_n_channels)
|
fnouama/intellij-community
|
refs/heads/master
|
python/testData/quickdoc/Method.py
|
83
|
# just method
class Foo:
@deco
def meth(self):
"""<the_doc>
Doc of meth.
"""
f = Foo()
f.<the_ref>meth
|
konsP/synnefo
|
refs/heads/develop
|
snf-cyclades-app/synnefo/db/migrations/0051_auto__add_bridgepooltable__add_macprefixpooltable.py
|
10
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'BridgePoolTable'
db.create_table('db_bridgepooltable', (
('reserved_map', self.gf('django.db.models.fields.TextField')(default='')),
('base', self.gf('django.db.models.fields.CharField')(max_length=32, null=True)),
('offset', self.gf('django.db.models.fields.IntegerField')(null=True)),
('available_map', self.gf('django.db.models.fields.TextField')(default='')),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('size', self.gf('django.db.models.fields.IntegerField')()),
))
db.send_create_signal('db', ['BridgePoolTable'])
# Adding model 'MacPrefixPoolTable'
db.create_table('db_macprefixpooltable', (
('reserved_map', self.gf('django.db.models.fields.TextField')(default='')),
('base', self.gf('django.db.models.fields.CharField')(max_length=32, null=True)),
('offset', self.gf('django.db.models.fields.IntegerField')(null=True)),
('available_map', self.gf('django.db.models.fields.TextField')(default='')),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('size', self.gf('django.db.models.fields.IntegerField')()),
))
db.send_create_signal('db', ['MacPrefixPoolTable'])
def backwards(self, orm):
# Deleting model 'BridgePoolTable'
db.delete_table('db_bridgepooltable')
# Deleting model 'MacPrefixPoolTable'
db.delete_table('db_macprefixpooltable')
models = {
'db.backend': {
'Meta': {'object_name': 'Backend'},
'clustername': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'ctotal': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'dfree': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'drained': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'dtotal': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'unique': 'True'}),
'mfree': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'mtotal': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'offline': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'password_hash': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'pinst_cnt': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'port': ('django.db.models.fields.PositiveIntegerField', [], {'default': '5080'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'db.backendnetwork': {
'Meta': {'unique_together': "(('network', 'backend'),)", 'object_name': 'BackendNetwork'},
'backend': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'networks'", 'to': "orm['db.Backend']"}),
'backendjobid': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'backendjobstatus': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'backendlogmsg': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'backendopcode': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'backendtime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1, 1, 1, 0, 0)'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mac_prefix': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'network': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'backend_networks'", 'to': "orm['db.Network']"}),
'operstate': ('django.db.models.fields.CharField', [], {'default': "'PENDING'", 'max_length': '30'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'db.bridgepool': {
'Meta': {'object_name': 'BridgePool'},
'available': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.IntegerField', [], {'unique': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'db.bridgepooltable': {
'Meta': {'object_name': 'BridgePoolTable'},
'available_map': ('django.db.models.fields.TextField', [], {'default': "''"}),
'base': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'offset': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'reserved_map': ('django.db.models.fields.TextField', [], {'default': "''"}),
'size': ('django.db.models.fields.IntegerField', [], {})
},
'db.flavor': {
'Meta': {'unique_together': "(('cpu', 'ram', 'disk', 'disk_template'),)", 'object_name': 'Flavor'},
'cpu': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'disk': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'disk_template': ('django.db.models.fields.CharField', [], {'default': "'plain'", 'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ram': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'db.macprefixpool': {
'Meta': {'object_name': 'MacPrefixPool'},
'available': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.IntegerField', [], {'unique': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'db.macprefixpooltable': {
'Meta': {'object_name': 'MacPrefixPoolTable'},
'available_map': ('django.db.models.fields.TextField', [], {'default': "''"}),
'base': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'offset': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'reserved_map': ('django.db.models.fields.TextField', [], {'default': "''"}),
'size': ('django.db.models.fields.IntegerField', [], {})
},
'db.network': {
'Meta': {'object_name': 'Network'},
'action': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '32', 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'dhcp': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'gateway': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'gateway6': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'mac_prefix': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'machines': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['db.VirtualMachine']", 'through': "orm['db.NetworkInterface']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'reservations': ('django.db.models.fields.TextField', [], {'default': "''"}),
'state': ('django.db.models.fields.CharField', [], {'default': "'PENDING'", 'max_length': '32'}),
'subnet': ('django.db.models.fields.CharField', [], {'default': "'10.0.0.0/24'", 'max_length': '32'}),
'subnet6': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'PRIVATE_PHYSICAL_VLAN'", 'max_length': '50'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'userid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'})
},
'db.networkinterface': {
'Meta': {'object_name': 'NetworkInterface'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'dirty': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'firewall_profile': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.IntegerField', [], {}),
'ipv4': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True'}),
'ipv6': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'mac': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'machine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'nics'", 'to': "orm['db.VirtualMachine']"}),
'network': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'nics'", 'to': "orm['db.Network']"}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'db.virtualmachine': {
'Meta': {'object_name': 'VirtualMachine'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'backend': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'virtual_machines'", 'null': 'True', 'to': "orm['db.Backend']"}),
'backend_hash': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'backendjobid': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'backendjobstatus': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'backendlogmsg': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'backendopcode': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'backendtime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1, 1, 1, 0, 0)'}),
'buildpercentage': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'flavor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['db.Flavor']"}),
'hostid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imageid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'operstate': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'suspended': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'userid': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'db.virtualmachinemetadata': {
'Meta': {'unique_together': "(('meta_key', 'vm'),)", 'object_name': 'VirtualMachineMetadata'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'meta_key': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'meta_value': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'vm': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'metadata'", 'to': "orm['db.VirtualMachine']"})
}
}
complete_apps = ['db']
|
nitbot/iot.nitbot.com
|
refs/heads/master
|
miniPC-Program/extLibrary/wiring-x86/wiringx86.py
|
1
|
# -*- coding: utf-8 -*-
#
# Copyright © 2014, Emutex Ltd.
# All rights reserved.
# http://www.emutex.com
#
# Author: Nicolás Pernas Maradei <nicolas.pernas.maradei@emutex.com>
# Author: Dave Hunt <dave@emutex.com>
#
# See license in LICENSE.txt file.
#
# Wiring-x86 is a Python module that lets you use Arduino-like functionality
# on
# Intel® Galileo
# Intel® Galileo Gen2
# Intel® Edison
import datetime
import os
INPUT = 'in'
INPUT_PULLUP = 'in_pullup'
INPUT_PULLDOWN = 'in_pulldown'
OUTPUT = 'out'
ANALOG_INPUT = 'analog_input'
PWM = 'pwm'
LOW = 'low'
HIGH = 'high'
NONE = 'in'
DRIVE_STRONG = 'strong'
DRIVE_HIZ = 'hiz'
MODE_0 = 'mode0'
MODE_1 = 'mode1'
MODE_2 = 'mode2'
MODE_3 = 'mode3'
MODE_4 = 'mode4'
MODE_5 = 'mode5'
ALL_MODES = (MODE_0, MODE_1, MODE_2, MODE_3, MODE_4, MODE_5)
class GPIOBase(object):
def __init__(self, debug=False):
"""Constructor
Args:
debug: enables the debug mode showing the interaction with sysfs
"""
self.debug = debug
self.pins_in_use = []
self.gpio_handlers = {}
self.exported_pwm = []
self.enabled_pwm = {}
if self.has_pinmux():
self._export_pin(self.pinmux)
self._set_direction(self.pinmux, self.HIGH)
def has_pinmux(self):
return hasattr(self, 'pinmux')
def pinMode(self, pin, mode):
"""Set mode to GPIO pin`.
This function must be called before doing any other operation on the
pin. It also sets up the muxing needed in the board for the pin to
behave as the user wants to.
Args:
pin: Arduino pin number (0-19)
mode: pin mode must be:
OUTPUT: Pin used as output. Use to write into it.
INPUT: Pin used as input (high impedance). Use to read
from it.
INPUT_PULLUP: Pin used as input (pullup resistor). Use to read
from it.
INPUT_PULLDOWN: Pin used as input (pulldown resistor). Use to
read from it.
ANALOG_INPUT: Pin used as analog input (ADC).
PWM: Pin used as analog output (PWM).
"""
if pin not in self.GPIO_MAPPING:
return False
if self.has_pinmux():
self._set_direction(self.pinmux, self.LOW)
mux = self._select_muxing(mode, pin)
if mux is None:
return False
linux_pin = self.GPIO_MAPPING[pin]
self._export_pin(linux_pin)
# In these two cases we open file handles so we can read/write the sysfs
# entries directly, which is faster than spawning a shell every time.
# PWM does not need this.
if mode == ANALOG_INPUT:
adc = self.ADC_MAPPING[pin]
self._open_analog_handler(linux_pin, adc)
elif mode in (OUTPUT, INPUT, INPUT_PULLUP, INPUT_PULLDOWN):
self._open_digital_handler(linux_pin)
# Walk through the muxing table and set the pins to their values. This
# is the actual muxing.
for vpin, value in mux:
self._export_pin(vpin)
self._set_direction(vpin, value)
if value == NONE:
self._set_drive(vpin, DRIVE_HIZ)
elif value in (HIGH, LOW):
self._set_drive(vpin, DRIVE_STRONG)
self._write_value(vpin, value)
elif value in ALL_MODES:
self._muxmode(vpin, value)
if mode == OUTPUT:
self._set_direction(linux_pin, OUTPUT)
self._set_drive(linux_pin, DRIVE_STRONG)
self._write_value(linux_pin, LOW)
elif mode in (INPUT, INPUT_PULLUP, INPUT_PULLDOWN):
self._set_direction(linux_pin, INPUT)
elif mode == PWM:
self._init_pwm(pin)
if self.has_pinmux():
self._set_direction(self.pinmux, self.HIGH)
return True
def digitalWrite(self, pin, state):
"""Write a value to a GPIO pin.
The GPIO pin is assumed to be configured as OUTPUT
Args:
pin: Arduino pin number (0-19)
state: pin state to be written (LOW-HIGH)
"""
if pin not in self.GPIO_MAPPING:
return
self._write_value_to_handler(self.GPIO_MAPPING[pin], state)
def digitalRead(self, pin):
"""Read GPIO pin's state.
The GPIO pin is assumed to be configured as INPUT
Args:
pin: Arduino pin number (0-19)
Returns:
Current value of the GPIO pin as an Integer
"""
if pin not in self.GPIO_MAPPING:
return
handler = self.gpio_handlers[self.GPIO_MAPPING[pin]]
state = handler.read()
handler.seek(0)
return int(state.strip())
def analogWrite(self, pin, value):
"""Write analog output (PWM)
The GPIO pin is assumed to be configured as PWM. Generates a PWM
signal with the desired duty cycle. The value must be in range 0-255.
Args:
pin: Arduino PWM pin number (3, 5, 6, 9, 10, 11)
value: the duty cycle: between 0 (always off) and 255 (always on)
"""
if pin not in self.PWM_MAPPING:
return
if value < 0:
value = 0
elif value > 255:
value = 255
pwm = self.PWM_MAPPING[pin]
if not self.enabled_pwm.get(pwm, False):
self._enable_pwm(pwm)
self._set_pwm_duty_cycle(pwm, self._get_pwm_period(pin) * value / 255)
def analogRead(self, pin):
"""Read analog input from the pin
The GPIO pin is assumed to be configured as ANALOG_INPUT.
Returns values in range 0-1023
Args:
pin: Arduino analog pin number (14-19)
Returns:
Digital representation with 10 bits resolution (range 0-1023) of
voltage on the pin.
"""
if pin not in self.ADC_MAPPING:
return
handler = self.gpio_handlers[self.GPIO_MAPPING[pin]]
voltage = handler.read()
handler.seek(0)
# ADC chip on the board reports voltages with 12 bits resolution.
# To convert it to 10 bits just shift right 2 bits.
return int(voltage.strip()) >> 2
def setPWMPeriod(self, pin, period):
"""Set the PWM period
Check if the period is valid for the current system and proceed to
set the new period.
Args:
pin: Arduino PWM pin number (3, 5, 6, 9, 10, 11)
period: period in nanoseconds
"""
if period < self.PWM_MIN_PERIOD or period > self.PWM_MAX_PERIOD:
return
self._set_pwm_period(pin, period)
def cleanup(self):
"""Do a general cleanup.
Close all open handlers for reading and writing.
Unexport all exported GPIO pins.
Unexport all exported PWM channels.
Calling this function is not mandatory, but it is recommended once you
are done using the library, especially when it is used inside a larger
application that runs for a long period of time.
"""
for pin in self.pins_in_use:
self._unexport_pin(pin)
del self.pins_in_use[:]
for handler in self.gpio_handlers.values():
handler.close()
self.gpio_handlers.clear()
for pwm in self.exported_pwm:
self._unexport_pwm(pwm)
del self.exported_pwm[:]
self.enabled_pwm.clear()
def _select_muxing(self, mode, pin):
if mode == OUTPUT:
return self.GPIO_MUX_OUTPUT[pin]
elif mode == INPUT:
return self.GPIO_MUX_INPUT[pin]
elif mode == INPUT_PULLUP:
return self.GPIO_MUX_INPUT_PULLUP[pin]
elif mode == INPUT_PULLDOWN:
return self.GPIO_MUX_INPUT_PULLDOWN[pin]
elif mode == ANALOG_INPUT and pin in self.ADC_MAPPING:
return self.GPIO_MUX_ANALOG_INPUT[pin]
elif mode == PWM and pin in self.PWM_MAPPING:
return self.GPIO_MUX_PWM[pin]
return None
def _open_digital_handler(self, linux_pin):
try:
f = open('/sys/class/gpio/gpio%d/value' % linux_pin, 'r+')
self.gpio_handlers[linux_pin] = f
except:
print "Failed opening digital value file for pin %d" % linux_pin
def _open_analog_handler(self, linux_pin, adc):
try:
f = open('/sys/bus/iio/devices/iio:device%d/in_voltage%d_raw' % (self.adc_iio_device, adc), 'r+')
self.gpio_handlers[linux_pin] = f
except:
print "Failed opening analog value file for pin %d" % linux_pin
def _write_value(self, linux_pin, state):
value = 1
if state == LOW:
value = 0
cmd = 'echo %d > /sys/class/gpio/gpio%d/value' % (value, linux_pin)
self._exec_cmd(self._write_value.__name__, cmd)
def _write_value_to_handler(self, linux_pin, state):
handler = self.gpio_handlers[linux_pin]
value = '0' if state == LOW else '1'
handler.write(value)
handler.seek(0)
def _set_direction(self, linux_pin, direction):
dirfile = '/sys/class/gpio/gpio%d/direction' % linux_pin
cmd = 'test -f %s && echo %s > %s 2>&1' % (dirfile, direction, dirfile)
self._exec_cmd(self._set_direction.__name__, cmd)
def _export_pin(self, linux_pin):
self.pins_in_use.append(linux_pin)
cmd = 'echo %d > /sys/class/gpio/export 2>&1' % linux_pin
self._exec_cmd(self._export_pin.__name__, cmd)
def _unexport_pin(self, linux_pin):
cmd = 'echo %d > /sys/class/gpio/unexport 2>&1' % linux_pin
self._exec_cmd(self._unexport_pin.__name__, cmd)
def _muxmode(self, linux_pin, mode):
cmd = 'echo %s > /sys/kernel/debug/gpio_debug/gpio%d/current_pinmux' % (mode, linux_pin)
self._exec_cmd(self._muxmode.__name__, cmd)
def _set_drive(self, linux_pin, drive):
if not self.has_pinmux():
cmd = 'echo %s > /sys/class/gpio/gpio%d/drive > /dev/null' % (drive, linux_pin)
self._exec_cmd(self._set_drive.__name__, cmd)
def _export_pwm(self, channel):
self.exported_pwm.append(channel)
cmd = 'echo %d > /sys/class/pwm/pwmchip0/export 2>&1' % channel
self._exec_cmd(self._export_pwm.__name__, cmd)
def _unexport_pwm(self, channel):
cmd = 'echo %d > /sys/class/pwm/pwmchip0/unexport 2>&1' % channel
self._exec_cmd(self._unexport_pwm.__name__, cmd)
def _set_pwm_duty_cycle(self, channel, duty_cycle):
cmd = 'echo %d > /sys/class/pwm/pwmchip0/pwm%d/duty_cycle' % (duty_cycle, channel)
self._exec_cmd(self._set_pwm_duty_cycle.__name__, cmd)
def _enable_pwm(self, pwm):
self.enabled_pwm[pwm] = True
cmd = 'echo 1 > /sys/class/pwm/pwmchip0/pwm%d/enable' % pwm
self._exec_cmd(self._enable_pwm.__name__, cmd)
def __debug(self, func_name, cmd):
if self.debug:
now = datetime.datetime.now().strftime("%B %d %I:%M:%S")
print '{0} {1: <20}{2}'.format(now, func_name + ':', cmd)
def _exec_cmd(self, caller, command):
self.__debug(caller, command)
os.system(command)
setattr(GPIOBase, 'INPUT', INPUT)
setattr(GPIOBase, 'INPUT_PULLUP', INPUT_PULLUP)
setattr(GPIOBase, 'INPUT_PULLDOWN', INPUT_PULLDOWN)
setattr(GPIOBase, 'OUTPUT', OUTPUT)
setattr(GPIOBase, 'ANALOG_INPUT', ANALOG_INPUT)
setattr(GPIOBase, 'PWM', PWM)
setattr(GPIOBase, 'LOW', LOW)
setattr(GPIOBase, 'HIGH', HIGH)
class GPIOGalileo(GPIOBase):
"""Class for managing GPIO pinout on Intel® Galileo board
See docs/ directory for more information.
"""
GPIO_MAPPING = {
0: 50,
1: 51,
2: 32,
3: 18,
4: 28,
5: 17,
6: 24,
7: 27,
8: 26,
9: 19,
10: 16,
11: 25,
12: 38,
13: 39,
14: 44,
15: 45,
16: 46,
17: 47,
18: 48,
19: 49,
}
ADC_MAPPING = {
14: 0,
15: 1,
16: 2,
17: 3,
18: 4,
19: 5,
}
PWM_MAPPING = {
3: 3,
5: 5,
6: 6,
9: 1,
10: 7,
11: 4,
}
GPIO_MUX_OUTPUT = {
0: ((40, HIGH), ),
1: ((41, HIGH), ),
2: ((31, HIGH), ),
3: ((30, HIGH), ),
4: (),
5: (),
6: (),
7: (),
8: (),
9: (),
10: ((41, HIGH), ),
11: ((43, HIGH), ),
12: ((54, HIGH), ),
13: ((55, HIGH), ),
14: ((37, HIGH), ),
15: ((36, HIGH), ),
16: ((23, HIGH), ),
17: ((22, HIGH), ),
18: ((21, HIGH), (29, HIGH)),
19: ((20, HIGH), (29, HIGH)),
}
GPIO_MUX_INPUT = GPIO_MUX_OUTPUT
GPIO_MUX_INPUT_PULLUP = GPIO_MUX_OUTPUT
GPIO_MUX_INPUT_PULLDOWN = GPIO_MUX_OUTPUT
GPIO_MUX_ANALOG_INPUT = {
14: ((37, LOW), ),
15: ((36, LOW), ),
16: ((23, LOW), ),
17: ((22, LOW), ),
18: ((21, LOW), (29, HIGH)),
19: ((20, LOW), (29, HIGH)),
}
GPIO_MUX_PWM = {
3: ((30, HIGH), ),
5: (),
6: (),
9: (),
10: ((41, HIGH), ),
11: ((43, HIGH), ),
}
PWM_MIN_PERIOD = 62500
PWM_MAX_PERIOD = 7999999
PWM_DEFAULT_PERIOD = 5000000
def __init__(self, **kwargs):
self.adc_iio_device = 0
super(GPIOGalileo, self).__init__(**kwargs)
self.pwm_periods = {}
for pwm in self.PWM_MAPPING.keys():
self.pwm_periods[pwm] = self.PWM_DEFAULT_PERIOD
def _set_pwm_period(self, pin, period):
channel = self.PWM_MAPPING[pin]
self.pwm_periods[pin] = period
cmd = 'echo %d > /sys/class/pwm/pwmchip0/pwm%d/period' % (period, channel)
self._exec_cmd(self._set_pwm_period.__name__, cmd)
def _get_pwm_period(self, pin):
return self.pwm_periods[pin]
def _init_pwm(self, pin):
linux_pin = self.GPIO_MAPPING[pin]
self._set_drive(linux_pin, DRIVE_STRONG)
self._set_direction(linux_pin, OUTPUT)
self._write_value(linux_pin, HIGH)
pwm = self.PWM_MAPPING[pin]
self._export_pwm(pwm)
self.enabled_pwm[pwm] = False
self._set_pwm_period(pin, self.pwm_periods[pin])
self._set_pwm_duty_cycle(pwm, 0)
class GPIOGalileoGen2(GPIOBase):
"""Class for managing GPIO pinout on Intel® Galileo Gen2 board
See docs/ directory for more information.
"""
GPIO_MAPPING = {
0: 11,
1: 12,
2: 61,
3: 62,
4: 6,
5: 0,
6: 1,
7: 38,
8: 40,
9: 4,
10: 10,
11: 5,
12: 15,
13: 7,
14: 48,
15: 50,
16: 52,
17: 54,
18: 56,
19: 58,
}
ADC_MAPPING = {
14: 0,
15: 1,
16: 2,
17: 3,
18: 4,
19: 5,
}
PWM_MAPPING = {
3: 1,
5: 3,
6: 5,
9: 7,
10: 11,
11: 9,
}
GPIO_MUX_OUTPUT = {
0: ((32, LOW), (33, NONE)),
1: ((45, LOW), (28, LOW), (29, NONE)),
2: ((77, LOW), (34, LOW), (35, NONE), (13, NONE)),
3: ((64, LOW), (76, LOW), (16, LOW), (17, NONE), (14, NONE)),
4: ((36, LOW), (37, NONE)),
5: ((66, LOW), (18, LOW), (19, NONE)),
6: ((68, LOW), (20, LOW), (21, NONE)),
7: ((39, NONE), ),
8: ((41, NONE), ),
9: ((70, LOW), (22, LOW), (23, NONE)),
10: ((74, LOW), (26, LOW), (27, NONE)),
11: ((44, LOW), (72, LOW), (24, LOW), (25, NONE)),
12: ((42, LOW), (43, NONE)),
13: ((46, LOW), (30, LOW), (31, NONE)),
14: ((49, NONE), ),
15: ((51, NONE), ),
16: ((53, NONE), ),
17: ((55, NONE), ),
18: ((78, HIGH), (60, HIGH), (57, NONE)),
19: ((79, HIGH), (60, HIGH), (59, NONE)),
}
GPIO_MUX_INPUT = {
0: ((32, HIGH), (33, NONE)),
1: ((45, LOW), (28, HIGH), (29, NONE)),
2: ((77, LOW), (34, HIGH), (35, NONE), (13, NONE)),
3: ((64, LOW), (76, LOW), (16, HIGH), (17, NONE), (14, NONE)),
4: ((36, HIGH), (37, NONE)),
5: ((66, LOW), (18, HIGH), (19, NONE)),
6: ((68, LOW), (20, HIGH), (21, NONE)),
7: ((39, NONE), ),
8: ((41, NONE), ),
9: ((70, LOW), (22, HIGH), (23, NONE)),
10: ((74, LOW), (26, HIGH), (27, NONE)),
11: ((44, LOW), (72, LOW), (24, HIGH), (25, NONE)),
12: ((42, HIGH), (43, NONE)),
13: ((46, LOW), (30, HIGH), (31, NONE)),
14: ((49, NONE), ),
15: ((51, NONE), ),
16: ((53, NONE), ),
17: ((55, NONE), ),
18: ((78, HIGH), (60, HIGH), (57, NONE)),
19: ((79, HIGH), (60, HIGH), (59, NONE)),
}
GPIO_MUX_INPUT_PULLUP = {
0: ((32, HIGH), (33, HIGH)),
1: ((45, LOW), (28, HIGH), (29, HIGH)),
2: ((77, LOW), (34, HIGH), (35, HIGH), (13, NONE)),
3: ((64, LOW), (76, LOW), (16, HIGH), (17, HIGH), (14, NONE)),
4: ((36, HIGH), (37, HIGH)),
5: ((66, LOW), (18, HIGH), (19, HIGH)),
6: ((68, LOW), (20, HIGH), (21, HIGH)),
7: ((39, HIGH), ),
8: ((41, HIGH), ),
9: ((70, LOW), (22, HIGH), (23, HIGH)),
10: ((74, LOW), (26, HIGH), (27, HIGH)),
11: ((44, LOW), (72, LOW), (24, HIGH), (25, HIGH)),
12: ((42, HIGH), (43, HIGH)),
13: ((46, LOW), (30, HIGH), (31, HIGH)),
14: ((49, HIGH), ),
15: ((51, HIGH), ),
16: ((53, HIGH), ),
17: ((55, HIGH), ),
18: ((78, HIGH), (60, HIGH), (57, HIGH)),
19: ((79, HIGH), (60, HIGH), (59, HIGH)),
}
GPIO_MUX_INPUT_PULLDOWN = {
0: ((32, HIGH), ),
1: ((45, LOW), (28, HIGH), (29, LOW)),
2: ((77, LOW), (34, HIGH), (35, LOW), (13, NONE)),
3: ((64, LOW), (76, LOW), (16, HIGH), (17, LOW), (14, NONE)),
4: ((36, HIGH), (37, LOW)),
5: ((66, LOW), (18, HIGH), (19, LOW)),
6: ((68, LOW), (20, HIGH), (21, LOW)),
7: ((39, LOW), ),
8: ((41, LOW), ),
9: ((70, LOW), (22, HIGH), (23, LOW)),
10: ((74, LOW), (26, HIGH), (27, LOW)),
11: ((44, LOW), (72, LOW), (24, HIGH), (25, LOW)),
12: ((42, HIGH), (43, LOW)),
13: ((46, LOW), (30, HIGH), (31, LOW)),
14: ((49, LOW), ),
15: ((51, LOW), ),
16: ((53, LOW), ),
17: ((55, LOW), ),
18: ((78, HIGH), (60, HIGH), (57, LOW)),
19: ((79, HIGH), (60, HIGH), (59, LOW)),
}
GPIO_MUX_ANALOG_INPUT = {
14: ((48, NONE), (49, NONE)),
15: ((50, NONE), (51, NONE)),
16: ((52, NONE), (53, NONE)),
17: ((54, NONE), (55, NONE)),
18: ((78, LOW), (60, HIGH), (56, NONE), (57, NONE)),
19: ((79, LOW), (60, HIGH), (58, NONE), (59, NONE)),
}
GPIO_MUX_PWM = {
3: ((64, HIGH), (76, LOW), (16, LOW), (17, NONE), (62, NONE)),
5: ((66, HIGH), (18, LOW), (19, NONE)),
6: ((68, HIGH), (20, LOW), (21, NONE)),
9: ((70, HIGH), (22, LOW), (23, NONE)),
10: ((74, HIGH), (26, LOW), (27, NONE)),
11: ((72, HIGH), (24, LOW), (25, NONE)),
}
PWM_MIN_PERIOD = 666666
PWM_MAX_PERIOD = 41666666
PWM_DEFAULT_PERIOD = 5000000
def __init__(self, **kwargs):
self.adc_iio_device = 0
super(GPIOGalileoGen2, self).__init__(**kwargs)
self.pwm_period = self.PWM_DEFAULT_PERIOD
self.is_pwm_period_set = False
def _set_pwm_period(self, pin, period):
"""On GalileoGen2 all PWM channels share the same period. When this is
set all the PWM outputs are disabled for at least 1ms while the chip
reconfigures itself. The PWM pin is then ignored.
"""
self.pwm_period = period
cmd = 'echo %d > /sys/class/pwm/pwmchip0/device/pwm_period' % period
self._exec_cmd(self._set_pwm_period.__name__, cmd)
def _get_pwm_period(self, pin):
return self.pwm_period
def _init_pwm(self, pin):
pwm = self.PWM_MAPPING[pin]
self._export_pwm(pwm)
self._set_pwm_duty_cycle(pwm, 0)
self.enabled_pwm[pwm] = False
if not self.is_pwm_period_set:
self._set_pwm_period(pin, self.pwm_period)
self.is_pwm_period_set = True
class GPIOEdison(GPIOBase):
"""Class for managing GPIO pinout on Intel®Edison board
See docs/ directory for more information.
"""
GPIO_MAPPING = {
0: 130,
1: 131,
2: 128,
3: 12,
4: 129,
5: 13,
6: 182,
7: 48,
8: 49,
9: 183,
10: 41,
11: 43,
12: 42,
13: 40,
14: 44,
15: 45,
16: 46,
17: 47,
18: 14,
19: 165,
}
ADC_MAPPING = {
14: 0,
15: 1,
16: 2,
17: 3,
18: 4,
19: 5,
}
PWM_MAPPING = {
3: 0,
5: 1,
6: 2,
9: 3,
# TODO: enable swizzler
10: None,
11: None,
}
GPIO_MUX_OUTPUT = {
0: ((130, MODE_0), (248, HIGH), (216, HIGH)),
1: ((131, MODE_0), (249, HIGH), (217, HIGH)),
2: ((128, MODE_0), (250, HIGH), (218, HIGH)),
3: (( 12, MODE_0), (251, HIGH), (219, HIGH)),
4: ((129, MODE_0), (252, HIGH), (220, HIGH)),
5: (( 13, MODE_0), (253, HIGH), (221, HIGH)),
6: ((182, MODE_0), (254, HIGH), (222, HIGH)),
7: (( 48, MODE_0), (255, HIGH), (223, HIGH)),
8: (( 49, MODE_0), (256, HIGH), (224, HIGH)),
9: ((183, MODE_0), (257, HIGH), (225, HIGH)),
10: (( 41, MODE_0), (258, HIGH), (226, HIGH), (240, LOW), (263, HIGH)),
11: (( 43, MODE_0), (259, HIGH), (227, HIGH), (241, LOW), (262, HIGH)),
12: (( 42, MODE_0), (260, HIGH), (228, HIGH), (242, LOW)),
13: (( 40, MODE_0), (261, HIGH), (229, HIGH), (243, LOW)),
14: (( 44, MODE_0), (232, HIGH), (208, HIGH), (200, LOW)),
15: (( 45, MODE_0), (233, HIGH), (209, HIGH), (201, LOW)),
16: (( 46, MODE_0), (234, HIGH), (210, HIGH), (202, LOW)),
17: (( 47, MODE_0), (235, HIGH), (211, HIGH), (203, LOW)),
18: (( 14, MODE_0), (236, HIGH), (212, HIGH), (204, LOW)),
19: ((165, MODE_0), (237, HIGH), (213, HIGH), (205, LOW)),
}
GPIO_MUX_INPUT = {
0: ((130, MODE_0), (248, LOW), (216, NONE)),
1: ((131, MODE_0), (249, LOW), (217, NONE)),
2: ((128, MODE_0), (250, LOW), (218, NONE)),
3: (( 12, MODE_0), (251, LOW), (219, NONE)),
4: ((129, MODE_0), (252, LOW), (220, NONE)),
5: (( 13, MODE_0), (253, LOW), (221, NONE)),
6: ((182, MODE_0), (254, LOW), (222, NONE)),
7: (( 48, MODE_0), (255, LOW), (223, NONE)),
8: (( 49, MODE_0), (256, LOW), (224, NONE)),
9: ((183, MODE_0), (257, LOW), (225, NONE)),
10: (( 41, MODE_0), (258, LOW), (226, NONE), (240, LOW), (263, HIGH)),
11: (( 43, MODE_0), (259, LOW), (227, NONE), (241, LOW), (262, HIGH)),
12: (( 42, MODE_0), (260, LOW), (228, NONE), (242, LOW)),
13: (( 40, MODE_0), (261, LOW), (229, NONE), (243, LOW)),
14: (( 44, MODE_0), (232, LOW), (208, NONE), (200, LOW)),
15: (( 45, MODE_0), (233, LOW), (209, NONE), (201, LOW)),
16: (( 46, MODE_0), (234, LOW), (210, NONE), (202, LOW)),
17: (( 47, MODE_0), (235, LOW), (211, NONE), (203, LOW)),
18: (( 14, MODE_0), (236, LOW), (212, NONE), (204, LOW)),
19: ((165, MODE_0), (237, LOW), (213, NONE), (205, LOW)),
}
GPIO_MUX_INPUT_PULLUP = {
0: ((130, MODE_0), (248, LOW), (216, HIGH)),
1: ((131, MODE_0), (249, LOW), (217, HIGH)),
2: ((128, MODE_0), (250, LOW), (218, HIGH)),
3: (( 12, MODE_0), (251, LOW), (219, HIGH)),
4: ((129, MODE_0), (252, LOW), (220, HIGH)),
5: (( 13, MODE_0), (253, LOW), (221, HIGH)),
6: ((182, MODE_0), (254, LOW), (222, HIGH)),
7: (( 48, MODE_0), (255, LOW), (223, HIGH)),
8: (( 49, MODE_0), (256, LOW), (224, HIGH)),
9: ((183, MODE_0), (257, LOW), (225, HIGH)),
10: (( 41, MODE_0), (258, LOW), (226, HIGH), (240, LOW), (263, HIGH)),
11: (( 43, MODE_0), (259, LOW), (227, HIGH), (241, LOW), (262, HIGH)),
12: (( 42, MODE_0), (260, LOW), (228, HIGH), (242, LOW)),
13: (( 40, MODE_0), (261, LOW), (229, HIGH), (243, LOW)),
14: (( 44, MODE_0), (232, LOW), (208, HIGH), (200, LOW)),
15: (( 45, MODE_0), (233, LOW), (209, HIGH), (201, LOW)),
16: (( 46, MODE_0), (234, LOW), (210, HIGH), (202, LOW)),
17: (( 47, MODE_0), (235, LOW), (211, HIGH), (203, LOW)),
18: (( 14, MODE_0), (236, LOW), (212, HIGH), (204, LOW)),
19: ((165, MODE_0), (237, LOW), (213, HIGH), (205, LOW)),
}
GPIO_MUX_INPUT_PULLDOWN = {
0: ((130, MODE_0), (248, LOW), (216, LOW)),
1: ((131, MODE_0), (249, LOW), (217, LOW)),
2: ((128, MODE_0), (250, LOW), (218, LOW)),
3: (( 12, MODE_0), (251, LOW), (219, LOW)),
4: ((129, MODE_0), (252, LOW), (220, LOW)),
5: (( 13, MODE_0), (253, LOW), (221, LOW)),
6: ((182, MODE_0), (254, LOW), (222, LOW)),
7: (( 48, MODE_0), (255, LOW), (223, LOW)),
8: (( 49, MODE_0), (256, LOW), (224, LOW)),
9: ((183, MODE_0), (257, LOW), (225, LOW)),
10: (( 41, MODE_0), (258, LOW), (226, LOW), (240, LOW), (263, HIGH)),
11: (( 43, MODE_0), (259, LOW), (227, LOW), (241, LOW), (262, HIGH)),
12: (( 42, MODE_0), (260, LOW), (228, LOW), (242, LOW)),
13: (( 40, MODE_0), (261, LOW), (229, LOW), (243, LOW)),
14: (( 44, MODE_0), (232, LOW), (208, LOW), (200, LOW)),
15: (( 45, MODE_0), (233, LOW), (209, LOW), (201, LOW)),
16: (( 46, MODE_0), (234, LOW), (210, LOW), (202, LOW)),
17: (( 47, MODE_0), (235, LOW), (211, LOW), (203, LOW)),
18: (( 14, MODE_0), (236, LOW), (212, LOW), (204, LOW)),
19: ((165, MODE_0), (237, LOW), (213, LOW), (205, LOW)),
}
GPIO_MUX_ANALOG_INPUT = {
14: (( 44, MODE_0), (200, HIGH), (232, LOW), (208, NONE)),
15: (( 45, MODE_0), (201, HIGH), (233, LOW), (209, NONE)),
16: (( 46, MODE_0), (202, HIGH), (234, LOW), (210, NONE)),
17: (( 47, MODE_0), (203, HIGH), (235, LOW), (211, NONE)),
18: (( 14, MODE_0), (204, HIGH), (236, LOW), (212, NONE)),
19: ((165, MODE_0), (205, HIGH), (237, LOW), (213, NONE)),
}
GPIO_MUX_PWM = {
3: (( 12, MODE_1), (251, HIGH), (219, NONE)),
5: (( 13, MODE_1), (253, HIGH), (221, NONE)),
6: ((182, MODE_1), (254, HIGH), (222, NONE)),
9: ((183, MODE_1), (257, HIGH), (225, NONE)),
10: (( 41, MODE_1), (258, HIGH), (226, NONE), (240, LOW), (263, HIGH)),
11: (( 43, MODE_1), (259, HIGH), (227, NONE), (241, LOW), (262, HIGH)),
}
PWM_MIN_PERIOD = 104
PWM_MAX_PERIOD = 218453000
PWM_DEFAULT_PERIOD = 2048000
def __init__(self, **kwargs):
self.pinmux = 214
self.adc_iio_device = 1
super(GPIOEdison, self).__init__(**kwargs)
self.pwm_periods = {}
for pin in self.PWM_MAPPING.keys():
self.pwm_periods[pin] = self.PWM_DEFAULT_PERIOD
# Set all pins into a safe state at startup.
for i in range(0, 20):
self.pinMode(i, INPUT)
def _set_pwm_period(self, pin, period):
self.pwm_periods[pin] = period
channel = self.PWM_MAPPING[pin]
cmd = 'echo %d > /sys/class/pwm/pwmchip0/pwm%d/period' % (period, channel)
self._exec_cmd(self._set_pwm_period.__name__, cmd)
def _get_pwm_period(self, pin):
return self.pwm_periods[pin]
def _init_pwm(self, pin):
pwm = self.PWM_MAPPING[pin]
self._export_pwm(pwm)
self._set_pwm_period(pin, self.pwm_periods[pin])
self._set_pwm_duty_cycle(pwm, 0)
self._enable_pwm(pwm)
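# Hedged usage sketch (added for illustration, not part of the original
# module). It only does something useful on a real board, since it drives the
# sysfs entries that the classes above manipulate; pin 13 is the usual
# Arduino-style LED pin.
if __name__ == '__main__':
    import time
    gpio = GPIOGalileoGen2(debug=True)
    gpio.pinMode(13, gpio.OUTPUT)
    for _ in range(5):
        gpio.digitalWrite(13, gpio.HIGH)
        time.sleep(0.5)
        gpio.digitalWrite(13, gpio.LOW)
        time.sleep(0.5)
    gpio.cleanup()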
|
foss-transportationmodeling/rettina-server
|
refs/heads/master
|
flask/local/lib/python2.7/site-packages/sqlparse/engine/__init__.py
|
119
|
# Copyright (C) 2008 Andi Albrecht, albrecht.andi@gmail.com
#
# This module is part of python-sqlparse and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php.
"""filter"""
from sqlparse import lexer
from sqlparse.engine import grouping
from sqlparse.engine.filter import StatementFilter
# XXX remove this when cleanup is complete
Filter = object
class FilterStack(object):
def __init__(self):
self.preprocess = []
self.stmtprocess = []
self.postprocess = []
self.split_statements = False
self._grouping = False
def _flatten(self, stream):
for token in stream:
if token.is_group():
for t in self._flatten(token.tokens):
yield t
else:
yield token
def enable_grouping(self):
self._grouping = True
def full_analyze(self):
self.enable_grouping()
def run(self, sql, encoding=None):
stream = lexer.tokenize(sql, encoding)
# Process token stream
if self.preprocess:
for filter_ in self.preprocess:
stream = filter_.process(self, stream)
if (self.stmtprocess or self.postprocess or self.split_statements
or self._grouping):
splitter = StatementFilter()
stream = splitter.process(self, stream)
if self._grouping:
def _group(stream):
for stmt in stream:
grouping.group(stmt)
yield stmt
stream = _group(stream)
if self.stmtprocess:
def _run1(stream):
ret = []
for stmt in stream:
for filter_ in self.stmtprocess:
filter_.process(self, stmt)
ret.append(stmt)
return ret
stream = _run1(stream)
if self.postprocess:
def _run2(stream):
for stmt in stream:
stmt.tokens = list(self._flatten(stmt.tokens))
for filter_ in self.postprocess:
stmt = filter_.process(self, stmt)
yield stmt
stream = _run2(stream)
return stream
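# Hedged usage sketch (added for illustration, not part of the original
# module): split and group a small SQL string with the FilterStack defined
# above. With grouping enabled, run() yields one grouped statement per query.
if __name__ == '__main__':
    stack = FilterStack()
    stack.enable_grouping()
    for statement in stack.run('select a, b from t; select 1'):
        print(statement)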
|
youdonghai/intellij-community
|
refs/heads/master
|
python/testData/fillParagraph/string.py
|
83
|
p = "my new <caret>string blah blah blah ;j;dsjv sd;fj;dsjf;ds js;djgf ;jsg s;dgj; sjd;gj sd;gj ;sjg;j asdl j;sjdg; jasdgl j;sldjg ;jsd;gj "
|
pydoit/doit
|
refs/heads/master
|
tests/test_cmd_help.py
|
3
|
from doit.doit_cmd import DoitMain
def cmd_main(args, extra_config=None, bin_name='doit'):
if extra_config:
extra_config = {'GLOBAL': extra_config}
main = DoitMain(extra_config=extra_config)
main.BIN_NAME = bin_name
return main.run(args)
class TestHelp(object):
def test_help_usage(self, capsys):
returned = cmd_main(["help"])
assert returned == 0
out, err = capsys.readouterr()
assert "doit list" in out
def test_help_usage_custom_name(self, capsys):
returned = cmd_main(["help"], bin_name='mytool')
assert returned == 0
out, err = capsys.readouterr()
assert "mytool list" in out
def test_help_plugin_name(self, capsys):
plugin = {'XXX': 'tests.sample_plugin:MyCmd'}
main = DoitMain(extra_config={'COMMAND':plugin})
main.BIN_NAME = 'doit'
returned = main.run(["help"])
assert returned == 0
out, err = capsys.readouterr()
assert "doit XXX " in out
assert "test extending doit commands" in out, out
def test_help_task_params(self, capsys):
returned = cmd_main(["help", "task"])
assert returned == 0
out, err = capsys.readouterr()
assert "Task Dictionary parameters" in out
def test_help_cmd(self, capsys):
returned = cmd_main(["help", "list"], {'dep_file': 'foo.db'})
assert returned == 0
out, err = capsys.readouterr()
assert "PURPOSE" in out
assert "list tasks from dodo file" in out
# overwritten defaults, are shown as default
assert "file used to save successful runs [default: foo.db]" in out
def test_help_task_name(self, capsys, restore_cwd, depfile_name):
returned = cmd_main(["help", "-f", "tests/loader_sample.py",
"--db-file", depfile_name, "xxx1"])
assert returned == 0
out, err = capsys.readouterr()
assert "xxx1" in out # name
assert "task doc" in out # doc
assert "-p" in out # params
def test_help_wrong_name(self, capsys, restore_cwd, depfile_name):
returned = cmd_main(["help", "-f", "tests/loader_sample.py",
"--db-file", depfile_name, "wrong_name"])
assert returned == 0 # TODO return different value?
out, err = capsys.readouterr()
assert "doit list" in out
def test_help_no_dodo_file(self, capsys):
returned = cmd_main(["help", "-f", "no_dodo", "wrong_name"])
assert returned == 0 # TODO return different value?
out, err = capsys.readouterr()
assert "doit list" in out
|
kishkaru/python-driver
|
refs/heads/master
|
cassandra/cqlengine/columns.py
|
2
|
# Copyright 2013-2016 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy, copy
from datetime import date, datetime, timedelta
import logging
import six
from uuid import UUID as _UUID
from cassandra import util
from cassandra.cqltypes import SimpleDateType, _cqltypes, UserType
from cassandra.cqlengine import ValidationError
from cassandra.cqlengine.functions import get_total_seconds
log = logging.getLogger(__name__)
class BaseValueManager(object):
def __init__(self, instance, column, value):
self.instance = instance
self.column = column
self.value = value
self.previous_value = None
self.explicit = False
@property
def deleted(self):
return self.column._val_is_null(self.value) and (self.explicit or self.previous_value is not None)
@property
def changed(self):
"""
Indicates whether or not this value has changed.
:rtype: boolean
"""
return self.value != self.previous_value
def reset_previous_value(self):
self.previous_value = deepcopy(self.value)
def getval(self):
return self.value
def setval(self, val):
self.value = val
def delval(self):
self.value = None
def get_property(self):
_get = lambda slf: self.getval()
_set = lambda slf, val: self.setval(val)
_del = lambda slf: self.delval()
if self.column.can_delete:
return property(_get, _set, _del)
else:
return property(_get, _set)
class Column(object):
# the cassandra type this column maps to
db_type = None
value_manager = BaseValueManager
instance_counter = 0
_python_type_hashable = True
primary_key = False
"""
bool flag, indicates this column is a primary key. The first primary key defined
on a model is the partition key (unless partition keys are set); all others are clustering keys
"""
partition_key = False
"""
indicates that this column should be the partition key, defining
more than one partition key column creates a compound partition key
"""
index = False
"""
bool flag, indicates an index should be created for this column
"""
db_field = None
"""
the fieldname this field will map to in the database
"""
default = None
"""
the default value, can be a value or a callable (no args)
"""
required = False
"""
boolean, is the field required? Model validation will raise and
exception if required is set to True and there is a None value assigned
"""
clustering_order = None
"""
only applicable on clustering keys (primary keys that are not partition keys)
determines the order that the clustering keys are sorted on disk
"""
discriminator_column = False
"""
boolean, if set to True, this column will be used for discriminating records
of inherited models.
Should only be set on a column of an abstract model being used for inheritance.
There may only be one discriminator column per model. See :attr:`~.__discriminator_value__`
for how to specify the value of this column on specialized models.
"""
static = False
"""
boolean, if set to True, this is a static column, with a single value per partition
"""
def __init__(self,
primary_key=False,
partition_key=False,
index=False,
db_field=None,
default=None,
required=False,
clustering_order=None,
discriminator_column=False,
static=False):
self.partition_key = partition_key
self.primary_key = partition_key or primary_key
self.index = index
self.db_field = db_field
self.default = default
self.required = required
self.clustering_order = clustering_order
self.discriminator_column = discriminator_column
# the column name in the model definition
self.column_name = None
self._partition_key_index = None
self.static = static
self.value = None
# keep track of instantiation order
self.position = Column.instance_counter
Column.instance_counter += 1
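# Editor's sketch (illustrative, not part of the driver): these constructor
# flags are normally supplied when declaring columns on a cqlengine model,
# e.g. (hypothetical model and field names):
#
#     from cassandra.cqlengine import columns
#     from cassandra.cqlengine.models import Model
#
#     class Sensor(Model):
#         station_id = columns.UUID(partition_key=True)
#         reading_ts = columns.DateTime(primary_key=True, clustering_order="DESC")
#         value = columns.Double(required=True)
#
# station_id becomes the partition key, reading_ts a descending clustering key,
# and value a required regular column.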
def validate(self, value):
"""
Returns a cleaned and validated value. Raises a ValidationError
if there's a problem
"""
if value is None:
if self.required:
raise ValidationError('{0} - None values are not allowed'.format(self.column_name or self.db_field))
return value
def to_python(self, value):
"""
Converts data from the database into python values
raises a ValidationError if the value can't be converted
"""
return value
def to_database(self, value):
"""
Converts python value into database value
"""
if value is None and self.has_default:
return self.get_default()
return value
@property
def has_default(self):
return self.default is not None
@property
def is_primary_key(self):
return self.primary_key
@property
def can_delete(self):
return not self.primary_key
def get_default(self):
if self.has_default:
if callable(self.default):
return self.default()
else:
return self.default
def get_column_def(self):
"""
Returns a column definition for CQL table definition
"""
static = "static" if self.static else ""
return '{0} {1} {2}'.format(self.cql, self.db_type, static)
# TODO: make columns use cqltypes under the hood
# until then, this bridges the gap in using types along with cassandra.metadata for CQL generation
def cql_parameterized_type(self):
return self.db_type
def set_column_name(self, name):
"""
Sets the column name during document class construction
This value will be ignored if db_field is set in __init__
"""
self.column_name = name
@property
def db_field_name(self):
""" Returns the name of the cql name of this column """
return self.db_field or self.column_name
@property
def db_index_name(self):
""" Returns the name of the cql index """
return 'index_{0}'.format(self.db_field_name)
@property
def cql(self):
return self.get_cql()
def get_cql(self):
return '"{0}"'.format(self.db_field_name)
def _val_is_null(self, val):
""" determines if the given value equates to a null value for the given column type """
return val is None
@property
def sub_types(self):
return []
@property
def cql_type(self):
return _cqltypes[self.db_type]
class Blob(Column):
"""
Stores a raw binary value
"""
db_type = 'blob'
def to_database(self, value):
if not isinstance(value, (six.binary_type, bytearray)):
raise Exception("expecting a binary, got a %s" % type(value))
val = super(Blob, self).to_database(value)
return bytearray(val)
Bytes = Blob
class Ascii(Column):
"""
Stores a US-ASCII character string
"""
db_type = 'ascii'
class Inet(Column):
"""
Stores an IP address in IPv4 or IPv6 format
"""
db_type = 'inet'
class Text(Column):
"""
Stores a UTF-8 encoded string
"""
db_type = 'text'
def __init__(self, min_length=None, max_length=None, **kwargs):
"""
:param int min_length: Sets the minimum length of this string, for validation purposes.
Defaults to 1 if this is a ``required`` column. Otherwise, None.
:param int max_length: Sets the maximum length of this string, for validation purposes.
"""
self.min_length = min_length or (1 if kwargs.get('required', False) else None)
self.max_length = max_length
super(Text, self).__init__(**kwargs)
def validate(self, value):
value = super(Text, self).validate(value)
if value is None:
return
if not isinstance(value, (six.string_types, bytearray)) and value is not None:
raise ValidationError('{0} {1} is not a string'.format(self.column_name, type(value)))
if self.max_length:
if len(value) > self.max_length:
raise ValidationError('{0} is longer than {1} characters'.format(self.column_name, self.max_length))
if self.min_length:
if len(value) < self.min_length:
raise ValidationError('{0} is shorter than {1} characters'.format(self.column_name, self.min_length))
return value
class Integer(Column):
"""
Stores a 32-bit signed integer value
"""
db_type = 'int'
def validate(self, value):
val = super(Integer, self).validate(value)
if val is None:
return
try:
return int(val)
except (TypeError, ValueError):
raise ValidationError("{0} {1} can't be converted to integral value".format(self.column_name, value))
def to_python(self, value):
return self.validate(value)
def to_database(self, value):
return self.validate(value)
class TinyInt(Integer):
"""
Stores an 8-bit signed integer value
.. versionadded:: 2.6.0
requires C* 2.2+ and protocol v4+
"""
db_type = 'tinyint'
class SmallInt(Integer):
"""
Stores a 16-bit signed integer value
.. versionadded:: 2.6.0
requires C* 2.2+ and protocol v4+
"""
db_type = 'smallint'
class BigInt(Integer):
"""
Stores a 64-bit signed integer value
"""
db_type = 'bigint'
class VarInt(Column):
"""
Stores an arbitrary-precision integer
"""
db_type = 'varint'
def validate(self, value):
val = super(VarInt, self).validate(value)
if val is None:
return
try:
return int(val)
except (TypeError, ValueError):
raise ValidationError(
"{0} {1} can't be converted to integral value".format(self.column_name, value))
def to_python(self, value):
return self.validate(value)
def to_database(self, value):
return self.validate(value)
class CounterValueManager(BaseValueManager):
def __init__(self, instance, column, value):
super(CounterValueManager, self).__init__(instance, column, value)
self.value = self.value or 0
self.previous_value = self.previous_value or 0
class Counter(Integer):
"""
Stores a counter that can be incremented and decremented
"""
db_type = 'counter'
value_manager = CounterValueManager
def __init__(self,
index=False,
db_field=None,
required=False):
super(Counter, self).__init__(
primary_key=False,
partition_key=False,
index=index,
db_field=db_field,
default=0,
required=required,
)
class DateTime(Column):
"""
Stores a datetime value
"""
db_type = 'timestamp'
truncate_microseconds = False
"""
Set this ``True`` to have model instances truncate the date, quantizing it in the same way it will be in the database.
This allows equality comparison between assigned values and values read back from the database::
DateTime.truncate_microseconds = True
assert Model.create(id=0, d=datetime.utcnow()) == Model.objects(id=0).first()
Defaults to ``False`` to preserve legacy behavior. May change in the future.
"""
def to_python(self, value):
if value is None:
return
if isinstance(value, datetime):
if DateTime.truncate_microseconds:
us = value.microsecond
truncated_us = us // 1000 * 1000
return value - timedelta(microseconds=us - truncated_us)
else:
return value
elif isinstance(value, date):
return datetime(*(value.timetuple()[:6]))
return datetime.utcfromtimestamp(value)
def to_database(self, value):
value = super(DateTime, self).to_database(value)
if value is None:
return
if not isinstance(value, datetime):
if isinstance(value, date):
value = datetime(value.year, value.month, value.day)
else:
raise ValidationError("{0} '{1}' is not a datetime object".format(self.column_name, value))
epoch = datetime(1970, 1, 1, tzinfo=value.tzinfo)
offset = get_total_seconds(epoch.tzinfo.utcoffset(epoch)) if epoch.tzinfo else 0
return int((get_total_seconds(value - epoch) - offset) * 1000)
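# Editor's note (worked example): for a naive datetime(1970, 1, 1, 0, 0, 1),
# epoch is datetime(1970, 1, 1), offset is 0, and the stored value is
# int(1.0 * 1000) == 1000, i.e. milliseconds since the UNIX epoch, which is
# what the CQL 'timestamp' type expects.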
class Date(Column):
"""
Stores a simple date, with no time-of-day
.. versionchanged:: 2.6.0
removed overload of Date and DateTime. DateTime is a drop-in replacement for legacy models
requires C* 2.2+ and protocol v4+
"""
db_type = 'date'
def to_database(self, value):
value = super(Date, self).to_database(value)
if value is None:
return
# need to translate to int version because some dates are not representable in
# string form (datetime limitation)
d = value if isinstance(value, util.Date) else util.Date(value)
return d.days_from_epoch + SimpleDateType.EPOCH_OFFSET_DAYS
class Time(Column):
"""
Stores a timezone-naive time-of-day, with nanosecond precision
.. versionadded:: 2.6.0
requires C* 2.2+ and protocol v4+
"""
db_type = 'time'
def to_database(self, value):
value = super(Time, self).to_database(value)
if value is None:
return
# str(util.Time) yields desired CQL encoding
return value if isinstance(value, util.Time) else util.Time(value)
class UUID(Column):
"""
Stores a type 1 or 4 UUID
"""
db_type = 'uuid'
def validate(self, value):
val = super(UUID, self).validate(value)
if val is None:
return
if isinstance(val, _UUID):
return val
if isinstance(val, six.string_types):
try:
return _UUID(val)
except ValueError:
# fall-through to error
pass
raise ValidationError("{0} {1} is not a valid uuid".format(
self.column_name, value))
def to_python(self, value):
return self.validate(value)
def to_database(self, value):
return self.validate(value)
class TimeUUID(UUID):
"""
UUID containing timestamp
"""
db_type = 'timeuuid'
class Boolean(Column):
"""
Stores a boolean True or False value
"""
db_type = 'boolean'
def validate(self, value):
""" Always returns a Python boolean. """
value = super(Boolean, self).validate(value)
if value is not None:
value = bool(value)
return value
def to_python(self, value):
return self.validate(value)
class BaseFloat(Column):
def validate(self, value):
value = super(BaseFloat, self).validate(value)
if value is None:
return
try:
return float(value)
except (TypeError, ValueError):
raise ValidationError("{0} {1} is not a valid float".format(self.column_name, value))
def to_python(self, value):
return self.validate(value)
def to_database(self, value):
return self.validate(value)
class Float(BaseFloat):
"""
Stores a single-precision floating-point value
"""
db_type = 'float'
class Double(BaseFloat):
"""
Stores a double-precision floating-point value
"""
db_type = 'double'
class Decimal(Column):
"""
Stores a variable precision decimal value
"""
db_type = 'decimal'
def validate(self, value):
from decimal import Decimal as _Decimal
from decimal import InvalidOperation
val = super(Decimal, self).validate(value)
if val is None:
return
try:
return _Decimal(repr(val)) if isinstance(val, float) else _Decimal(val)
except InvalidOperation:
raise ValidationError("{0} '{1}' can't be coerced to decimal".format(self.column_name, val))
def to_python(self, value):
return self.validate(value)
def to_database(self, value):
return self.validate(value)
class BaseCollectionColumn(Column):
"""
Base Container type for collection-like columns.
https://cassandra.apache.org/doc/cql3/CQL.html#collections
"""
def __init__(self, types, **kwargs):
"""
:param types: a sequence of sub types in this collection
"""
instances = []
for t in types:
inheritance_comparator = issubclass if isinstance(t, type) else isinstance
if not inheritance_comparator(t, Column):
raise ValidationError("%s is not a column class" % (t,))
if t.db_type is None:
raise ValidationError("%s is an abstract type" % (t,))
inst = t() if isinstance(t, type) else t
if isinstance(t, BaseCollectionColumn):
inst._freeze_db_type()
instances.append(inst)
self.types = instances
super(BaseCollectionColumn, self).__init__(**kwargs)
def validate(self, value):
value = super(BaseCollectionColumn, self).validate(value)
# It is dangerous to let collections have more than 65535 elements.
# See: https://issues.apache.org/jira/browse/CASSANDRA-5428
if value is not None and len(value) > 65535:
raise ValidationError("{0} Collection can't have more than 65535 elements.".format(self.column_name))
return value
def _val_is_null(self, val):
return not val
def _freeze_db_type(self):
if not self.db_type.startswith('frozen'):
self.db_type = "frozen<%s>" % (self.db_type,)
@property
def sub_types(self):
return self.types
@property
def cql_type(self):
return _cqltypes[self.__class__.__name__.lower()].apply_parameters([c.cql_type for c in self.types])
class Tuple(BaseCollectionColumn):
"""
Stores a fixed-length set of positional values
http://docs.datastax.com/en/cql/3.1/cql/cql_reference/tupleType.html
"""
def __init__(self, *args, **kwargs):
"""
:param args: column types representing tuple composition
"""
if not args:
raise ValueError("Tuple must specify at least one inner type")
super(Tuple, self).__init__(args, **kwargs)
self.db_type = 'tuple<{0}>'.format(', '.join(typ.db_type for typ in self.types))
def validate(self, value):
val = super(Tuple, self).validate(value)
if val is None:
return
if len(val) > len(self.types):
raise ValidationError("Value %r has more fields than tuple definition (%s)" %
(val, ', '.join(t.db_type for t in self.types)))
return tuple(t.validate(v) for t, v in zip(self.types, val))
def to_python(self, value):
if value is None:
return tuple()
return tuple(t.to_python(v) for t, v in zip(self.types, value))
def to_database(self, value):
if value is None:
return
return tuple(t.to_database(v) for t, v in zip(self.types, value))
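# Editor's sketch (illustrative, hypothetical field name): a fixed-shape tuple
# column can be declared as
#
#     pair = columns.Tuple(columns.Integer(), columns.Text())
#
# validate() checks each element against the matching inner column, so (1, 'a')
# passes while (1, 'a', 'extra') raises ValidationError for having too many fields.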
class BaseContainerColumn(BaseCollectionColumn):
pass
class Set(BaseContainerColumn):
"""
Stores a set of unordered, unique values
http://www.datastax.com/documentation/cql/3.1/cql/cql_using/use_set_t.html
"""
_python_type_hashable = False
def __init__(self, value_type, strict=True, default=set, **kwargs):
"""
:param value_type: a column class indicating the types of the value
:param strict: sets whether non set values will be coerced to set
type on validation, or raise a validation error, defaults to True
"""
self.strict = strict
super(Set, self).__init__((value_type,), default=default, **kwargs)
self.value_col = self.types[0]
if not self.value_col._python_type_hashable:
raise ValidationError("Cannot create a Set with unhashable value type (see PYTHON-494)")
self.db_type = 'set<{0}>'.format(self.value_col.db_type)
def validate(self, value):
val = super(Set, self).validate(value)
if val is None:
return
types = (set, util.SortedSet) if self.strict else (set, util.SortedSet, list, tuple)
if not isinstance(val, types):
if self.strict:
raise ValidationError('{0} {1} is not a set object'.format(self.column_name, val))
else:
raise ValidationError('{0} {1} cannot be coerced to a set object'.format(self.column_name, val))
if None in val:
raise ValidationError("{0} None not allowed in a set".format(self.column_name))
# TODO: stop doing this conversion because it doesn't support non-hashable collections as keys (cassandra does)
# will need to start using the cassandra.util types in the next major rev (PYTHON-494)
return set(self.value_col.validate(v) for v in val)
def to_python(self, value):
if value is None:
return set()
return set(self.value_col.to_python(v) for v in value)
def to_database(self, value):
if value is None:
return None
return set(self.value_col.to_database(v) for v in value)
class List(BaseContainerColumn):
"""
Stores a list of ordered values
http://www.datastax.com/documentation/cql/3.1/cql/cql_using/use_list_t.html
"""
_python_type_hashable = False
def __init__(self, value_type, default=list, **kwargs):
"""
:param value_type: a column class indicating the types of the value
"""
super(List, self).__init__((value_type,), default=default, **kwargs)
self.value_col = self.types[0]
self.db_type = 'list<{0}>'.format(self.value_col.db_type)
def validate(self, value):
val = super(List, self).validate(value)
if val is None:
return
if not isinstance(val, (set, list, tuple)):
raise ValidationError('{0} {1} is not a list object'.format(self.column_name, val))
if None in val:
raise ValidationError("{0} None is not allowed in a list".format(self.column_name))
return [self.value_col.validate(v) for v in val]
def to_python(self, value):
if value is None:
return []
return [self.value_col.to_python(v) for v in value]
def to_database(self, value):
if value is None:
return None
return [self.value_col.to_database(v) for v in value]
class Map(BaseContainerColumn):
"""
Stores a key -> value map (dictionary)
http://www.datastax.com/documentation/cql/3.1/cql/cql_using/use_map_t.html
"""
_python_type_hashable = False
def __init__(self, key_type, value_type, default=dict, **kwargs):
"""
:param key_type: a column class indicating the types of the key
:param value_type: a column class indicating the types of the value
"""
super(Map, self).__init__((key_type, value_type), default=default, **kwargs)
self.key_col = self.types[0]
self.value_col = self.types[1]
if not self.key_col._python_type_hashable:
raise ValidationError("Cannot create a Map with unhashable key type (see PYTHON-494)")
self.db_type = 'map<{0}, {1}>'.format(self.key_col.db_type, self.value_col.db_type)
def validate(self, value):
val = super(Map, self).validate(value)
if val is None:
return
if not isinstance(val, (dict, util.OrderedMap)):
raise ValidationError('{0} {1} is not a dict object'.format(self.column_name, val))
if None in val:
raise ValidationError("{0} None is not allowed in a map".format(self.column_name))
# TODO: stop doing this conversion because it doesn't support non-hashable collections as keys (cassandra does)
# will need to start using the cassandra.util types in the next major rev (PYTHON-494)
return dict((self.key_col.validate(k), self.value_col.validate(v)) for k, v in val.items())
def to_python(self, value):
if value is None:
return {}
if value is not None:
return dict((self.key_col.to_python(k), self.value_col.to_python(v)) for k, v in value.items())
def to_database(self, value):
if value is None:
return None
return dict((self.key_col.to_database(k), self.value_col.to_database(v)) for k, v in value.items())
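# Editor's sketch (illustrative, hypothetical field name): a map column pairing
# text keys with integer values can be declared as
#
#     counts = columns.Map(columns.Text, columns.Integer)
#
# validate() runs the key and value columns over every pair and rejects a None
# key, since CQL maps cannot store null keys.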
class UDTValueManager(BaseValueManager):
@property
def changed(self):
return self.value != self.previous_value or (self.value is not None and self.value.has_changed_fields())
def reset_previous_value(self):
if self.value is not None:
self.value.reset_changed_fields()
self.previous_value = copy(self.value)
class UserDefinedType(Column):
"""
User Defined Type column
http://www.datastax.com/documentation/cql/3.1/cql/cql_using/cqlUseUDT.html
These columns are represented by a specialization of :class:`cassandra.cqlengine.usertype.UserType`.
Please see :ref:`user_types` for examples and discussion.
"""
value_manager = UDTValueManager
def __init__(self, user_type, **kwargs):
"""
:param type user_type: specifies the :class:`~.cqlengine.usertype.UserType` model of the column
"""
self.user_type = user_type
self.db_type = "frozen<%s>" % user_type.type_name()
super(UserDefinedType, self).__init__(**kwargs)
@property
def sub_types(self):
return list(self.user_type._fields.values())
@property
def cql_type(self):
return UserType.make_udt_class(keyspace='', udt_name=self.user_type.type_name(),
field_names=[c.db_field_name for c in self.user_type._fields.values()],
field_types=[c.cql_type for c in self.user_type._fields.values()])
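# Editor's sketch (illustrative, hypothetical names):
#
#     from cassandra.cqlengine.usertype import UserType as ModelUserType
#
#     class Address(ModelUserType):
#         street = columns.Text()
#         zip_code = columns.Integer()
#
#     class Person(Model):
#         id = columns.UUID(primary_key=True)
#         address = columns.UserDefinedType(Address)
#
# The column's db_type becomes frozen<...> based on the UserType's type name,
# and sub_types/cql_type are derived from the UserType's fields.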
def resolve_udts(col_def, out_list):
for col in col_def.sub_types:
resolve_udts(col, out_list)
if isinstance(col_def, UserDefinedType):
out_list.append(col_def.user_type)
class _PartitionKeysToken(Column):
"""
virtual column representing token of partition columns.
Used by filter(pk__token=Token(...)) filters
"""
def __init__(self, model):
self.partition_columns = model._partition_keys.values()
super(_PartitionKeysToken, self).__init__(partition_key=True)
@property
def db_field_name(self):
return 'token({0})'.format(', '.join(['"{0}"'.format(c.db_field_name) for c in self.partition_columns]))
|
jvkops/titanium_mobile
|
refs/heads/master
|
node_modules/ioslib/node_modules/node-ios-device/node_modules/node-gyp/gyp/pylib/gyp/generator/make.py
|
240
|
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Notes:
#
# This is all roughly based on the Makefile system used by the Linux
# kernel, but is a non-recursive make -- we put the entire dependency
# graph in front of make and let it figure it out.
#
# The code below generates a separate .mk file for each target, but
# all are sourced by the top-level Makefile. This means that all
# variables in .mk-files clobber one another. Be careful to use :=
# where appropriate for immediate evaluation, and similarly to watch
# that you're not relying on a variable value to last between different
# .mk files.
#
# TODOs:
#
# Global settings and utility functions are currently stuffed in the
# toplevel Makefile. It may make sense to generate some .mk files on
# the side to keep the files readable.
import os
import re
import sys
import subprocess
import gyp
import gyp.common
import gyp.xcode_emulation
from gyp.common import GetEnvironFallback
from gyp.common import GypError
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'SHARED_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'INTERMEDIATE_DIR': '$(obj).$(TOOLSET)/$(TARGET)/geni',
'SHARED_INTERMEDIATE_DIR': '$(obj)/gen',
'PRODUCT_DIR': '$(builddir)',
'RULE_INPUT_ROOT': '%(INPUT_ROOT)s', # This gets expanded by Python.
'RULE_INPUT_DIRNAME': '%(INPUT_DIRNAME)s', # This gets expanded by Python.
'RULE_INPUT_PATH': '$(abspath $<)',
'RULE_INPUT_EXT': '$(suffix $<)',
'RULE_INPUT_NAME': '$(notdir $<)',
'CONFIGURATION_NAME': '$(BUILDTYPE)',
}
# Make supports multiple toolsets
generator_supports_multiple_toolsets = True
# Request sorted dependencies in the order from dependents to dependencies.
generator_wants_sorted_dependencies = False
# Placates pylint.
generator_additional_non_configuration_keys = []
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
generator_filelist_paths = None
def CalculateVariables(default_variables, params):
"""Calculate additional variables for use in the build (called by gyp)."""
flavor = gyp.common.GetFlavor(params)
if flavor == 'mac':
default_variables.setdefault('OS', 'mac')
default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')
default_variables.setdefault('SHARED_LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
default_variables.setdefault('LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
# Copy additional generator configuration data from Xcode, which is shared
# by the Mac Make generator.
import gyp.generator.xcode as xcode_generator
global generator_additional_non_configuration_keys
generator_additional_non_configuration_keys = getattr(xcode_generator,
'generator_additional_non_configuration_keys', [])
global generator_additional_path_sections
generator_additional_path_sections = getattr(xcode_generator,
'generator_additional_path_sections', [])
global generator_extra_sources_for_rules
generator_extra_sources_for_rules = getattr(xcode_generator,
'generator_extra_sources_for_rules', [])
COMPILABLE_EXTENSIONS.update({'.m': 'objc', '.mm' : 'objcxx'})
else:
operating_system = flavor
if flavor == 'android':
operating_system = 'linux' # Keep this legacy behavior for now.
default_variables.setdefault('OS', operating_system)
default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')
default_variables.setdefault('SHARED_LIB_DIR','$(builddir)/lib.$(TOOLSET)')
default_variables.setdefault('LIB_DIR', '$(obj).$(TOOLSET)')
def CalculateGeneratorInputInfo(params):
"""Calculate the generator specific info that gets fed to input (called by
gyp)."""
generator_flags = params.get('generator_flags', {})
android_ndk_version = generator_flags.get('android_ndk_version', None)
# Android NDK requires a strict link order.
if android_ndk_version:
global generator_wants_sorted_dependencies
generator_wants_sorted_dependencies = True
output_dir = params['options'].generator_output or \
params['options'].toplevel_dir
builddir_name = generator_flags.get('output_dir', 'out')
qualified_out_dir = os.path.normpath(os.path.join(
output_dir, builddir_name, 'gypfiles'))
global generator_filelist_paths
generator_filelist_paths = {
'toplevel': params['options'].toplevel_dir,
'qualified_out_dir': qualified_out_dir,
}
# The .d checking code below uses these functions:
# wildcard, sort, foreach, shell, wordlist
# wildcard can handle spaces, the rest can't.
# Since I could find no way to make foreach work with spaces in filenames
# correctly, the .d files have spaces replaced with another character. The .d
# file for
# Chromium\ Framework.framework/foo
# is for example
# out/Release/.deps/out/Release/Chromium?Framework.framework/foo
# This is the replacement character.
SPACE_REPLACEMENT = '?'
LINK_COMMANDS_LINUX = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^)
# Due to circular dependencies between libraries :(, we wrap the
# special "figure out circular dependencies" flags around the entire
# input list during linking.
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS)
# We support two kinds of shared objects (.so):
# 1) shared_library, which is just bundling together many dependent libraries
# into a link line.
# 2) loadable_module, which is generating a module intended for dlopen().
#
# They differ only slightly:
# In the former case, we want to package all dependent code into the .so.
# In the latter case, we want to package just the API exposed by the
# outermost module.
# This means shared_library uses --whole-archive, while loadable_module doesn't.
# (Note that --whole-archive is incompatible with the --start-group used in
# normal linking.)
# Other shared-object link notes:
# - Set SONAME to the library filename so our binaries don't reference
# the local, absolute paths used on the link command-line.
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
"""
LINK_COMMANDS_MAC = """\
quiet_cmd_alink = LIBTOOL-STATIC $@
cmd_alink = rm -f $@ && ./gyp-mac-tool filter-libtool libtool $(GYP_LIBTOOLFLAGS) -static -o $@ $(filter %.o,$^)
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -bundle $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
LINK_COMMANDS_ANDROID = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^)
# Due to circular dependencies between libraries :(, we wrap the
# special "figure out circular dependencies" flags around the entire
# input list during linking.
quiet_cmd_link = LINK($(TOOLSET)) $@
quiet_cmd_link_host = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS)
cmd_link_host = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
# Other shared-object link notes:
# - Set SONAME to the library filename so our binaries don't reference
# the local, absolute paths used on the link command-line.
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
quiet_cmd_solink_module_host = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module_host = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
LINK_COMMANDS_AIX = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
# Header of toplevel Makefile.
# This should go into the build tree, but it's easier to keep it here for now.
SHARED_HEADER = ("""\
# We borrow heavily from the kernel build setup, though we are simpler since
# we don't have Kconfig tweaking settings on us.
# The implicit make rules have it looking for RCS files, among other things.
# We instead explicitly write all the rules we care about.
# It's even quicker (saves ~200ms) to pass -r on the command line.
MAKEFLAGS=-r
# The source directory tree.
srcdir := %(srcdir)s
abs_srcdir := $(abspath $(srcdir))
# The name of the builddir.
builddir_name ?= %(builddir)s
# The V=1 flag on command line makes us verbosely print command lines.
ifdef V
quiet=
else
quiet=quiet_
endif
# Specify BUILDTYPE=Release on the command line for a release build.
BUILDTYPE ?= %(default_configuration)s
# Directory all our build output goes into.
# Note that this must be two directories beneath src/ for unit tests to pass,
# as they reach into the src/ directory for data with relative paths.
builddir ?= $(builddir_name)/$(BUILDTYPE)
abs_builddir := $(abspath $(builddir))
depsdir := $(builddir)/.deps
# Object output directory.
obj := $(builddir)/obj
abs_obj := $(abspath $(obj))
# We build up a list of every single one of the targets so we can slurp in the
# generated dependency rule Makefiles in one pass.
all_deps :=
%(make_global_settings)s
CC.target ?= %(CC.target)s
CFLAGS.target ?= $(CFLAGS)
CXX.target ?= %(CXX.target)s
CXXFLAGS.target ?= $(CXXFLAGS)
LINK.target ?= %(LINK.target)s
LDFLAGS.target ?= $(LDFLAGS)
AR.target ?= $(AR)
# C++ apps need to be linked with g++.
LINK ?= $(CXX.target)
# TODO(evan): move all cross-compilation logic to gyp-time so we don't need
# to replicate this environment fallback in make as well.
CC.host ?= %(CC.host)s
CFLAGS.host ?=
CXX.host ?= %(CXX.host)s
CXXFLAGS.host ?=
LINK.host ?= %(LINK.host)s
LDFLAGS.host ?=
AR.host ?= %(AR.host)s
# Define a dir function that can handle spaces.
# http://www.gnu.org/software/make/manual/make.html#Syntax-of-Functions
# "leading spaces cannot appear in the text of the first argument as written.
# These characters can be put into the argument value by variable substitution."
empty :=
space := $(empty) $(empty)
# http://stackoverflow.com/questions/1189781/using-make-dir-or-notdir-on-a-path-with-spaces
replace_spaces = $(subst $(space),""" + SPACE_REPLACEMENT + """,$1)
unreplace_spaces = $(subst """ + SPACE_REPLACEMENT + """,$(space),$1)
dirx = $(call unreplace_spaces,$(dir $(call replace_spaces,$1)))
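# Illustrative example (editor's note, not generated output): for a path such as
# "out/My App/foo.o", replace_spaces yields "out/My?App/foo.o", $(dir ...) strips
# the file part, and unreplace_spaces restores the space, so
# $(call dirx,out/My App/foo.o) expands to "out/My App/".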
# Flags to make gcc output dependency info. Note that you need to be
# careful here to use the flags that ccache and distcc can understand.
# We write to a dep file on the side first and then rename at the end
# so we can't end up with a broken dep file.
depfile = $(depsdir)/$(call replace_spaces,$@).d
DEPFLAGS = -MMD -MF $(depfile).raw
# We have to fixup the deps output in a few ways.
# (1) the file output should mention the proper .o file.
# ccache or distcc lose the path to the target, so we convert a rule of
# the form:
# foobar.o: DEP1 DEP2
# into
# path/to/foobar.o: DEP1 DEP2
# (2) we want missing files not to cause us to fail to build.
# We want to rewrite
# foobar.o: DEP1 DEP2 \\
# DEP3
# to
# DEP1:
# DEP2:
# DEP3:
# so if the files are missing, they're just considered phony rules.
# We have to do some pretty insane escaping to get those backslashes
# and dollar signs past make, the shell, and sed at the same time.
# Doesn't work with spaces, but that's fine: .d files have spaces in
# their names replaced with other characters."""
r"""
define fixup_dep
# The depfile may not exist if the input file didn't have any #includes.
touch $(depfile).raw
# Fixup path as in (1).
sed -e "s|^$(notdir $@)|$@|" $(depfile).raw >> $(depfile)
# Add extra rules as in (2).
# We remove slashes and replace spaces with new lines;
# remove blank lines;
# delete the first line and append a colon to the remaining lines.
sed -e 's|\\||' -e 'y| |\n|' $(depfile).raw |\
grep -v '^$$' |\
sed -e 1d -e 's|$$|:|' \
>> $(depfile)
rm $(depfile).raw
endef
"""
"""
# Command definitions:
# - cmd_foo is the actual command to run;
# - quiet_cmd_foo is the brief-output summary of the command.
quiet_cmd_cc = CC($(TOOLSET)) $@
cmd_cc = $(CC.$(TOOLSET)) $(GYP_CFLAGS) $(DEPFLAGS) $(CFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_cxx = CXX($(TOOLSET)) $@
cmd_cxx = $(CXX.$(TOOLSET)) $(GYP_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
%(extra_commands)s
quiet_cmd_touch = TOUCH $@
cmd_touch = touch $@
quiet_cmd_copy = COPY $@
# send stderr to /dev/null to ignore messages when linking directories.
cmd_copy = rm -rf "$@" && cp -af "$<" "$@"
%(link_commands)s
"""
r"""
# Define an escape_quotes function to escape single quotes.
# This allows us to handle quotes properly as long as we always
# use single quotes and escape_quotes.
escape_quotes = $(subst ','\'',$(1))
# This comment is here just to include a ' to unconfuse syntax highlighting.
# Define an escape_vars function to escape '$' variable syntax.
# This allows us to read/write command lines with shell variables (e.g.
# $LD_LIBRARY_PATH), without triggering make substitution.
escape_vars = $(subst $$,$$$$,$(1))
# Helper that expands to a shell command to echo a string exactly as it is in
# make. This uses printf instead of echo because printf's behaviour with respect
# to escape sequences is more portable than echo's across different shells
# (e.g., dash, bash).
exact_echo = printf '%%s\n' '$(call escape_quotes,$(1))'
"""
"""
# Helper to compare the command we're about to run against the command
# we logged the last time we ran the command. Produces an empty
# string (false) when the commands match.
# Tricky point: Make has no string-equality test function.
# The kernel uses the following, but it seems like it would have false
# positives, where one string reordered its arguments.
# arg_check = $(strip $(filter-out $(cmd_$(1)), $(cmd_$@)) \\
# $(filter-out $(cmd_$@), $(cmd_$(1))))
# We instead substitute each for the empty string into the other, and
# say they're equal if both substitutions produce the empty string.
# .d files contain """ + SPACE_REPLACEMENT + \
""" instead of spaces, take that into account.
command_changed = $(or $(subst $(cmd_$(1)),,$(cmd_$(call replace_spaces,$@))),\\
$(subst $(cmd_$(call replace_spaces,$@)),,$(cmd_$(1))))
# Helper that is non-empty when a prerequisite changes.
# Normally make does this implicitly, but we force rules to always run
# so we can check their command lines.
# $? -- new prerequisites
# $| -- order-only dependencies
prereq_changed = $(filter-out FORCE_DO_CMD,$(filter-out $|,$?))
# Helper that executes all postbuilds until one fails.
define do_postbuilds
@E=0;\\
for p in $(POSTBUILDS); do\\
eval $$p;\\
E=$$?;\\
if [ $$E -ne 0 ]; then\\
break;\\
fi;\\
done;\\
if [ $$E -ne 0 ]; then\\
rm -rf "$@";\\
exit $$E;\\
fi
endef
# do_cmd: run a command via the above cmd_foo names, if necessary.
# Should always run for a given target to handle command-line changes.
# Second argument, if non-zero, makes it do asm/C/C++ dependency munging.
# Third argument, if non-zero, makes it do POSTBUILDS processing.
# Note: We intentionally do NOT call dirx for depfile, since it contains """ + \
SPACE_REPLACEMENT + """ for
# spaces already and dirx strips the """ + SPACE_REPLACEMENT + \
""" characters.
define do_cmd
$(if $(or $(command_changed),$(prereq_changed)),
@$(call exact_echo, $($(quiet)cmd_$(1)))
@mkdir -p "$(call dirx,$@)" "$(dir $(depfile))"
$(if $(findstring flock,$(word %(flock_index)d,$(cmd_$1))),
@$(cmd_$(1))
@echo " $(quiet_cmd_$(1)): Finished",
@$(cmd_$(1))
)
@$(call exact_echo,$(call escape_vars,cmd_$(call replace_spaces,$@) := $(cmd_$(1)))) > $(depfile)
@$(if $(2),$(fixup_dep))
$(if $(and $(3), $(POSTBUILDS)),
$(call do_postbuilds)
)
)
endef
# Declare the "%(default_target)s" target first so it is the default,
# even though we don't have the deps yet.
.PHONY: %(default_target)s
%(default_target)s:
# make looks for ways to re-generate included makefiles, but in our case, we
# don't have a direct way. Explicitly telling make that it has nothing to do
# for them makes it go faster.
%%.d: ;
# Use FORCE_DO_CMD to force a target to run. Should be coupled with
# do_cmd.
.PHONY: FORCE_DO_CMD
FORCE_DO_CMD:
""")
SHARED_HEADER_MAC_COMMANDS = """
quiet_cmd_objc = CXX($(TOOLSET)) $@
cmd_objc = $(CC.$(TOOLSET)) $(GYP_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $<
quiet_cmd_objcxx = CXX($(TOOLSET)) $@
cmd_objcxx = $(CXX.$(TOOLSET)) $(GYP_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $<
# Commands for precompiled header files.
quiet_cmd_pch_c = CXX($(TOOLSET)) $@
cmd_pch_c = $(CC.$(TOOLSET)) $(GYP_PCH_CFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_pch_cc = CXX($(TOOLSET)) $@
cmd_pch_cc = $(CC.$(TOOLSET)) $(GYP_PCH_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_pch_m = CXX($(TOOLSET)) $@
cmd_pch_m = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $<
quiet_cmd_pch_mm = CXX($(TOOLSET)) $@
cmd_pch_mm = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $<
# gyp-mac-tool is written next to the root Makefile by gyp.
# Use $(4) for the command, since $(2) and $(3) are used as flag by do_cmd
# already.
quiet_cmd_mac_tool = MACTOOL $(4) $<
cmd_mac_tool = ./gyp-mac-tool $(4) $< "$@"
quiet_cmd_mac_package_framework = PACKAGE FRAMEWORK $@
cmd_mac_package_framework = ./gyp-mac-tool package-framework "$@" $(4)
quiet_cmd_infoplist = INFOPLIST $@
cmd_infoplist = $(CC.$(TOOLSET)) -E -P -Wno-trigraphs -x c $(INFOPLIST_DEFINES) "$<" -o "$@"
"""
def WriteRootHeaderSuffixRules(writer):
extensions = sorted(COMPILABLE_EXTENSIONS.keys(), key=str.lower)
writer.write('# Suffix rules, putting all outputs into $(obj).\n')
for ext in extensions:
writer.write('$(obj).$(TOOLSET)/%%.o: $(srcdir)/%%%s FORCE_DO_CMD\n' % ext)
writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
writer.write('\n# Try building from generated source, too.\n')
for ext in extensions:
writer.write(
'$(obj).$(TOOLSET)/%%.o: $(obj).$(TOOLSET)/%%%s FORCE_DO_CMD\n' % ext)
writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
writer.write('\n')
for ext in extensions:
writer.write('$(obj).$(TOOLSET)/%%.o: $(obj)/%%%s FORCE_DO_CMD\n' % ext)
writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
writer.write('\n')
SHARED_HEADER_SUFFIX_RULES_COMMENT1 = ("""\
# Suffix rules, putting all outputs into $(obj).
""")
SHARED_HEADER_SUFFIX_RULES_COMMENT2 = ("""\
# Try building from generated source, too.
""")
SHARED_FOOTER = """\
# "all" is a concatenation of the "all" targets from all the included
# sub-makefiles. This is just here to clarify.
all:
# Add in dependency-tracking rules. $(all_deps) is the list of every single
# target in our tree. Only consider the ones with .d (dependency) info:
d_files := $(wildcard $(foreach f,$(all_deps),$(depsdir)/$(f).d))
ifneq ($(d_files),)
include $(d_files)
endif
"""
header = """\
# This file is generated by gyp; do not edit.
"""
# Maps every compilable file extension to the do_cmd that compiles it.
COMPILABLE_EXTENSIONS = {
'.c': 'cc',
'.cc': 'cxx',
'.cpp': 'cxx',
'.cxx': 'cxx',
'.s': 'cc',
'.S': 'cc',
}
def Compilable(filename):
"""Return true if the file is compilable (should be in OBJS)."""
for res in (filename.endswith(e) for e in COMPILABLE_EXTENSIONS):
if res:
return True
return False
def Linkable(filename):
"""Return true if the file is linkable (should be on the link line)."""
return filename.endswith('.o')
def Target(filename):
"""Translate a compilable filename to its .o target."""
return os.path.splitext(filename)[0] + '.o'
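# Editor's note (illustrative): Compilable('foo.cc') and Compilable('bar.S') are
# True, Compilable('baz.h') is False, and Target('src/foo.cc') maps to
# 'src/foo.o', the object file the suffix rules below will produce.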
def EscapeShellArgument(s):
"""Quotes an argument so that it will be interpreted literally by a POSIX
shell. Taken from
http://stackoverflow.com/questions/35817/whats-the-best-way-to-escape-ossystem-calls-in-python
"""
return "'" + s.replace("'", "'\\''") + "'"
def EscapeMakeVariableExpansion(s):
"""Make has its own variable expansion syntax using $. We must escape it for
the string to be interpreted literally."""
return s.replace('$', '$$')
def EscapeCppDefine(s):
"""Escapes a CPP define so that it will reach the compiler unaltered."""
s = EscapeShellArgument(s)
s = EscapeMakeVariableExpansion(s)
# '#' characters must be escaped even embedded in a string, else Make will
# treat it as the start of a comment.
return s.replace('#', r'\#')
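# Editor's note (illustrative): a define such as NAME="a b#c" is first shell-quoted,
# then make-escaped, yielding the token 'NAME="a b\#c"', so the shell, make, and the
# compiler all see the original value unchanged.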
def QuoteIfNecessary(string):
"""TODO: Should this ideally be replaced with one or more of the above
functions?"""
if '"' in string:
string = '"' + string.replace('"', '\\"') + '"'
return string
def StringToMakefileVariable(string):
"""Convert a string to a value that is acceptable as a make variable name."""
return re.sub('[^a-zA-Z0-9_]', '_', string)
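# Editor's note (illustrative): StringToMakefileVariable('out/Default:my-app#host')
# returns 'out_Default_my_app_host', which is safe to use in a make variable name.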
srcdir_prefix = ''
def Sourceify(path):
"""Convert a path to its source directory form."""
if '$(' in path:
return path
if os.path.isabs(path):
return path
return srcdir_prefix + path
def QuoteSpaces(s, quote=r'\ '):
return s.replace(' ', quote)
# TODO: Avoid code duplication with _ValidateSourcesForMSVSProject in msvs.py.
def _ValidateSourcesForOSX(spec, all_sources):
"""Makes sure if duplicate basenames are not specified in the source list.
Arguments:
spec: The target dictionary containing the properties of the target.
"""
if spec.get('type', None) != 'static_library':
return
basenames = {}
for source in all_sources:
name, ext = os.path.splitext(source)
is_compiled_file = ext in [
'.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S']
if not is_compiled_file:
continue
basename = os.path.basename(name) # Don't include extension.
basenames.setdefault(basename, []).append(source)
error = ''
for basename, files in basenames.iteritems():
if len(files) > 1:
error += ' %s: %s\n' % (basename, ' '.join(files))
if error:
print('static library %s has several files with the same basename:\n' %
spec['target_name'] + error + 'libtool on OS X will generate' +
' warnings for them.')
raise GypError('Duplicate basenames in sources section, see list above')
# Map from qualified target to path to output.
target_outputs = {}
# Map from qualified target to any linkable output. A subset
# of target_outputs. E.g. when mybinary depends on liba, we want to
# include liba in the linker line; when otherbinary depends on
# mybinary, we just want to build mybinary first.
target_link_deps = {}
class MakefileWriter(object):
"""MakefileWriter packages up the writing of one target-specific foobar.mk.
Its only real entry point is Write(), and is mostly used for namespacing.
"""
def __init__(self, generator_flags, flavor):
self.generator_flags = generator_flags
self.flavor = flavor
self.suffix_rules_srcdir = {}
self.suffix_rules_objdir1 = {}
self.suffix_rules_objdir2 = {}
# Generate suffix rules for all compilable extensions.
for ext in COMPILABLE_EXTENSIONS.keys():
# Suffix rules for source folder.
self.suffix_rules_srcdir.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(srcdir)/%%%s FORCE_DO_CMD
@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
# Suffix rules for generated source files.
self.suffix_rules_objdir1.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj).$(TOOLSET)/%%%s FORCE_DO_CMD
@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
self.suffix_rules_objdir2.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj)/%%%s FORCE_DO_CMD
@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
def Write(self, qualified_target, base_path, output_filename, spec, configs,
part_of_all):
"""The main entry point: writes a .mk file for a single target.
Arguments:
qualified_target: target we're generating
base_path: path relative to source root we're building in, used to resolve
target-relative paths
output_filename: output .mk file name to write
spec, configs: gyp info
part_of_all: flag indicating this target is part of 'all'
"""
gyp.common.EnsureDirExists(output_filename)
self.fp = open(output_filename, 'w')
self.fp.write(header)
self.qualified_target = qualified_target
self.path = base_path
self.target = spec['target_name']
self.type = spec['type']
self.toolset = spec['toolset']
self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec)
if self.flavor == 'mac':
self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
else:
self.xcode_settings = None
deps, link_deps = self.ComputeDeps(spec)
# Some of the generation below can add extra output, sources, or
# link dependencies. All of the out params of the functions that
# follow use names like extra_foo.
extra_outputs = []
extra_sources = []
extra_link_deps = []
extra_mac_bundle_resources = []
mac_bundle_deps = []
if self.is_mac_bundle:
self.output = self.ComputeMacBundleOutput(spec)
self.output_binary = self.ComputeMacBundleBinaryOutput(spec)
else:
self.output = self.output_binary = self.ComputeOutput(spec)
self.is_standalone_static_library = bool(
spec.get('standalone_static_library', 0))
self._INSTALLABLE_TARGETS = ('executable', 'loadable_module',
'shared_library')
if (self.is_standalone_static_library or
self.type in self._INSTALLABLE_TARGETS):
self.alias = os.path.basename(self.output)
install_path = self._InstallableTargetInstallPath()
else:
self.alias = self.output
install_path = self.output
self.WriteLn("TOOLSET := " + self.toolset)
self.WriteLn("TARGET := " + self.target)
# Actions must come first, since they can generate more OBJs for use below.
if 'actions' in spec:
self.WriteActions(spec['actions'], extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all)
# Rules must be early like actions.
if 'rules' in spec:
self.WriteRules(spec['rules'], extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all)
if 'copies' in spec:
self.WriteCopies(spec['copies'], extra_outputs, part_of_all)
# Bundle resources.
if self.is_mac_bundle:
all_mac_bundle_resources = (
spec.get('mac_bundle_resources', []) + extra_mac_bundle_resources)
self.WriteMacBundleResources(all_mac_bundle_resources, mac_bundle_deps)
self.WriteMacInfoPlist(mac_bundle_deps)
# Sources.
all_sources = spec.get('sources', []) + extra_sources
if all_sources:
if self.flavor == 'mac':
# libtool on OS X generates warnings for duplicate basenames in the same
# target.
_ValidateSourcesForOSX(spec, all_sources)
self.WriteSources(
configs, deps, all_sources, extra_outputs,
extra_link_deps, part_of_all,
gyp.xcode_emulation.MacPrefixHeader(
self.xcode_settings, lambda p: Sourceify(self.Absolutify(p)),
self.Pchify))
sources = filter(Compilable, all_sources)
if sources:
self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT1)
extensions = set([os.path.splitext(s)[1] for s in sources])
for ext in extensions:
if ext in self.suffix_rules_srcdir:
self.WriteLn(self.suffix_rules_srcdir[ext])
self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT2)
for ext in extensions:
if ext in self.suffix_rules_objdir1:
self.WriteLn(self.suffix_rules_objdir1[ext])
for ext in extensions:
if ext in self.suffix_rules_objdir2:
self.WriteLn(self.suffix_rules_objdir2[ext])
self.WriteLn('# End of this set of suffix rules')
# Add dependency from bundle to bundle binary.
if self.is_mac_bundle:
mac_bundle_deps.append(self.output_binary)
self.WriteTarget(spec, configs, deps, extra_link_deps + link_deps,
mac_bundle_deps, extra_outputs, part_of_all)
# Update global list of target outputs, used in dependency tracking.
target_outputs[qualified_target] = install_path
# Update global list of link dependencies.
if self.type in ('static_library', 'shared_library'):
target_link_deps[qualified_target] = self.output_binary
# Currently any versions have the same effect, but in future the behavior
# could be different.
if self.generator_flags.get('android_ndk_version', None):
self.WriteAndroidNdkModuleRule(self.target, all_sources, link_deps)
self.fp.close()
def WriteSubMake(self, output_filename, makefile_path, targets, build_dir):
"""Write a "sub-project" Makefile.
This is a small wrapper Makefile that calls the top-level Makefile to build
the targets from a single gyp file (i.e. a sub-project).
Arguments:
output_filename: sub-project Makefile name to write
makefile_path: path to the top-level Makefile
targets: list of "all" targets for this sub-project
build_dir: build output directory, relative to the sub-project
"""
gyp.common.EnsureDirExists(output_filename)
self.fp = open(output_filename, 'w')
self.fp.write(header)
# For consistency with other builders, put sub-project build output in the
# sub-project dir (see test/subdirectory/gyptest-subdir-all.py).
self.WriteLn('export builddir_name ?= %s' %
os.path.join(os.path.dirname(output_filename), build_dir))
self.WriteLn('.PHONY: all')
self.WriteLn('all:')
if makefile_path:
makefile_path = ' -C ' + makefile_path
self.WriteLn('\t$(MAKE)%s %s' % (makefile_path, ' '.join(targets)))
self.fp.close()
def WriteActions(self, actions, extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all):
"""Write Makefile code for any 'actions' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
actions (used to make other pieces dependent on these
actions)
part_of_all: flag indicating this target is part of 'all'
"""
env = self.GetSortedXcodeEnv()
for action in actions:
name = StringToMakefileVariable('%s_%s' % (self.qualified_target,
action['action_name']))
self.WriteLn('### Rules for action "%s":' % action['action_name'])
inputs = action['inputs']
outputs = action['outputs']
# Build up a list of outputs.
# Collect the output dirs we'll need.
dirs = set()
for out in outputs:
dir = os.path.split(out)[0]
if dir:
dirs.add(dir)
if int(action.get('process_outputs_as_sources', False)):
extra_sources += outputs
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += outputs
# Write the actual command.
action_commands = action['action']
if self.flavor == 'mac':
action_commands = [gyp.xcode_emulation.ExpandEnvVars(command, env)
for command in action_commands]
command = gyp.common.EncodePOSIXShellList(action_commands)
if 'message' in action:
self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, action['message']))
else:
self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, name))
if len(dirs) > 0:
command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command
cd_action = 'cd %s; ' % Sourceify(self.path or '.')
# command and cd_action get written to a toplevel variable called
# cmd_foo. Toplevel variables can't handle things that change per
# makefile like $(TARGET), so hardcode the target.
command = command.replace('$(TARGET)', self.target)
cd_action = cd_action.replace('$(TARGET)', self.target)
# Set LD_LIBRARY_PATH in case the action runs an executable from this
# build which links to shared libs from this build.
# actions run on the host, so they should in theory only use host
# libraries, but until everything is made cross-compile safe, also use
# target libraries.
# TODO(piman): when everything is cross-compile safe, remove lib.target
self.WriteLn('cmd_%s = LD_LIBRARY_PATH=$(builddir)/lib.host:'
'$(builddir)/lib.target:$$LD_LIBRARY_PATH; '
'export LD_LIBRARY_PATH; '
'%s%s'
% (name, cd_action, command))
self.WriteLn()
outputs = map(self.Absolutify, outputs)
# The makefile rules are all relative to the top dir, but the gyp actions
# are defined relative to their containing dir. This replaces the obj
# variable for the action rule with an absolute version so that the output
# goes in the right place.
# Only write the 'obj' and 'builddir' rules for the "primary" output (:1);
# it's superfluous for the "extra outputs", and this avoids accidentally
# writing duplicate dummy rules for those outputs.
# Same for environment.
self.WriteLn("%s: obj := $(abs_obj)" % QuoteSpaces(outputs[0]))
self.WriteLn("%s: builddir := $(abs_builddir)" % QuoteSpaces(outputs[0]))
self.WriteSortedXcodeEnv(outputs[0], self.GetSortedXcodeEnv())
for input in inputs:
assert ' ' not in input, (
"Spaces in action input filenames not supported (%s)" % input)
for output in outputs:
assert ' ' not in output, (
"Spaces in action output filenames not supported (%s)" % output)
# See the comment in WriteCopies about expanding env vars.
outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs]
inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs]
self.WriteDoCmd(outputs, map(Sourceify, map(self.Absolutify, inputs)),
part_of_all=part_of_all, command=name)
# Stuff the outputs in a variable so we can refer to them later.
outputs_variable = 'action_%s_outputs' % name
self.WriteLn('%s := %s' % (outputs_variable, ' '.join(outputs)))
extra_outputs.append('$(%s)' % outputs_variable)
self.WriteLn()
self.WriteLn()
def WriteRules(self, rules, extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all):
"""Write Makefile code for any 'rules' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
rules (used to make other pieces dependent on these rules)
part_of_all: flag indicating this target is part of 'all'
"""
env = self.GetSortedXcodeEnv()
for rule in rules:
name = StringToMakefileVariable('%s_%s' % (self.qualified_target,
rule['rule_name']))
count = 0
self.WriteLn('### Generated for rule %s:' % name)
all_outputs = []
for rule_source in rule.get('rule_sources', []):
dirs = set()
(rule_source_dirname, rule_source_basename) = os.path.split(rule_source)
(rule_source_root, rule_source_ext) = \
os.path.splitext(rule_source_basename)
outputs = [self.ExpandInputRoot(out, rule_source_root,
rule_source_dirname)
for out in rule['outputs']]
for out in outputs:
dir = os.path.dirname(out)
if dir:
dirs.add(dir)
if int(rule.get('process_outputs_as_sources', False)):
extra_sources += outputs
if int(rule.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += outputs
inputs = map(Sourceify, map(self.Absolutify, [rule_source] +
rule.get('inputs', [])))
actions = ['$(call do_cmd,%s_%d)' % (name, count)]
if name == 'resources_grit':
# HACK: This is ugly. Grit intentionally doesn't touch the
# timestamp of its output file when the file doesn't change,
# which is fine in hash-based dependency systems like scons
# and forge, but not kosher in the make world. After some
# discussion, hacking around it here seems like the least
# amount of pain.
actions += ['@touch --no-create $@']
# See the comment in WriteCopies about expanding env vars.
outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs]
inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs]
outputs = map(self.Absolutify, outputs)
all_outputs += outputs
# Only write the 'obj' and 'builddir' rules for the "primary" output
# (:1); it's superfluous for the "extra outputs", and this avoids
# accidentally writing duplicate dummy rules for those outputs.
self.WriteLn('%s: obj := $(abs_obj)' % outputs[0])
self.WriteLn('%s: builddir := $(abs_builddir)' % outputs[0])
self.WriteMakeRule(outputs, inputs + ['FORCE_DO_CMD'], actions)
# Spaces in rule filenames are not supported, but rule variables have
# spaces in them (e.g. RULE_INPUT_PATH expands to '$(abspath $<)').
# The spaces within the variables are valid, so remove the variables
# before checking.
variables_with_spaces = re.compile(r'\$\([^ ]* \$<\)')
for output in outputs:
output = re.sub(variables_with_spaces, '', output)
assert ' ' not in output, (
"Spaces in rule filenames not yet supported (%s)" % output)
self.WriteLn('all_deps += %s' % ' '.join(outputs))
action = [self.ExpandInputRoot(ac, rule_source_root,
rule_source_dirname)
for ac in rule['action']]
mkdirs = ''
if len(dirs) > 0:
mkdirs = 'mkdir -p %s; ' % ' '.join(dirs)
cd_action = 'cd %s; ' % Sourceify(self.path or '.')
# action, cd_action, and mkdirs get written to a toplevel variable
# called cmd_foo. Toplevel variables can't handle things that change
# per makefile like $(TARGET), so hardcode the target.
if self.flavor == 'mac':
action = [gyp.xcode_emulation.ExpandEnvVars(command, env)
for command in action]
action = gyp.common.EncodePOSIXShellList(action)
action = action.replace('$(TARGET)', self.target)
cd_action = cd_action.replace('$(TARGET)', self.target)
mkdirs = mkdirs.replace('$(TARGET)', self.target)
# Set LD_LIBRARY_PATH in case the rule runs an executable from this
# build which links to shared libs from this build.
# rules run on the host, so they should in theory only use host
# libraries, but until everything is made cross-compile safe, also use
# target libraries.
# TODO(piman): when everything is cross-compile safe, remove lib.target
self.WriteLn(
"cmd_%(name)s_%(count)d = LD_LIBRARY_PATH="
"$(builddir)/lib.host:$(builddir)/lib.target:$$LD_LIBRARY_PATH; "
"export LD_LIBRARY_PATH; "
"%(cd_action)s%(mkdirs)s%(action)s" % {
'action': action,
'cd_action': cd_action,
'count': count,
'mkdirs': mkdirs,
'name': name,
})
self.WriteLn(
'quiet_cmd_%(name)s_%(count)d = RULE %(name)s_%(count)d $@' % {
'count': count,
'name': name,
})
self.WriteLn()
count += 1
outputs_variable = 'rule_%s_outputs' % name
self.WriteList(all_outputs, outputs_variable)
extra_outputs.append('$(%s)' % outputs_variable)
self.WriteLn('### Finished generating for rule: %s' % name)
self.WriteLn()
self.WriteLn('### Finished generating for all rules')
self.WriteLn('')
def WriteCopies(self, copies, extra_outputs, part_of_all):
"""Write Makefile code for any 'copies' from the gyp input.
extra_outputs: a list that will be filled in with any outputs of this action
(used to make other pieces dependent on this action)
part_of_all: flag indicating this target is part of 'all'
"""
self.WriteLn('### Generated for copy rule.')
variable = StringToMakefileVariable(self.qualified_target + '_copies')
outputs = []
for copy in copies:
for path in copy['files']:
# Absolutify() may call normpath, and will strip trailing slashes.
path = Sourceify(self.Absolutify(path))
filename = os.path.split(path)[1]
output = Sourceify(self.Absolutify(os.path.join(copy['destination'],
filename)))
# If the output path has variables in it, which happens in practice for
# 'copies', writing the environment as target-local doesn't work,
# because the variables are already needed for the target name.
# Copying the environment variables into global make variables doesn't
# work either, because then the .d files will potentially contain spaces
# after variable expansion, and .d file handling cannot handle spaces.
# As a workaround, manually expand variables at gyp time. Since 'copies'
# can't run scripts, there's no need to write the env then.
# WriteDoCmd() will escape spaces for .d files.
env = self.GetSortedXcodeEnv()
output = gyp.xcode_emulation.ExpandEnvVars(output, env)
path = gyp.xcode_emulation.ExpandEnvVars(path, env)
self.WriteDoCmd([output], [path], 'copy', part_of_all)
outputs.append(output)
self.WriteLn('%s = %s' % (variable, ' '.join(map(QuoteSpaces, outputs))))
extra_outputs.append('$(%s)' % variable)
self.WriteLn()
def WriteMacBundleResources(self, resources, bundle_deps):
"""Writes Makefile code for 'mac_bundle_resources'."""
self.WriteLn('### Generated for mac_bundle_resources')
for output, res in gyp.xcode_emulation.GetMacBundleResources(
generator_default_variables['PRODUCT_DIR'], self.xcode_settings,
map(Sourceify, map(self.Absolutify, resources))):
_, ext = os.path.splitext(output)
if ext != '.xcassets':
        # Make does not support '.xcassets' emulation.
self.WriteDoCmd([output], [res], 'mac_tool,,,copy-bundle-resource',
part_of_all=True)
bundle_deps.append(output)
def WriteMacInfoPlist(self, bundle_deps):
"""Write Makefile code for bundle Info.plist files."""
info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist(
generator_default_variables['PRODUCT_DIR'], self.xcode_settings,
lambda p: Sourceify(self.Absolutify(p)))
if not info_plist:
return
if defines:
# Create an intermediate file to store preprocessed results.
intermediate_plist = ('$(obj).$(TOOLSET)/$(TARGET)/' +
os.path.basename(info_plist))
self.WriteList(defines, intermediate_plist + ': INFOPLIST_DEFINES', '-D',
quoter=EscapeCppDefine)
self.WriteMakeRule([intermediate_plist], [info_plist],
['$(call do_cmd,infoplist)',
# "Convert" the plist so that any weird whitespace changes from the
# preprocessor do not affect the XML parser in mac_tool.
'@plutil -convert xml1 $@ $@'])
info_plist = intermediate_plist
    # plists can contain envvars; substitute them into the file.
self.WriteSortedXcodeEnv(
out, self.GetSortedXcodeEnv(additional_settings=extra_env))
self.WriteDoCmd([out], [info_plist], 'mac_tool,,,copy-info-plist',
part_of_all=True)
bundle_deps.append(out)
def WriteSources(self, configs, deps, sources,
extra_outputs, extra_link_deps,
part_of_all, precompiled_header):
"""Write Makefile code for any 'sources' from the gyp input.
These are source files necessary to build the current target.
configs, deps, sources: input from gyp.
extra_outputs: a list of extra outputs this action should be dependent on;
used to serialize action/rules before compilation
extra_link_deps: a list that will be filled in with any outputs of
compilation (to be used in link lines)
part_of_all: flag indicating this target is part of 'all'
"""
# Write configuration-specific variables for CFLAGS, etc.
for configname in sorted(configs.keys()):
config = configs[configname]
self.WriteList(config.get('defines'), 'DEFS_%s' % configname, prefix='-D',
quoter=EscapeCppDefine)
if self.flavor == 'mac':
cflags = self.xcode_settings.GetCflags(configname)
cflags_c = self.xcode_settings.GetCflagsC(configname)
cflags_cc = self.xcode_settings.GetCflagsCC(configname)
cflags_objc = self.xcode_settings.GetCflagsObjC(configname)
cflags_objcc = self.xcode_settings.GetCflagsObjCC(configname)
else:
cflags = config.get('cflags')
cflags_c = config.get('cflags_c')
cflags_cc = config.get('cflags_cc')
self.WriteLn("# Flags passed to all source files.");
self.WriteList(cflags, 'CFLAGS_%s' % configname)
self.WriteLn("# Flags passed to only C files.");
self.WriteList(cflags_c, 'CFLAGS_C_%s' % configname)
self.WriteLn("# Flags passed to only C++ files.");
self.WriteList(cflags_cc, 'CFLAGS_CC_%s' % configname)
if self.flavor == 'mac':
self.WriteLn("# Flags passed to only ObjC files.");
self.WriteList(cflags_objc, 'CFLAGS_OBJC_%s' % configname)
self.WriteLn("# Flags passed to only ObjC++ files.");
self.WriteList(cflags_objcc, 'CFLAGS_OBJCC_%s' % configname)
includes = config.get('include_dirs')
if includes:
includes = map(Sourceify, map(self.Absolutify, includes))
self.WriteList(includes, 'INCS_%s' % configname, prefix='-I')
compilable = filter(Compilable, sources)
objs = map(self.Objectify, map(self.Absolutify, map(Target, compilable)))
self.WriteList(objs, 'OBJS')
for obj in objs:
assert ' ' not in obj, (
"Spaces in object filenames not supported (%s)" % obj)
self.WriteLn('# Add to the list of files we specially track '
'dependencies for.')
self.WriteLn('all_deps += $(OBJS)')
self.WriteLn()
# Make sure our dependencies are built first.
if deps:
self.WriteMakeRule(['$(OBJS)'], deps,
comment = 'Make sure our dependencies are built '
'before any of us.',
order_only = True)
# Make sure the actions and rules run first.
# If they generate any extra headers etc., the per-.o file dep tracking
# will catch the proper rebuilds, so order only is still ok here.
if extra_outputs:
self.WriteMakeRule(['$(OBJS)'], extra_outputs,
comment = 'Make sure our actions/rules run '
'before any of us.',
order_only = True)
    pchdeps = precompiled_header.GetObjDependencies(compilable, objs)
if pchdeps:
self.WriteLn('# Dependencies from obj files to their precompiled headers')
for source, obj, gch in pchdeps:
self.WriteLn('%s: %s' % (obj, gch))
self.WriteLn('# End precompiled header dependencies')
if objs:
extra_link_deps.append('$(OBJS)')
self.WriteLn("""\
# CFLAGS et al overrides must be target-local.
# See "Target-specific Variable Values" in the GNU Make manual.""")
self.WriteLn("$(OBJS): TOOLSET := $(TOOLSET)")
self.WriteLn("$(OBJS): GYP_CFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('c') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_C_$(BUILDTYPE))")
self.WriteLn("$(OBJS): GYP_CXXFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('cc') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_CC_$(BUILDTYPE))")
if self.flavor == 'mac':
self.WriteLn("$(OBJS): GYP_OBJCFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('m') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_C_$(BUILDTYPE)) "
"$(CFLAGS_OBJC_$(BUILDTYPE))")
self.WriteLn("$(OBJS): GYP_OBJCXXFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('mm') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_CC_$(BUILDTYPE)) "
"$(CFLAGS_OBJCC_$(BUILDTYPE))")
self.WritePchTargets(precompiled_header.GetPchBuildCommands())
# If there are any object files in our input file list, link them into our
# output.
extra_link_deps += filter(Linkable, sources)
self.WriteLn()
def WritePchTargets(self, pch_commands):
"""Writes make rules to compile prefix headers."""
if not pch_commands:
return
for gch, lang_flag, lang, input in pch_commands:
extra_flags = {
'c': '$(CFLAGS_C_$(BUILDTYPE))',
'cc': '$(CFLAGS_CC_$(BUILDTYPE))',
'm': '$(CFLAGS_C_$(BUILDTYPE)) $(CFLAGS_OBJC_$(BUILDTYPE))',
'mm': '$(CFLAGS_CC_$(BUILDTYPE)) $(CFLAGS_OBJCC_$(BUILDTYPE))',
}[lang]
var_name = {
'c': 'GYP_PCH_CFLAGS',
'cc': 'GYP_PCH_CXXFLAGS',
'm': 'GYP_PCH_OBJCFLAGS',
'mm': 'GYP_PCH_OBJCXXFLAGS',
}[lang]
self.WriteLn("%s: %s := %s " % (gch, var_name, lang_flag) +
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"$(CFLAGS_$(BUILDTYPE)) " +
extra_flags)
self.WriteLn('%s: %s FORCE_DO_CMD' % (gch, input))
self.WriteLn('\t@$(call do_cmd,pch_%s,1)' % lang)
self.WriteLn('')
assert ' ' not in gch, (
"Spaces in gch filenames not supported (%s)" % gch)
self.WriteLn('all_deps += %s' % gch)
self.WriteLn('')
def ComputeOutputBasename(self, spec):
"""Return the 'output basename' of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'libfoobar.so'
"""
assert not self.is_mac_bundle
if self.flavor == 'mac' and self.type in (
'static_library', 'executable', 'shared_library', 'loadable_module'):
return self.xcode_settings.GetExecutablePath()
target = spec['target_name']
target_prefix = ''
target_ext = ''
if self.type == 'static_library':
if target[:3] == 'lib':
target = target[3:]
target_prefix = 'lib'
target_ext = '.a'
elif self.type in ('loadable_module', 'shared_library'):
if target[:3] == 'lib':
target = target[3:]
target_prefix = 'lib'
target_ext = '.so'
elif self.type == 'none':
target = '%s.stamp' % target
elif self.type != 'executable':
print ("ERROR: What output file should be generated?",
"type", self.type, "target", target)
target_prefix = spec.get('product_prefix', target_prefix)
target = spec.get('product_name', target)
product_ext = spec.get('product_extension')
if product_ext:
target_ext = '.' + product_ext
return target_prefix + target + target_ext
def _InstallImmediately(self):
return self.toolset == 'target' and self.flavor == 'mac' and self.type in (
'static_library', 'executable', 'shared_library', 'loadable_module')
def ComputeOutput(self, spec):
"""Return the 'output' (full output path) of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'$(obj)/baz/libfoobar.so'
"""
assert not self.is_mac_bundle
path = os.path.join('$(obj).' + self.toolset, self.path)
if self.type == 'executable' or self._InstallImmediately():
path = '$(builddir)'
path = spec.get('product_dir', path)
return os.path.join(path, self.ComputeOutputBasename(spec))
def ComputeMacBundleOutput(self, spec):
"""Return the 'output' (full output path) to a bundle output directory."""
assert self.is_mac_bundle
path = generator_default_variables['PRODUCT_DIR']
return os.path.join(path, self.xcode_settings.GetWrapperName())
def ComputeMacBundleBinaryOutput(self, spec):
"""Return the 'output' (full output path) to the binary in a bundle."""
path = generator_default_variables['PRODUCT_DIR']
return os.path.join(path, self.xcode_settings.GetExecutablePath())
def ComputeDeps(self, spec):
"""Compute the dependencies of a gyp spec.
Returns a tuple (deps, link_deps), where each is a list of
filenames that will need to be put in front of make for either
building (deps) or linking (link_deps).
"""
deps = []
link_deps = []
if 'dependencies' in spec:
deps.extend([target_outputs[dep] for dep in spec['dependencies']
if target_outputs[dep]])
for dep in spec['dependencies']:
if dep in target_link_deps:
link_deps.append(target_link_deps[dep])
deps.extend(link_deps)
# TODO: It seems we need to transitively link in libraries (e.g. -lfoo)?
# This hack makes it work:
# link_deps.extend(spec.get('libraries', []))
return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps))
def WriteDependencyOnExtraOutputs(self, target, extra_outputs):
self.WriteMakeRule([self.output_binary], extra_outputs,
comment = 'Build our special outputs first.',
order_only = True)
def WriteTarget(self, spec, configs, deps, link_deps, bundle_deps,
extra_outputs, part_of_all):
"""Write Makefile code to produce the final target of the gyp spec.
spec, configs: input from gyp.
deps, link_deps: dependency lists; see ComputeDeps()
extra_outputs: any extra outputs that our target should depend on
part_of_all: flag indicating this target is part of 'all'
"""
self.WriteLn('### Rules for final target.')
if extra_outputs:
self.WriteDependencyOnExtraOutputs(self.output_binary, extra_outputs)
self.WriteMakeRule(extra_outputs, deps,
comment=('Preserve order dependency of '
'special output on deps.'),
order_only = True)
target_postbuilds = {}
if self.type != 'none':
for configname in sorted(configs.keys()):
config = configs[configname]
if self.flavor == 'mac':
ldflags = self.xcode_settings.GetLdflags(configname,
generator_default_variables['PRODUCT_DIR'],
lambda p: Sourceify(self.Absolutify(p)))
# TARGET_POSTBUILDS_$(BUILDTYPE) is added to postbuilds later on.
gyp_to_build = gyp.common.InvertRelativePath(self.path)
target_postbuild = self.xcode_settings.AddImplicitPostbuilds(
configname,
QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build,
self.output))),
QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build,
self.output_binary))))
if target_postbuild:
target_postbuilds[configname] = target_postbuild
else:
ldflags = config.get('ldflags', [])
# Compute an rpath for this output if needed.
if any(dep.endswith('.so') or '.so.' in dep for dep in deps):
# We want to get the literal string "$ORIGIN" into the link command,
# so we need lots of escaping.
ldflags.append(r'-Wl,-rpath=\$$ORIGIN/lib.%s/' % self.toolset)
ldflags.append(r'-Wl,-rpath-link=\$(builddir)/lib.%s/' %
self.toolset)
library_dirs = config.get('library_dirs', [])
ldflags += [('-L%s' % library_dir) for library_dir in library_dirs]
self.WriteList(ldflags, 'LDFLAGS_%s' % configname)
if self.flavor == 'mac':
self.WriteList(self.xcode_settings.GetLibtoolflags(configname),
'LIBTOOLFLAGS_%s' % configname)
libraries = spec.get('libraries')
if libraries:
# Remove duplicate entries
libraries = gyp.common.uniquer(libraries)
if self.flavor == 'mac':
libraries = self.xcode_settings.AdjustLibraries(libraries)
self.WriteList(libraries, 'LIBS')
self.WriteLn('%s: GYP_LDFLAGS := $(LDFLAGS_$(BUILDTYPE))' %
QuoteSpaces(self.output_binary))
self.WriteLn('%s: LIBS := $(LIBS)' % QuoteSpaces(self.output_binary))
if self.flavor == 'mac':
self.WriteLn('%s: GYP_LIBTOOLFLAGS := $(LIBTOOLFLAGS_$(BUILDTYPE))' %
QuoteSpaces(self.output_binary))
# Postbuild actions. Like actions, but implicitly depend on the target's
# output.
postbuilds = []
if self.flavor == 'mac':
if target_postbuilds:
postbuilds.append('$(TARGET_POSTBUILDS_$(BUILDTYPE))')
postbuilds.extend(
gyp.xcode_emulation.GetSpecPostbuildCommands(spec))
if postbuilds:
# Envvars may be referenced by TARGET_POSTBUILDS_$(BUILDTYPE),
# so we must output its definition first, since we declare variables
# using ":=".
self.WriteSortedXcodeEnv(self.output, self.GetSortedXcodePostbuildEnv())
for configname in target_postbuilds:
self.WriteLn('%s: TARGET_POSTBUILDS_%s := %s' %
(QuoteSpaces(self.output),
configname,
gyp.common.EncodePOSIXShellList(target_postbuilds[configname])))
# Postbuilds expect to be run in the gyp file's directory, so insert an
# implicit postbuild to cd to there.
postbuilds.insert(0, gyp.common.EncodePOSIXShellList(['cd', self.path]))
for i in xrange(len(postbuilds)):
if not postbuilds[i].startswith('$'):
postbuilds[i] = EscapeShellArgument(postbuilds[i])
self.WriteLn('%s: builddir := $(abs_builddir)' % QuoteSpaces(self.output))
self.WriteLn('%s: POSTBUILDS := %s' % (
QuoteSpaces(self.output), ' '.join(postbuilds)))
# A bundle directory depends on its dependencies such as bundle resources
# and bundle binary. When all dependencies have been built, the bundle
# needs to be packaged.
if self.is_mac_bundle:
# If the framework doesn't contain a binary, then nothing depends
# on the actions -- make the framework depend on them directly too.
self.WriteDependencyOnExtraOutputs(self.output, extra_outputs)
# Bundle dependencies. Note that the code below adds actions to this
# target, so if you move these two lines, move the lines below as well.
self.WriteList(map(QuoteSpaces, bundle_deps), 'BUNDLE_DEPS')
self.WriteLn('%s: $(BUNDLE_DEPS)' % QuoteSpaces(self.output))
# After the framework is built, package it. Needs to happen before
# postbuilds, since postbuilds depend on this.
if self.type in ('shared_library', 'loadable_module'):
self.WriteLn('\t@$(call do_cmd,mac_package_framework,,,%s)' %
self.xcode_settings.GetFrameworkVersion())
      # Bundle postbuilds can depend on the whole bundle, so run them after
      # the bundle is packaged, not merely after the bundle binary is done.
if postbuilds:
self.WriteLn('\t@$(call do_postbuilds)')
postbuilds = [] # Don't write postbuilds for target's output.
# Needed by test/mac/gyptest-rebuild.py.
self.WriteLn('\t@true # No-op, used by tests')
      # Since this target depends on binary and resources which are in
      # nested subfolders, the framework directory will usually be older
      # than its dependencies. To prevent this rule from executing on
      # every build (expensive, especially with postbuilds), explicitly
      # update the time on the framework directory.
self.WriteLn('\t@touch -c %s' % QuoteSpaces(self.output))
if postbuilds:
assert not self.is_mac_bundle, ('Postbuilds for bundles should be done '
'on the bundle, not the binary (target \'%s\')' % self.target)
assert 'product_dir' not in spec, ('Postbuilds do not work with '
'custom product_dir')
if self.type == 'executable':
self.WriteLn('%s: LD_INPUTS := %s' % (
QuoteSpaces(self.output_binary),
' '.join(map(QuoteSpaces, link_deps))))
if self.toolset == 'host' and self.flavor == 'android':
self.WriteDoCmd([self.output_binary], link_deps, 'link_host',
part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd([self.output_binary], link_deps, 'link', part_of_all,
postbuilds=postbuilds)
elif self.type == 'static_library':
for link_dep in link_deps:
assert ' ' not in link_dep, (
"Spaces in alink input filenames not supported (%s)" % link_dep)
if (self.flavor not in ('mac', 'openbsd', 'win') and not
self.is_standalone_static_library):
self.WriteDoCmd([self.output_binary], link_deps, 'alink_thin',
part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd([self.output_binary], link_deps, 'alink', part_of_all,
postbuilds=postbuilds)
elif self.type == 'shared_library':
self.WriteLn('%s: LD_INPUTS := %s' % (
QuoteSpaces(self.output_binary),
' '.join(map(QuoteSpaces, link_deps))))
self.WriteDoCmd([self.output_binary], link_deps, 'solink', part_of_all,
postbuilds=postbuilds)
elif self.type == 'loadable_module':
for link_dep in link_deps:
assert ' ' not in link_dep, (
"Spaces in module input filenames not supported (%s)" % link_dep)
if self.toolset == 'host' and self.flavor == 'android':
self.WriteDoCmd([self.output_binary], link_deps, 'solink_module_host',
part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd(
[self.output_binary], link_deps, 'solink_module', part_of_all,
postbuilds=postbuilds)
elif self.type == 'none':
# Write a stamp line.
self.WriteDoCmd([self.output_binary], deps, 'touch', part_of_all,
postbuilds=postbuilds)
else:
print "WARNING: no output for", self.type, target
# Add an alias for each target (if there are any outputs).
# Installable target aliases are created below.
if ((self.output and self.output != self.target) and
(self.type not in self._INSTALLABLE_TARGETS)):
self.WriteMakeRule([self.target], [self.output],
comment='Add target alias', phony = True)
if part_of_all:
self.WriteMakeRule(['all'], [self.target],
comment = 'Add target alias to "all" target.',
phony = True)
# Add special-case rules for our installable targets.
# 1) They need to install to the build dir or "product" dir.
# 2) They get shortcuts for building (e.g. "make chrome").
# 3) They are part of "make all".
if (self.type in self._INSTALLABLE_TARGETS or
self.is_standalone_static_library):
if self.type == 'shared_library':
file_desc = 'shared library'
elif self.type == 'static_library':
file_desc = 'static library'
else:
file_desc = 'executable'
install_path = self._InstallableTargetInstallPath()
installable_deps = [self.output]
if (self.flavor == 'mac' and not 'product_dir' in spec and
self.toolset == 'target'):
# On mac, products are created in install_path immediately.
assert install_path == self.output, '%s != %s' % (
install_path, self.output)
# Point the target alias to the final binary output.
self.WriteMakeRule([self.target], [install_path],
comment='Add target alias', phony = True)
if install_path != self.output:
assert not self.is_mac_bundle # See comment a few lines above.
self.WriteDoCmd([install_path], [self.output], 'copy',
comment = 'Copy this to the %s output path.' %
file_desc, part_of_all=part_of_all)
installable_deps.append(install_path)
if self.output != self.alias and self.alias != self.target:
self.WriteMakeRule([self.alias], installable_deps,
comment = 'Short alias for building this %s.' %
file_desc, phony = True)
if part_of_all:
self.WriteMakeRule(['all'], [install_path],
comment = 'Add %s to "all" target.' % file_desc,
phony = True)
def WriteList(self, value_list, variable=None, prefix='',
quoter=QuoteIfNecessary):
"""Write a variable definition that is a list of values.
E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out
foo = blaha blahb
but in a pretty-printed style.
"""
values = ''
if value_list:
value_list = [quoter(prefix + l) for l in value_list]
values = ' \\\n\t' + ' \\\n\t'.join(value_list)
self.fp.write('%s :=%s\n\n' % (variable, values))
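  # For illustration (hypothetical example): per the code above,
  # WriteList(['a', 'b'], 'foo', prefix='blah') emits a backslash-continued,
  # tab-indented assignment:
  #
  #   foo := \
  #       blaha \
  #       blahb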
def WriteDoCmd(self, outputs, inputs, command, part_of_all, comment=None,
postbuilds=False):
"""Write a Makefile rule that uses do_cmd.
This makes the outputs dependent on the command line that was run,
as well as support the V= make command line flag.
"""
suffix = ''
if postbuilds:
assert ',' not in command
suffix = ',,1' # Tell do_cmd to honor $POSTBUILDS
self.WriteMakeRule(outputs, inputs,
actions = ['$(call do_cmd,%s%s)' % (command, suffix)],
comment = comment,
force = True)
# Add our outputs to the list of targets we read depfiles from.
# all_deps is only used for deps file reading, and for deps files we replace
# spaces with ? because escaping doesn't work with make's $(sort) and
# other functions.
outputs = [QuoteSpaces(o, SPACE_REPLACEMENT) for o in outputs]
self.WriteLn('all_deps += %s' % ' '.join(outputs))
def WriteMakeRule(self, outputs, inputs, actions=None, comment=None,
order_only=False, force=False, phony=False):
"""Write a Makefile rule, with some extra tricks.
outputs: a list of outputs for the rule (note: this is not directly
supported by make; see comments below)
inputs: a list of inputs for the rule
actions: a list of shell commands to run for the rule
comment: a comment to put in the Makefile above the rule (also useful
for making this Python script's code self-documenting)
order_only: if true, makes the dependency order-only
force: if true, include FORCE_DO_CMD as an order-only dep
phony: if true, the rule does not actually generate the named output, the
output is just a name to run the rule
"""
outputs = map(QuoteSpaces, outputs)
inputs = map(QuoteSpaces, inputs)
if comment:
self.WriteLn('# ' + comment)
if phony:
self.WriteLn('.PHONY: ' + ' '.join(outputs))
# TODO(evanm): just make order_only a list of deps instead of these hacks.
if order_only:
order_insert = '| '
pick_output = ' '.join(outputs)
else:
order_insert = ''
pick_output = outputs[0]
if force:
force_append = ' FORCE_DO_CMD'
else:
force_append = ''
if actions:
self.WriteLn("%s: TOOLSET := $(TOOLSET)" % outputs[0])
self.WriteLn('%s: %s%s%s' % (pick_output, order_insert, ' '.join(inputs),
force_append))
if actions:
for action in actions:
self.WriteLn('\t%s' % action)
if not order_only and len(outputs) > 1:
# If we have more than one output, a rule like
# foo bar: baz
# that for *each* output we must run the action, potentially
# in parallel. That is not what we're trying to write -- what
# we want is that we run the action once and it generates all
# the files.
# http://www.gnu.org/software/hello/manual/automake/Multiple-Outputs.html
# discusses this problem and has this solution:
# 1) Write the naive rule that would produce parallel runs of
# the action.
      # 2) Make the outputs serialized on each other, so we won't start
# a parallel run until the first run finishes, at which point
# we'll have generated all the outputs and we're done.
self.WriteLn('%s: %s' % (' '.join(outputs[1:]), outputs[0]))
# Add a dummy command to the "extra outputs" rule, otherwise make seems to
# think these outputs haven't (couldn't have?) changed, and thus doesn't
# flag them as changed (i.e. include in '$?') when evaluating dependent
# rules, which in turn causes do_cmd() to skip running dependent commands.
self.WriteLn('%s: ;' % (' '.join(outputs[1:])))
self.WriteLn()
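  # For illustration (hypothetical example):
  # WriteMakeRule(['out1', 'out2'], ['in'], actions=['do_it'], force=True)
  # emits roughly:
  #
  #   out1: TOOLSET := $(TOOLSET)
  #   out1: in FORCE_DO_CMD
  #           do_it
  #   out2: out1
  #   out2: ;
  #
  # so the action runs once (against out1) and out2 is serialized on it.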
def WriteAndroidNdkModuleRule(self, module_name, all_sources, link_deps):
"""Write a set of LOCAL_XXX definitions for Android NDK.
These variable definitions will be used by Android NDK but do nothing for
non-Android applications.
Arguments:
module_name: Android NDK module name, which must be unique among all
module names.
all_sources: A list of source files (will be filtered by Compilable).
link_deps: A list of link dependencies, which must be sorted in
the order from dependencies to dependents.
"""
if self.type not in ('executable', 'shared_library', 'static_library'):
return
self.WriteLn('# Variable definitions for Android applications')
self.WriteLn('include $(CLEAR_VARS)')
self.WriteLn('LOCAL_MODULE := ' + module_name)
self.WriteLn('LOCAL_CFLAGS := $(CFLAGS_$(BUILDTYPE)) '
'$(DEFS_$(BUILDTYPE)) '
                 # LOCAL_CFLAGS is applied to both C and C++. There is
# no way to specify $(CFLAGS_C_$(BUILDTYPE)) only for C
# sources.
'$(CFLAGS_C_$(BUILDTYPE)) '
# $(INCS_$(BUILDTYPE)) includes the prefix '-I' while
# LOCAL_C_INCLUDES does not expect it. So put it in
# LOCAL_CFLAGS.
'$(INCS_$(BUILDTYPE))')
# LOCAL_CXXFLAGS is obsolete and LOCAL_CPPFLAGS is preferred.
self.WriteLn('LOCAL_CPPFLAGS := $(CFLAGS_CC_$(BUILDTYPE))')
self.WriteLn('LOCAL_C_INCLUDES :=')
self.WriteLn('LOCAL_LDLIBS := $(LDFLAGS_$(BUILDTYPE)) $(LIBS)')
# Detect the C++ extension.
cpp_ext = {'.cc': 0, '.cpp': 0, '.cxx': 0}
default_cpp_ext = '.cpp'
for filename in all_sources:
ext = os.path.splitext(filename)[1]
if ext in cpp_ext:
cpp_ext[ext] += 1
if cpp_ext[ext] > cpp_ext[default_cpp_ext]:
default_cpp_ext = ext
self.WriteLn('LOCAL_CPP_EXTENSION := ' + default_cpp_ext)
self.WriteList(map(self.Absolutify, filter(Compilable, all_sources)),
'LOCAL_SRC_FILES')
# Filter out those which do not match prefix and suffix and produce
# the resulting list without prefix and suffix.
def DepsToModules(deps, prefix, suffix):
modules = []
for filepath in deps:
filename = os.path.basename(filepath)
if filename.startswith(prefix) and filename.endswith(suffix):
modules.append(filename[len(prefix):-len(suffix)])
return modules
# Retrieve the default value of 'SHARED_LIB_SUFFIX'
params = {'flavor': 'linux'}
default_variables = {}
CalculateVariables(default_variables, params)
self.WriteList(
DepsToModules(link_deps,
generator_default_variables['SHARED_LIB_PREFIX'],
default_variables['SHARED_LIB_SUFFIX']),
'LOCAL_SHARED_LIBRARIES')
self.WriteList(
DepsToModules(link_deps,
generator_default_variables['STATIC_LIB_PREFIX'],
generator_default_variables['STATIC_LIB_SUFFIX']),
'LOCAL_STATIC_LIBRARIES')
if self.type == 'executable':
self.WriteLn('include $(BUILD_EXECUTABLE)')
elif self.type == 'shared_library':
self.WriteLn('include $(BUILD_SHARED_LIBRARY)')
elif self.type == 'static_library':
self.WriteLn('include $(BUILD_STATIC_LIBRARY)')
self.WriteLn()
def WriteLn(self, text=''):
self.fp.write(text + '\n')
def GetSortedXcodeEnv(self, additional_settings=None):
return gyp.xcode_emulation.GetSortedXcodeEnv(
self.xcode_settings, "$(abs_builddir)",
os.path.join("$(abs_srcdir)", self.path), "$(BUILDTYPE)",
additional_settings)
def GetSortedXcodePostbuildEnv(self):
# CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
# TODO(thakis): It would be nice to have some general mechanism instead.
strip_save_file = self.xcode_settings.GetPerTargetSetting(
'CHROMIUM_STRIP_SAVE_FILE', '')
# Even if strip_save_file is empty, explicitly write it. Else a postbuild
# might pick up an export from an earlier target.
return self.GetSortedXcodeEnv(
additional_settings={'CHROMIUM_STRIP_SAVE_FILE': strip_save_file})
def WriteSortedXcodeEnv(self, target, env):
for k, v in env:
# For
# foo := a\ b
# the escaped space does the right thing. For
# export foo := a\ b
      # it does not -- the backslash is written to the env as a literal character.
# So don't escape spaces in |env[k]|.
self.WriteLn('%s: export %s := %s' % (QuoteSpaces(target), k, v))
def Objectify(self, path):
"""Convert a path to its output directory form."""
if '$(' in path:
path = path.replace('$(obj)/', '$(obj).%s/$(TARGET)/' % self.toolset)
if not '$(obj)' in path:
path = '$(obj).%s/$(TARGET)/%s' % (self.toolset, path)
return path
def Pchify(self, path, lang):
"""Convert a prefix header path to its output directory form."""
path = self.Absolutify(path)
if '$(' in path:
path = path.replace('$(obj)/', '$(obj).%s/$(TARGET)/pch-%s' %
(self.toolset, lang))
return path
return '$(obj).%s/$(TARGET)/pch-%s/%s' % (self.toolset, lang, path)
def Absolutify(self, path):
"""Convert a subdirectory-relative path into a base-relative path.
Skips over paths that contain variables."""
if '$(' in path:
# Don't call normpath in this case, as it might collapse the
# path too aggressively if it features '..'. However it's still
# important to strip trailing slashes.
return path.rstrip('/')
return os.path.normpath(os.path.join(self.path, path))
def ExpandInputRoot(self, template, expansion, dirname):
if '%(INPUT_ROOT)s' not in template and '%(INPUT_DIRNAME)s' not in template:
return template
path = template % {
'INPUT_ROOT': expansion,
'INPUT_DIRNAME': dirname,
}
return path
def _InstallableTargetInstallPath(self):
"""Returns the location of the final output for an installable target."""
# Xcode puts shared_library results into PRODUCT_DIR, and some gyp files
# rely on this. Emulate this behavior for mac.
# XXX(TooTallNate): disabling this code since we don't want this behavior...
#if (self.type == 'shared_library' and
# (self.flavor != 'mac' or self.toolset != 'target')):
# # Install all shared libs into a common directory (per toolset) for
# # convenient access with LD_LIBRARY_PATH.
# return '$(builddir)/lib.%s/%s' % (self.toolset, self.alias)
return '$(builddir)/' + self.alias
def WriteAutoRegenerationRule(params, root_makefile, makefile_name,
build_files):
"""Write the target to regenerate the Makefile."""
options = params['options']
build_files_args = [gyp.common.RelativePath(filename, options.toplevel_dir)
for filename in params['build_files_arg']]
gyp_binary = gyp.common.FixIfRelativePath(params['gyp_binary'],
options.toplevel_dir)
if not gyp_binary.startswith(os.sep):
gyp_binary = os.path.join('.', gyp_binary)
root_makefile.write(
"quiet_cmd_regen_makefile = ACTION Regenerating $@\n"
"cmd_regen_makefile = cd $(srcdir); %(cmd)s\n"
"%(makefile_name)s: %(deps)s\n"
"\t$(call do_cmd,regen_makefile)\n\n" % {
'makefile_name': makefile_name,
'deps': ' '.join(map(Sourceify, build_files)),
'cmd': gyp.common.EncodePOSIXShellList(
[gyp_binary, '-fmake'] +
gyp.RegenerateFlags(options) +
build_files_args)})
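# For illustration (hypothetical example): for a top-level build.gyp the rule
# written above looks roughly like
#
#   quiet_cmd_regen_makefile = ACTION Regenerating $@
#   cmd_regen_makefile = cd $(srcdir); <gyp binary> -fmake <regen flags> build.gyp
#   Makefile: build.gyp
#           $(call do_cmd,regen_makefile)
#
# so editing any of the listed gyp files triggers Makefile regeneration.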
def PerformBuild(data, configurations, params):
options = params['options']
for config in configurations:
arguments = ['make']
if options.toplevel_dir and options.toplevel_dir != '.':
arguments += '-C', options.toplevel_dir
arguments.append('BUILDTYPE=' + config)
print 'Building [%s]: %s' % (config, arguments)
subprocess.check_call(arguments)
def GenerateOutput(target_list, target_dicts, data, params):
options = params['options']
flavor = gyp.common.GetFlavor(params)
generator_flags = params.get('generator_flags', {})
builddir_name = generator_flags.get('output_dir', 'out')
android_ndk_version = generator_flags.get('android_ndk_version', None)
default_target = generator_flags.get('default_target', 'all')
def CalculateMakefilePath(build_file, base_name):
"""Determine where to write a Makefile for a given gyp file."""
# Paths in gyp files are relative to the .gyp file, but we want
# paths relative to the source root for the master makefile. Grab
# the path of the .gyp file as the base to relativize against.
# E.g. "foo/bar" when we're constructing targets for "foo/bar/baz.gyp".
base_path = gyp.common.RelativePath(os.path.dirname(build_file),
options.depth)
# We write the file in the base_path directory.
output_file = os.path.join(options.depth, base_path, base_name)
if options.generator_output:
output_file = os.path.join(
options.depth, options.generator_output, base_path, base_name)
base_path = gyp.common.RelativePath(os.path.dirname(build_file),
options.toplevel_dir)
return base_path, output_file
# TODO: search for the first non-'Default' target. This can go
# away when we add verification that all targets have the
# necessary configurations.
default_configuration = None
toolsets = set([target_dicts[target]['toolset'] for target in target_list])
for target in target_list:
spec = target_dicts[target]
if spec['default_configuration'] != 'Default':
default_configuration = spec['default_configuration']
break
if not default_configuration:
default_configuration = 'Default'
srcdir = '.'
makefile_name = 'Makefile' + options.suffix
makefile_path = os.path.join(options.toplevel_dir, makefile_name)
if options.generator_output:
global srcdir_prefix
makefile_path = os.path.join(
options.toplevel_dir, options.generator_output, makefile_name)
srcdir = gyp.common.RelativePath(srcdir, options.generator_output)
srcdir_prefix = '$(srcdir)/'
  flock_command = 'flock'
header_params = {
'default_target': default_target,
'builddir': builddir_name,
'default_configuration': default_configuration,
'flock': flock_command,
'flock_index': 1,
'link_commands': LINK_COMMANDS_LINUX,
'extra_commands': '',
'srcdir': srcdir,
}
if flavor == 'mac':
flock_command = './gyp-mac-tool flock'
header_params.update({
'flock': flock_command,
'flock_index': 2,
'link_commands': LINK_COMMANDS_MAC,
'extra_commands': SHARED_HEADER_MAC_COMMANDS,
})
elif flavor == 'android':
header_params.update({
'link_commands': LINK_COMMANDS_ANDROID,
})
elif flavor == 'solaris':
header_params.update({
'flock': './gyp-flock-tool flock',
'flock_index': 2,
})
elif flavor == 'freebsd':
# Note: OpenBSD has sysutils/flock. lockf seems to be FreeBSD specific.
header_params.update({
'flock': 'lockf',
})
elif flavor == 'aix':
header_params.update({
'link_commands': LINK_COMMANDS_AIX,
'flock': './gyp-flock-tool flock',
'flock_index': 2,
})
header_params.update({
'CC.target': GetEnvironFallback(('CC_target', 'CC'), '$(CC)'),
'AR.target': GetEnvironFallback(('AR_target', 'AR'), '$(AR)'),
'CXX.target': GetEnvironFallback(('CXX_target', 'CXX'), '$(CXX)'),
'LINK.target': GetEnvironFallback(('LINK_target', 'LINK'), '$(LINK)'),
'CC.host': GetEnvironFallback(('CC_host',), 'gcc'),
'AR.host': GetEnvironFallback(('AR_host',), 'ar'),
'CXX.host': GetEnvironFallback(('CXX_host',), 'g++'),
'LINK.host': GetEnvironFallback(('LINK_host',), '$(CXX.host)'),
})
build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
make_global_settings_array = data[build_file].get('make_global_settings', [])
wrappers = {}
for key, value in make_global_settings_array:
if key.endswith('_wrapper'):
wrappers[key[:-len('_wrapper')]] = '$(abspath %s)' % value
make_global_settings = ''
for key, value in make_global_settings_array:
if re.match('.*_wrapper', key):
continue
if value[0] != '$':
value = '$(abspath %s)' % value
wrapper = wrappers.get(key)
if wrapper:
value = '%s %s' % (wrapper, value)
del wrappers[key]
if key in ('CC', 'CC.host', 'CXX', 'CXX.host'):
make_global_settings += (
'ifneq (,$(filter $(origin %s), undefined default))\n' % key)
# Let gyp-time envvars win over global settings.
env_key = key.replace('.', '_') # CC.host -> CC_host
if env_key in os.environ:
value = os.environ[env_key]
make_global_settings += ' %s = %s\n' % (key, value)
make_global_settings += 'endif\n'
else:
make_global_settings += '%s ?= %s\n' % (key, value)
# TODO(ukai): define cmd when only wrapper is specified in
# make_global_settings.
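  # For illustration (hypothetical example): a gyp-level setting such as
  # [['CC', '/usr/bin/clang']] is rendered by the loop above as
  #
  #   ifneq (,$(filter $(origin CC), undefined default))
  #     CC = $(abspath /usr/bin/clang)
  #   endif
  #
  # (unless a gyp-time $CC environment variable overrides the value), while
  # keys other than CC/CXX and their .host variants fall through to the plain
  # 'KEY ?= value' form.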
header_params['make_global_settings'] = make_global_settings
gyp.common.EnsureDirExists(makefile_path)
root_makefile = open(makefile_path, 'w')
root_makefile.write(SHARED_HEADER % header_params)
  # Currently all versions have the same effect, but in the future the
  # behavior could differ.
if android_ndk_version:
root_makefile.write(
'# Define LOCAL_PATH for build of Android applications.\n'
'LOCAL_PATH := $(call my-dir)\n'
'\n')
for toolset in toolsets:
root_makefile.write('TOOLSET := %s\n' % toolset)
WriteRootHeaderSuffixRules(root_makefile)
# Put build-time support tools next to the root Makefile.
dest_path = os.path.dirname(makefile_path)
gyp.common.CopyTool(flavor, dest_path)
# Find the list of targets that derive from the gyp file(s) being built.
needed_targets = set()
for build_file in params['build_files']:
for target in gyp.common.AllTargets(target_list, target_dicts, build_file):
needed_targets.add(target)
build_files = set()
include_list = set()
for qualified_target in target_list:
build_file, target, toolset = gyp.common.ParseQualifiedTarget(
qualified_target)
this_make_global_settings = data[build_file].get('make_global_settings', [])
assert make_global_settings_array == this_make_global_settings, (
"make_global_settings needs to be the same for all targets. %s vs. %s" %
(this_make_global_settings, make_global_settings))
build_files.add(gyp.common.RelativePath(build_file, options.toplevel_dir))
included_files = data[build_file]['included_files']
for included_file in included_files:
# The included_files entries are relative to the dir of the build file
# that included them, so we have to undo that and then make them relative
# to the root dir.
relative_include_file = gyp.common.RelativePath(
gyp.common.UnrelativePath(included_file, build_file),
options.toplevel_dir)
abs_include_file = os.path.abspath(relative_include_file)
# If the include file is from the ~/.gyp dir, we should use absolute path
# so that relocating the src dir doesn't break the path.
if (params['home_dot_gyp'] and
abs_include_file.startswith(params['home_dot_gyp'])):
build_files.add(abs_include_file)
else:
build_files.add(relative_include_file)
base_path, output_file = CalculateMakefilePath(build_file,
target + '.' + toolset + options.suffix + '.mk')
spec = target_dicts[qualified_target]
configs = spec['configurations']
if flavor == 'mac':
gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec)
writer = MakefileWriter(generator_flags, flavor)
writer.Write(qualified_target, base_path, output_file, spec, configs,
part_of_all=qualified_target in needed_targets)
# Our root_makefile lives at the source root. Compute the relative path
# from there to the output_file for including.
mkfile_rel_path = gyp.common.RelativePath(output_file,
os.path.dirname(makefile_path))
include_list.add(mkfile_rel_path)
# Write out per-gyp (sub-project) Makefiles.
depth_rel_path = gyp.common.RelativePath(options.depth, os.getcwd())
for build_file in build_files:
# The paths in build_files were relativized above, so undo that before
# testing against the non-relativized items in target_list and before
# calculating the Makefile path.
build_file = os.path.join(depth_rel_path, build_file)
gyp_targets = [target_dicts[target]['target_name'] for target in target_list
if target.startswith(build_file) and
target in needed_targets]
# Only generate Makefiles for gyp files with targets.
if not gyp_targets:
continue
base_path, output_file = CalculateMakefilePath(build_file,
os.path.splitext(os.path.basename(build_file))[0] + '.Makefile')
makefile_rel_path = gyp.common.RelativePath(os.path.dirname(makefile_path),
os.path.dirname(output_file))
writer.WriteSubMake(output_file, makefile_rel_path, gyp_targets,
builddir_name)
# Write out the sorted list of includes.
root_makefile.write('\n')
for include_file in sorted(include_list):
# We wrap each .mk include in an if statement so users can tell make to
# not load a file by setting NO_LOAD. The below make code says, only
# load the .mk file if the .mk filename doesn't start with a token in
# NO_LOAD.
root_makefile.write(
"ifeq ($(strip $(foreach prefix,$(NO_LOAD),\\\n"
" $(findstring $(join ^,$(prefix)),\\\n"
" $(join ^," + include_file + ")))),)\n")
root_makefile.write(" include " + include_file + "\n")
root_makefile.write("endif\n")
root_makefile.write('\n')
if (not generator_flags.get('standalone')
and generator_flags.get('auto_regeneration', True)):
WriteAutoRegenerationRule(params, root_makefile, makefile_name, build_files)
root_makefile.write(SHARED_FOOTER)
root_makefile.close()
|
jamespcole/home-assistant
|
refs/heads/master
|
homeassistant/components/apcupsd/sensor.py
|
2
|
"""Support for APCUPSd sensors."""
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
import homeassistant.helpers.config_validation as cv
from homeassistant.components import apcupsd
from homeassistant.const import (TEMP_CELSIUS, CONF_RESOURCES, POWER_WATT)
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = [apcupsd.DOMAIN]
SENSOR_PREFIX = 'UPS '
SENSOR_TYPES = {
'alarmdel': ['Alarm Delay', '', 'mdi:alarm'],
'ambtemp': ['Ambient Temperature', '', 'mdi:thermometer'],
'apc': ['Status Data', '', 'mdi:information-outline'],
'apcmodel': ['Model', '', 'mdi:information-outline'],
'badbatts': ['Bad Batteries', '', 'mdi:information-outline'],
'battdate': ['Battery Replaced', '', 'mdi:calendar-clock'],
'battstat': ['Battery Status', '', 'mdi:information-outline'],
'battv': ['Battery Voltage', 'V', 'mdi:flash'],
'bcharge': ['Battery', '%', 'mdi:battery'],
'cable': ['Cable Type', '', 'mdi:ethernet-cable'],
'cumonbatt': ['Total Time on Battery', '', 'mdi:timer'],
'date': ['Status Date', '', 'mdi:calendar-clock'],
'dipsw': ['Dip Switch Settings', '', 'mdi:information-outline'],
'dlowbatt': ['Low Battery Signal', '', 'mdi:clock-alert'],
'driver': ['Driver', '', 'mdi:information-outline'],
'dshutd': ['Shutdown Delay', '', 'mdi:timer'],
'dwake': ['Wake Delay', '', 'mdi:timer'],
'endapc': ['Date and Time', '', 'mdi:calendar-clock'],
'extbatts': ['External Batteries', '', 'mdi:information-outline'],
'firmware': ['Firmware Version', '', 'mdi:information-outline'],
'hitrans': ['Transfer High', 'V', 'mdi:flash'],
'hostname': ['Hostname', '', 'mdi:information-outline'],
'humidity': ['Ambient Humidity', '%', 'mdi:water-percent'],
'itemp': ['Internal Temperature', TEMP_CELSIUS, 'mdi:thermometer'],
'lastxfer': ['Last Transfer', '', 'mdi:transfer'],
'linefail': ['Input Voltage Status', '', 'mdi:information-outline'],
'linefreq': ['Line Frequency', 'Hz', 'mdi:information-outline'],
'linev': ['Input Voltage', 'V', 'mdi:flash'],
'loadpct': ['Load', '%', 'mdi:gauge'],
'loadapnt': ['Load Apparent Power', '%', 'mdi:gauge'],
'lotrans': ['Transfer Low', 'V', 'mdi:flash'],
'mandate': ['Manufacture Date', '', 'mdi:calendar'],
'masterupd': ['Master Update', '', 'mdi:information-outline'],
'maxlinev': ['Input Voltage High', 'V', 'mdi:flash'],
'maxtime': ['Battery Timeout', '', 'mdi:timer-off'],
'mbattchg': ['Battery Shutdown', '%', 'mdi:battery-alert'],
'minlinev': ['Input Voltage Low', 'V', 'mdi:flash'],
'mintimel': ['Shutdown Time', '', 'mdi:timer'],
'model': ['Model', '', 'mdi:information-outline'],
'nombattv': ['Battery Nominal Voltage', 'V', 'mdi:flash'],
'nominv': ['Nominal Input Voltage', 'V', 'mdi:flash'],
'nomoutv': ['Nominal Output Voltage', 'V', 'mdi:flash'],
'nompower': ['Nominal Output Power', POWER_WATT, 'mdi:flash'],
'nomapnt': ['Nominal Apparent Power', 'VA', 'mdi:flash'],
'numxfers': ['Transfer Count', '', 'mdi:counter'],
'outcurnt': ['Output Current', 'A', 'mdi:flash'],
'outputv': ['Output Voltage', 'V', 'mdi:flash'],
'reg1': ['Register 1 Fault', '', 'mdi:information-outline'],
'reg2': ['Register 2 Fault', '', 'mdi:information-outline'],
'reg3': ['Register 3 Fault', '', 'mdi:information-outline'],
'retpct': ['Restore Requirement', '%', 'mdi:battery-alert'],
'selftest': ['Last Self Test', '', 'mdi:calendar-clock'],
'sense': ['Sensitivity', '', 'mdi:information-outline'],
'serialno': ['Serial Number', '', 'mdi:information-outline'],
'starttime': ['Startup Time', '', 'mdi:calendar-clock'],
'statflag': ['Status Flag', '', 'mdi:information-outline'],
'status': ['Status', '', 'mdi:information-outline'],
'stesti': ['Self Test Interval', '', 'mdi:information-outline'],
'timeleft': ['Time Left', '', 'mdi:clock-alert'],
'tonbatt': ['Time on Battery', '', 'mdi:timer'],
'upsmode': ['Mode', '', 'mdi:information-outline'],
'upsname': ['Name', '', 'mdi:information-outline'],
'version': ['Daemon Info', '', 'mdi:information-outline'],
'xoffbat': ['Transfer from Battery', '', 'mdi:transfer'],
'xoffbatt': ['Transfer from Battery', '', 'mdi:transfer'],
'xonbatt': ['Transfer to Battery', '', 'mdi:transfer'],
}
SPECIFIC_UNITS = {
'ITEMP': TEMP_CELSIUS
}
INFERRED_UNITS = {
' Minutes': 'min',
' Seconds': 'sec',
' Percent': '%',
' Volts': 'V',
' Ampere': 'A',
' Volt-Ampere': 'VA',
' Watts': POWER_WATT,
' Hz': 'Hz',
' C': TEMP_CELSIUS,
' Percent Load Capacity': '%',
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_RESOURCES, default=[]):
vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
})
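# For illustration (hypothetical example): with the schema above, the platform
# would typically be configured in configuration.yaml along the lines of
#
#   sensor:
#     - platform: apcupsd
#       resources:
#         - bcharge
#         - timeleft
#
# where every entry under 'resources' must be one of the SENSOR_TYPES keys.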
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the APCUPSd sensors."""
entities = []
for resource in config[CONF_RESOURCES]:
sensor_type = resource.lower()
if sensor_type not in SENSOR_TYPES:
SENSOR_TYPES[sensor_type] = [
sensor_type.title(), '', 'mdi:information-outline']
if sensor_type.upper() not in apcupsd.DATA.status:
_LOGGER.warning(
"Sensor type: %s does not appear in the APCUPSd status output",
sensor_type)
entities.append(APCUPSdSensor(apcupsd.DATA, sensor_type))
add_entities(entities, True)
def infer_unit(value):
"""If the value ends with any of the units from ALL_UNITS.
Split the unit off the end of the value and return the value, unit tuple
pair. Else return the original value and None as the unit.
"""
from apcaccess.status import ALL_UNITS
for unit in ALL_UNITS:
if value.endswith(unit):
return value[:-len(unit)], INFERRED_UNITS.get(unit, unit.strip())
return value, None
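# For illustration (assuming ' Seconds' is among apcaccess' ALL_UNITS):
# infer_unit('120 Seconds') returns ('120', 'sec') via INFERRED_UNITS, while
# infer_unit('ONLINE') returns ('ONLINE', None) because no unit suffix matches.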
class APCUPSdSensor(Entity):
"""Representation of a sensor entity for APCUPSd status values."""
def __init__(self, data, sensor_type):
"""Initialize the sensor."""
self._data = data
self.type = sensor_type
self._name = SENSOR_PREFIX + SENSOR_TYPES[sensor_type][0]
self._unit = SENSOR_TYPES[sensor_type][1]
self._inferred_unit = None
self._state = None
@property
def name(self):
"""Return the name of the UPS sensor."""
return self._name
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return SENSOR_TYPES[self.type][2]
@property
def state(self):
"""Return true if the UPS is online, else False."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
if not self._unit:
return self._inferred_unit
return self._unit
def update(self):
"""Get the latest status and use it to update our sensor state."""
if self.type.upper() not in self._data.status:
self._state = None
self._inferred_unit = None
else:
self._state, self._inferred_unit = infer_unit(
self._data.status[self.type.upper()])
|
chouseknecht/ansible
|
refs/heads/devel
|
test/units/modules/network/onyx/test_onyx_lldp.py
|
23
|
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.onyx import onyx_lldp
from units.modules.utils import set_module_args
from .onyx_module import TestOnyxModule, load_fixture
class TestOnyxInterfaceModule(TestOnyxModule):
module = onyx_lldp
def setUp(self):
super(TestOnyxInterfaceModule, self).setUp()
self.mock_get_config = patch.object(
onyx_lldp.OnyxLldpModule, "_get_lldp_config")
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch(
'ansible.module_utils.network.onyx.onyx.load_config')
self.load_config = self.mock_load_config.start()
def tearDown(self):
super(TestOnyxInterfaceModule, self).tearDown()
self.mock_get_config.stop()
self.mock_load_config.stop()
def load_fixtures(self, commands=None, transport='cli'):
if commands == ['lldp']:
self.get_config.return_value = None
else:
config_file = 'onyx_lldp_show.cfg'
self.get_config.return_value = load_fixture(config_file)
self.load_config.return_value = None
def test_lldp_no_change(self):
set_module_args(dict())
self.execute_module(changed=False)
def test_lldp_disable(self):
set_module_args(dict(state='absent'))
commands = ['no lldp']
self.execute_module(changed=True, commands=commands)
def test_lldp_enable(self):
set_module_args(dict(state='present'))
commands = ['lldp']
self.execute_module(changed=True, commands=commands)
|
rbbratta/virt-test
|
refs/heads/master
|
libvirt/tests/src/virsh_cmd/domain/virsh_setvcpus.py
|
1
|
import re
import os
import logging
import commands
from autotest.client.shared import error
from virttest import remote, libvirt_vm, virsh, libvirt_xml
from xml.dom.minidom import parse
def run_virsh_setvcpus(test, params, env):
"""
Test command: virsh setvcpus.
    The command can change the number of virtual CPUs in the guest domain.
    1. Prepare test environment, destroy or suspend a VM.
    2. Perform virsh setvcpus operation.
    3. Recover test environment.
    4. Confirm the test result.
"""
vm_name = params.get("main_vm")
vm = env.get_vm(vm_name)
xml_file = params.get("setvcpus_xml_file", "vm.xml")
virsh.dumpxml(vm_name, extra="", to_file=xml_file)
tmp_file = params.get("setvcpus_tmp_file", "tmp.xml")
pre_vm_state = params.get("setvcpus_pre_vm_state")
command = params.get("setvcpus_command", "setvcpus")
options = params.get("setvcpus_options")
domain = params.get("setvcpus_domain")
count = params.get("setvcpus_count")
extra_param = params.get("setvcpus_extra_param")
count_option = "%s %s" % (count, extra_param)
status_error = params.get("status_error")
def get_current_vcpus():
"""
Get current vcpu number.
"""
vcpus_set = ""
virsh.dumpxml(vm_name, extra="", to_file=tmp_file)
dom = parse(tmp_file)
root = dom.documentElement
vcpus_2 = root.getElementsByTagName("vcpu")
for n in vcpus_2:
vcpus_set += n.getAttribute("current")
vcpus_set = int(vcpus_set)
dom.unlink()
return vcpus_set
if vm.is_alive():
vm.destroy()
vm_xml = libvirt_xml.VMXML()
vm_xml.set_vm_vcpus(vm_name, 2)
vm.start()
vm.wait_for_login()
if status_error == "no":
vcpus_new = len(vm.vcpuinfo())
domid = vm.get_id()
domuuid = vm.get_uuid()
if pre_vm_state == "paused":
vm.pause()
elif pre_vm_state == "shut off":
vm.destroy()
if domain == "remote_name":
remote_ssh_addr = params.get("remote_ip", None)
remote_addr = params.get("local_ip", None)
remote_password = params.get("remote_password", None)
host_type = virsh.driver()
if host_type == "qemu":
remote_string = "qemu+ssh://%s/system" % remote_addr
elif host_type == "xen":
remote_string = "xen+ssh://%s" % remote_addr
command = "virsh -c %s setvcpus %s 1 --live" % (remote_string, vm_name)
if virsh.has_command_help_match(command, "--live") == None:
status_error = "yes"
session = remote.remote_login("ssh", remote_ssh_addr, "22", "root", remote_password, "#")
session.cmd_output('LANG=C')
status, output = session.cmd_status_output(command, internal_timeout=5)
session.close()
vcpus_current = len(vm.vcpuinfo())
else:
if domain == "name":
dom_option = vm_name
elif domain == "id":
dom_option = domid
if params.get("setvcpus_hex_id") != None:
dom_option = hex(int(domid))
elif params.get("setvcpus_invalid_id") != None:
dom_option = params.get("setvcpus_invalid_id")
elif domain == "uuid":
dom_option = domuuid
if params.get("setvcpus_invalid_uuid") != None:
dom_option = params.get("setvcpus_invalid_uuid")
else:
dom_option = domain
option_list = options.split(" ")
for item in option_list:
            if virsh.has_command_help_match(command, item) is None:
status_error = "yes"
break
status = virsh.setvcpus(dom_option, count_option, options, ignore_status=True).exit_status
if pre_vm_state == "paused":
virsh.resume(vm_name, ignore_status=True)
if status_error == "no":
if status == 0:
if pre_vm_state == "shut off":
if options == "--config":
vcpus_set = len(vm.vcpuinfo())
elif options == "--current":
vcpus_set = get_current_vcpus()
elif options == "--maximum --config":
vcpus_set = ""
dom = parse("/etc/libvirt/qemu/%s.xml" % vm_name)
vcpus_set = dom.getElementsByTagName("vcpu")[0].firstChild.data
vcpus_set = int(vcpus_set)
dom.unlink()
else:
vcpus_set = len(vm.vcpuinfo())
if domain == "id":
cmd_chk = "cat /etc/libvirt/qemu/%s.xml" % vm_name
output1 = commands.getoutput(cmd_chk)
logging.info("guest-info:\n%s" % output1)
virsh.destroy(vm_name)
virsh.undefine(vm_name)
virsh.define(xml_file)
if os.path.exists(xml_file):
os.remove(xml_file)
if os.path.exists(tmp_file):
os.remove(tmp_file)
    # Check status_error.
if status_error == "yes":
if status == 0:
raise error.TestFail("Run successfully with wrong command!")
else:
if status != 0:
raise error.TestFail("Run failed with right command")
else:
if options == "--maximum --config":
if vcpus_set != 4:
raise error.TestFail("Run failed with right command1")
elif domain == "id":
if options == "--config":
if vcpus_set != vcpus_new or not re.search('<vcpu current=\'1\'>%s</vcpu>' % vcpus_new, output1):
raise error.TestFail("Run failed with right command2")
elif options == "--config --live":
if vcpus_set != 1 or not re.search('<vcpu current=\'1\'>%s</vcpu>' % vcpus_new, output1):
raise error.TestFail("Run failed with right command3")
else:
if vcpus_set != 1 or re.search('<vcpu current=\'1\'>%s</vcpu>' % vcpus_new, output1):
raise error.TestFail("Run failed with right command4")
else:
if vcpus_set != 1:
raise error.TestFail("Run failed with right command5")
|
nsi-iff/nsi_site
|
refs/heads/master
|
web_steps.py
|
1
|
import re
from lettuce import world, step
from should_dsl import should
from paths import path_to
@step(r'I fill in "(.*)" with "(.*)"')
def fill_field(step, label, value):
world.browser.fill(label, value)
@step(r'I go to "(.+)"')
def i_go_to(step, page_name):
world.browser.visit(path_to(page_name))
@step(r'I press "(.*)"')
def press_button(step, name):
world.browser.find_by_name(name.lower()).click()
@step(u'I click "(.*)"')
def i_click(step, link):
world.browser.find_link_by_text(link).first.click()
# a "little" help from http://love-python.blogspot.com/2008/07/strip-html-tags-using-python.html
def remove_html_tags(data):
p = re.compile(r'<.*?>')
return p.sub('', data)
def remove_extra_spaces(data):
p = re.compile(r'\s+')
return p.sub(' ', data)
@step(r'I should see "(.*)"$')
def i_should_see(step, content):
page_content = remove_extra_spaces(remove_html_tags(world.browser.html))
page_content |should| contain(content)
@step(u'I should have "(.*)" as HTML')
def i_should_have_as_html(step, html_output):
world.browser.html |should| contain(html_output)
@step(u'I should see an image called "(.*)"')
def and_i_should_see_an_image_called_group1(step, image_name):
images = world.browser.find_by_css('img')
found_image = [image for image in images if image['src'].endswith(image_name)]
found_image |should| have_at_least(1).image
@step(u'I should see a link to "(.*)" with label "(.*)"')
def i_should_see_a_link_to_with_label(step, link_href, link_text):
links = world.browser.find_link_by_text(link_text)
links |should| have_at_least(1).item
link = links[0]
link['href'] |should| end_with(link_href)
@step(u'I should see a link with text "(.*)"')
def i_should_see_a_link_with_text(step, link_text):
world.browser.find_link_by_text(link_text) |should| have(1).item
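# Editor's note: an illustrative (hypothetical) scenario showing which of the
# step definitions above each Gherkin line would match; the page name, field
# label and button caption are made-up examples, not taken from this project.
#
#   Scenario: Submitting a form
#     Given I go to "contact page"          # i_go_to
#     When I fill in "Name" with "Alice"    # fill_field
#     And I press "Send"                    # press_button
#     Then I should see "Thank you"         # i_should_see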
|
Fale/fedora-packages
|
refs/heads/develop
|
fedoracommunity/distutils/__init__.py
|
12133432
| |
xuweiliang/Codelibrary
|
refs/heads/master
|
openstack_dashboard/test/jasmine/__init__.py
|
12133432
| |
erikr/django
|
refs/heads/master
|
tests/view_tests/tests/__init__.py
|
12133432
| |
MrPetru/spam
|
refs/heads/master
|
spam/controllers/tag.py
|
2
|
# -*- coding: utf-8 -*-
#
# This file is part of SPAM (Spark Project & Asset Manager).
#
# SPAM is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SPAM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SPAM. If not, see <http://www.gnu.org/licenses/>.
#
# Original Copyright (c) 2010, Lorenzo Pierfederici <lpierfederici@gmail.com>
# Contributor(s):
#
"""Tag controller"""
from tg import expose, url, tmpl_context, redirect, validate, require
from tg.controllers import RestController
from pylons.i18n import ugettext as _, ungettext as n_, lazy_ugettext as l_
from spam.model import session_get, taggable_get, tag_get, Tag
from spam.lib.widgets import FormTagNew, FormTagConfirm, FormTagRemove
#from spam.lib.widgets import BoxTags
from spam.lib.notifications import notify, TOPIC_TAGS
from spam.lib.journaling import journal
from repoze.what.predicates import in_group
import logging
log = logging.getLogger(__name__)
# form widgets
f_new = FormTagNew(action=url('/tag'))
f_confirm = FormTagConfirm(action=url('/tag'))
f_remove = FormTagRemove(action=url('/tag'))
# live widgets
#b_tags = BoxTags()
class Controller(RestController):
"""REST controller for managing tags.
In addition to the standard REST verbs this controller defines the following
REST-like methods:
* ``remove`` (:meth:`remove`)
"""
@require(in_group('administrators'))
@expose('spam.templates.tags.get_all')
def get_all(self, taggable_id):
"""Return a html fragment with a list of tags for this object."""
# tmpl_context.b_tags = b_tags
taggable = taggable_get(taggable_id)
return dict(tags=taggable.tags)
@expose('spam.templates.tags.get_all')
def _default(self, taggable_id, *args, **kwargs):
"""Catch request to `tag/<something>' and pass them to :meth:`get_all`,
because RESTController doesn't dispatch to get_all when there are
arguments.
"""
return self.get_all(taggable_id)
@require(in_group('administrators'))
@expose('json')
@expose('spam.templates.tags.get_one')
def get_one(self, taggable_id, tag_id):
"""This method is currently unused, but is needed for the
RESTController to work."""
tag = tag_get(tag_id)
return dict(tag=tag)
@require(in_group('administrators'))
@expose('spam.templates.forms.form')
def new(self, taggable_id, **kwargs):
"""Display a NEW form."""
session = session_get()
taggable = taggable_get(taggable_id)
f_new.value = dict(taggable_id=taggable.id,
current_tags_=', '.join([t.id for t in taggable.tags]),
)
tags = session.query(Tag).order_by('id')
choices = [t.id for t in tags if t not in taggable.tags]
f_new.child.children.tagids.options = choices
tmpl_context.form = f_new
return dict(title='%s %s' % (_('Add tags to:'), taggable.tagged.path))
@require(in_group('administrators'))
@expose('json')
@expose('spam.templates.forms.result')
@validate(f_new, error_handler=new)
def post(self, taggable_id, tagids=[], new_tags=None):
"""Add tags to a ``taggable`` obect."""
session = session_get()
user = tmpl_context.user
taggable = taggable_get(taggable_id)
if isinstance(tagids, list):
tags = [tag_get(i) for i in tagids]
else:
tags = [tag_get(tagids)]
if new_tags:
tags.extend([tag_get(name) for name in new_tags.split(', ')])
added_tags = []
updates = []
for tag in tags:
if tag not in taggable.tags:
taggable.tags.append(tag)
added_tags.append(tag)
# prepare updates to notify clients
updates.append(dict(item=tag, type='added', topic=TOPIC_TAGS,
filter=taggable_id))
if added_tags:
added = ', '.join([t.id for t in added_tags])
msg = '%s %s %s' % (added,
n_('tag added to:',
'tags added to:', len(added_tags)),
taggable_id)
status = 'ok'
# notify clients
notify.send(updates)
# log into Journal
journal.add(user, '%s - %s' % (msg, taggable.tagged))
else:
msg = _('No new tag applied')
status = 'info'
return dict(msg=msg, status=status, updates=updates)
@require(in_group('administrators'))
@expose('spam.templates.forms.form')
def get_delete(self, tag_id, **kwargs):
"""Display a DELETE confirmation form."""
tag = tag_get(tag_id)
f_confirm.custom_method = 'DELETE'
f_confirm.value = dict(tag_id=tag.id)
tmpl_context.form = f_confirm
return dict(title='%s %s?' % (_('Are you sure you want to delete tag:'),
tag.id))
@require(in_group('administrators'))
@expose('json')
@expose('spam.templates.forms.result')
@validate(f_confirm, error_handler=get_delete)
def post_delete(self, tag_id):
"""Delete a tag."""
session = session_get()
user = tmpl_context.user
tag = tag_get(tag_id)
session.delete(tag)
msg = '%s %s' % (_('Deleted tag:'), tag.id)
# log into Journal
journal.add(user, '%s - %s' % (msg, tag))
# notify clients
updates = [dict(item=tag, type='deleted', topic=TOPIC_TAGS)]
notify.send(updates)
return dict(msg=msg, status='ok', updates=updates)
# Custom REST-like actions
_custom_actions = ['remove']
@require(in_group('administrators'))
@expose('json')
@expose('spam.templates.forms.result')
@validate(f_remove)
def remove(self, taggable_id, tagids=[]):
"""Remove tags from an object."""
session = session_get()
user = tmpl_context.user
taggable = taggable_get(taggable_id)
if isinstance(tagids, list):
tags = [tag_get(i) for i in tagids]
else:
tags = [tag_get(tagids)]
removed_tags = []
updates = []
for tag in tags:
if tag in taggable.tags:
taggable.tags.remove(tag)
removed_tags.append(tag)
# prepare updates
updates.append(dict(item=tag, type='deleted', topic=TOPIC_TAGS,
filter=taggable_id))
if removed_tags:
removed = ', '.join([t.id for t in removed_tags])
msg = '%s %s %s' % (removed,
n_('tag removed from:',
'tags removed from:', len(removed_tags)),
taggable_id)
status = 'ok'
# notify clients
notify.send(updates)
# log into Journal
journal.add(user, '%s - %s' % (msg, taggable.tagged))
else:
msg = _('No tag removed')
status = 'info'
return dict(msg=msg, status=status, updates=updates)
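# Editor's note: a rough, hypothetical sketch of how this RestController is
# reached over HTTP, assuming it is mounted at /tag as the form actions above
# suggest; the ids are examples and the exact routing depends on the TG setup.
#
#   GET  /tag/<taggable_id>        -> get_all(): list the tags of an object
#   GET  /tag/<taggable_id>/new    -> new(): render the "add tags" form
#   POST /tag                      -> post(): attach tags (tagids / new_tags)
#   POST /tag  (_method=DELETE)    -> post_delete(): delete a tag entirely
#   POST /tag/remove               -> remove(): detach tags from an object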
|
apigee/edx-platform
|
refs/heads/master
|
common/djangoapps/track/migrations/0002_auto__add_field_trackinglog_host__chg_field_trackinglog_event_type__ch.py
|
189
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'TrackingLog.host'
db.add_column('track_trackinglog', 'host',
self.gf('django.db.models.fields.CharField')(default='', max_length=64, blank=True),
keep_default=False)
# Changing field 'TrackingLog.event_type'
db.alter_column('track_trackinglog', 'event_type', self.gf('django.db.models.fields.CharField')(max_length=512))
# Changing field 'TrackingLog.page'
db.alter_column('track_trackinglog', 'page', self.gf('django.db.models.fields.CharField')(max_length=512, null=True))
def backwards(self, orm):
# Deleting field 'TrackingLog.host'
db.delete_column('track_trackinglog', 'host')
# Changing field 'TrackingLog.event_type'
db.alter_column('track_trackinglog', 'event_type', self.gf('django.db.models.fields.CharField')(max_length=32))
# Changing field 'TrackingLog.page'
db.alter_column('track_trackinglog', 'page', self.gf('django.db.models.fields.CharField')(max_length=32, null=True))
models = {
'track.trackinglog': {
'Meta': {'object_name': 'TrackingLog'},
'agent': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'dtcreated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'event_source': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'event_type': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'host': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'page': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'})
}
}
complete_apps = ['track']
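# Editor's note: with South installed, this migration would typically be applied
# or reverted from the project root roughly as below; the exact invocation
# depends on the project's manage.py setup.
#
#   ./manage.py migrate track          # apply forwards() up to the latest
#   ./manage.py migrate track 0001     # roll back one step via backwards()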
|
gminds/rapidnewsng
|
refs/heads/master
|
django/core/management/validation.py
|
103
|
import collections
import sys
from django.conf import settings
from django.core.management.color import color_style
from django.utils.encoding import force_str
from django.utils.itercompat import is_iterable
from django.utils import six
class ModelErrorCollection:
def __init__(self, outfile=sys.stdout):
self.errors = []
self.outfile = outfile
self.style = color_style()
def add(self, context, error):
self.errors.append((context, error))
self.outfile.write(self.style.ERROR(force_str("%s: %s\n" % (context, error))))
def get_validation_errors(outfile, app=None):
"""
Validates all models that are part of the specified app. If no app name is provided,
validates all models of all installed apps. Writes errors, if any, to outfile.
Returns number of errors.
"""
from django.db import models, connection
from django.db.models.loading import get_app_errors
from django.db.models.fields.related import RelatedObject
from django.db.models.deletion import SET_NULL, SET_DEFAULT
e = ModelErrorCollection(outfile)
for (app_name, error) in get_app_errors().items():
e.add(app_name, error)
for cls in models.get_models(app, include_swapped=True):
opts = cls._meta
# Check swappable attribute.
if opts.swapped:
try:
app_label, model_name = opts.swapped.split('.')
except ValueError:
e.add(opts, "%s is not of the form 'app_label.app_name'." % opts.swappable)
continue
if not models.get_model(app_label, model_name):
e.add(opts, "Model has been swapped out for '%s' which has not been installed or is abstract." % opts.swapped)
# No need to perform any other validation checks on a swapped model.
continue
# If this is the current User model, check known validation problems with User models
if settings.AUTH_USER_MODEL == '%s.%s' % (opts.app_label, opts.object_name):
# Check that the USERNAME FIELD isn't included in REQUIRED_FIELDS.
if cls.USERNAME_FIELD in cls.REQUIRED_FIELDS:
e.add(opts, 'The field named as the USERNAME_FIELD should not be included in REQUIRED_FIELDS on a swappable User model.')
# Check that the username field is unique
if not opts.get_field(cls.USERNAME_FIELD).unique:
e.add(opts, 'The USERNAME_FIELD must be unique. Add unique=True to the field parameters.')
# Model isn't swapped; do field-specific validation.
for f in opts.local_fields:
if f.name == 'id' and not f.primary_key and opts.pk.name == 'id':
e.add(opts, '"%s": You can\'t use "id" as a field name, because each model automatically gets an "id" field if none of the fields have primary_key=True. You need to either remove/rename your "id" field or add primary_key=True to a field.' % f.name)
if f.name.endswith('_'):
e.add(opts, '"%s": Field names cannot end with underscores, because this would lead to ambiguous queryset filters.' % f.name)
if (f.primary_key and f.null and
not connection.features.interprets_empty_strings_as_nulls):
# We cannot reliably check this for backends like Oracle which
# consider NULL and '' to be equal (and thus set up
# character-based fields a little differently).
e.add(opts, '"%s": Primary key fields cannot have null=True.' % f.name)
if isinstance(f, models.CharField):
try:
max_length = int(f.max_length)
if max_length <= 0:
e.add(opts, '"%s": CharFields require a "max_length" attribute that is a positive integer.' % f.name)
except (ValueError, TypeError):
e.add(opts, '"%s": CharFields require a "max_length" attribute that is a positive integer.' % f.name)
if isinstance(f, models.DecimalField):
decimalp_ok, mdigits_ok = False, False
decimalp_msg = '"%s": DecimalFields require a "decimal_places" attribute that is a non-negative integer.'
try:
decimal_places = int(f.decimal_places)
if decimal_places < 0:
e.add(opts, decimalp_msg % f.name)
else:
decimalp_ok = True
except (ValueError, TypeError):
e.add(opts, decimalp_msg % f.name)
mdigits_msg = '"%s": DecimalFields require a "max_digits" attribute that is a positive integer.'
try:
max_digits = int(f.max_digits)
if max_digits <= 0:
e.add(opts, mdigits_msg % f.name)
else:
mdigits_ok = True
except (ValueError, TypeError):
e.add(opts, mdigits_msg % f.name)
invalid_values_msg = '"%s": DecimalFields require a "max_digits" attribute value that is greater than or equal to the value of the "decimal_places" attribute.'
if decimalp_ok and mdigits_ok:
if decimal_places > max_digits:
e.add(opts, invalid_values_msg % f.name)
if isinstance(f, models.FileField) and not f.upload_to:
e.add(opts, '"%s": FileFields require an "upload_to" attribute.' % f.name)
if isinstance(f, models.ImageField):
# Try to import PIL in either of the two ways it can end up installed.
try:
from PIL import Image
except ImportError:
try:
import Image
except ImportError:
e.add(opts, '"%s": To use ImageFields, you need to install the Python Imaging Library. Get it at http://www.pythonware.com/products/pil/ .' % f.name)
if isinstance(f, models.BooleanField) and getattr(f, 'null', False):
e.add(opts, '"%s": BooleanFields do not accept null values. Use a NullBooleanField instead.' % f.name)
if isinstance(f, models.FilePathField) and not (f.allow_files or f.allow_folders):
e.add(opts, '"%s": FilePathFields must have either allow_files or allow_folders set to True.' % f.name)
if f.choices:
if isinstance(f.choices, six.string_types) or not is_iterable(f.choices):
e.add(opts, '"%s": "choices" should be iterable (e.g., a tuple or list).' % f.name)
else:
for c in f.choices:
if not isinstance(c, (list, tuple)) or len(c) != 2:
e.add(opts, '"%s": "choices" should be a sequence of two-tuples.' % f.name)
if f.db_index not in (None, True, False):
e.add(opts, '"%s": "db_index" should be either None, True or False.' % f.name)
# Perform any backend-specific field validation.
connection.validation.validate_field(e, opts, f)
# Check if the on_delete behavior is sane
if f.rel and hasattr(f.rel, 'on_delete'):
if f.rel.on_delete == SET_NULL and not f.null:
e.add(opts, "'%s' specifies on_delete=SET_NULL, but cannot be null." % f.name)
elif f.rel.on_delete == SET_DEFAULT and not f.has_default():
e.add(opts, "'%s' specifies on_delete=SET_DEFAULT, but has no default value." % f.name)
# Check to see if the related field will clash with any existing
# fields, m2m fields, m2m related objects or related objects
if f.rel:
if f.rel.to not in models.get_models():
# If the related model is swapped, provide a hint;
# otherwise, the model just hasn't been installed.
if not isinstance(f.rel.to, six.string_types) and f.rel.to._meta.swapped:
e.add(opts, "'%s' defines a relation with the model '%s.%s', which has been swapped out. Update the relation to point at settings.%s." % (f.name, f.rel.to._meta.app_label, f.rel.to._meta.object_name, f.rel.to._meta.swappable))
else:
e.add(opts, "'%s' has a relation with model %s, which has either not been installed or is abstract." % (f.name, f.rel.to))
# it is a string and we could not find the model it refers to
# so skip the next section
if isinstance(f.rel.to, six.string_types):
continue
# Make sure the related field specified by a ForeignKey is unique
if not f.rel.to._meta.get_field(f.rel.field_name).unique:
e.add(opts, "Field '%s' under model '%s' must have a unique=True constraint." % (f.rel.field_name, f.rel.to.__name__))
rel_opts = f.rel.to._meta
rel_name = RelatedObject(f.rel.to, cls, f).get_accessor_name()
rel_query_name = f.related_query_name()
if not f.rel.is_hidden():
for r in rel_opts.fields:
if r.name == rel_name:
e.add(opts, "Accessor for field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
if r.name == rel_query_name:
e.add(opts, "Reverse query name for field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
for r in rel_opts.local_many_to_many:
if r.name == rel_name:
e.add(opts, "Accessor for field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
if r.name == rel_query_name:
e.add(opts, "Reverse query name for field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
for r in rel_opts.get_all_related_many_to_many_objects():
if r.get_accessor_name() == rel_name:
e.add(opts, "Accessor for field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
if r.get_accessor_name() == rel_query_name:
e.add(opts, "Reverse query name for field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
for r in rel_opts.get_all_related_objects():
if r.field is not f:
if r.get_accessor_name() == rel_name:
e.add(opts, "Accessor for field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
if r.get_accessor_name() == rel_query_name:
e.add(opts, "Reverse query name for field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
seen_intermediary_signatures = []
for i, f in enumerate(opts.local_many_to_many):
# Check to see if the related m2m field will clash with any
# existing fields, m2m fields, m2m related objects or related
# objects
if f.rel.to not in models.get_models():
# If the related model is swapped, provide a hint;
# otherwise, the model just hasn't been installed.
if not isinstance(f.rel.to, six.string_types) and f.rel.to._meta.swapped:
e.add(opts, "'%s' defines a relation with the model '%s.%s', which has been swapped out. Update the relation to point at settings.%s." % (f.name, f.rel.to._meta.app_label, f.rel.to._meta.object_name, f.rel.to._meta.swappable))
else:
e.add(opts, "'%s' has an m2m relation with model %s, which has either not been installed or is abstract." % (f.name, f.rel.to))
# it is a string and we could not find the model it refers to
# so skip the next section
if isinstance(f.rel.to, six.string_types):
continue
# Check that the field is not set to unique. ManyToManyFields do not support unique.
if f.unique:
e.add(opts, "ManyToManyFields cannot be unique. Remove the unique argument on '%s'." % f.name)
if f.rel.through is not None and not isinstance(f.rel.through, six.string_types):
from_model, to_model = cls, f.rel.to
if from_model == to_model and f.rel.symmetrical and not f.rel.through._meta.auto_created:
e.add(opts, "Many-to-many fields with intermediate tables cannot be symmetrical.")
seen_from, seen_to, seen_self = False, False, 0
for inter_field in f.rel.through._meta.fields:
rel_to = getattr(inter_field.rel, 'to', None)
if from_model == to_model: # relation to self
if rel_to == from_model:
seen_self += 1
if seen_self > 2:
e.add(opts, "Intermediary model %s has more than "
"two foreign keys to %s, which is ambiguous "
"and is not permitted." % (
f.rel.through._meta.object_name,
from_model._meta.object_name
)
)
else:
if rel_to == from_model:
if seen_from:
e.add(opts, "Intermediary model %s has more "
"than one foreign key to %s, which is "
"ambiguous and is not permitted." % (
f.rel.through._meta.object_name,
from_model._meta.object_name
)
)
else:
seen_from = True
elif rel_to == to_model:
if seen_to:
e.add(opts, "Intermediary model %s has more "
"than one foreign key to %s, which is "
"ambiguous and is not permitted." % (
f.rel.through._meta.object_name,
rel_to._meta.object_name
)
)
else:
seen_to = True
if f.rel.through not in models.get_models(include_auto_created=True):
e.add(opts, "'%s' specifies an m2m relation through model "
"%s, which has not been installed." % (f.name, f.rel.through)
)
signature = (f.rel.to, cls, f.rel.through)
if signature in seen_intermediary_signatures:
e.add(opts, "The model %s has two manually-defined m2m "
"relations through the model %s, which is not "
"permitted. Please consider using an extra field on "
"your intermediary model instead." % (
cls._meta.object_name,
f.rel.through._meta.object_name
)
)
else:
seen_intermediary_signatures.append(signature)
if not f.rel.through._meta.auto_created:
seen_related_fk, seen_this_fk = False, False
for field in f.rel.through._meta.fields:
if field.rel:
if not seen_related_fk and field.rel.to == f.rel.to:
seen_related_fk = True
elif field.rel.to == cls:
seen_this_fk = True
if not seen_related_fk or not seen_this_fk:
e.add(opts, "'%s' is a manually-defined m2m relation "
"through model %s, which does not have foreign keys "
"to %s and %s" % (f.name, f.rel.through._meta.object_name,
f.rel.to._meta.object_name, cls._meta.object_name)
)
elif isinstance(f.rel.through, six.string_types):
e.add(opts, "'%s' specifies an m2m relation through model %s, "
"which has not been installed" % (f.name, f.rel.through)
)
rel_opts = f.rel.to._meta
rel_name = RelatedObject(f.rel.to, cls, f).get_accessor_name()
rel_query_name = f.related_query_name()
# If rel_name is none, there is no reverse accessor (this only
# occurs for symmetrical m2m relations to self). If this is the
# case, there are no clashes to check for this field, as there are
# no reverse descriptors for this field.
if rel_name is not None:
for r in rel_opts.fields:
if r.name == rel_name:
e.add(opts, "Accessor for m2m field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
if r.name == rel_query_name:
e.add(opts, "Reverse query name for m2m field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
for r in rel_opts.local_many_to_many:
if r.name == rel_name:
e.add(opts, "Accessor for m2m field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
if r.name == rel_query_name:
e.add(opts, "Reverse query name for m2m field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
for r in rel_opts.get_all_related_many_to_many_objects():
if r.field is not f:
if r.get_accessor_name() == rel_name:
e.add(opts, "Accessor for m2m field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
if r.get_accessor_name() == rel_query_name:
e.add(opts, "Reverse query name for m2m field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
for r in rel_opts.get_all_related_objects():
if r.get_accessor_name() == rel_name:
e.add(opts, "Accessor for m2m field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
if r.get_accessor_name() == rel_query_name:
e.add(opts, "Reverse query name for m2m field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
# Check ordering attribute.
if opts.ordering:
for field_name in opts.ordering:
if field_name == '?':
continue
if field_name.startswith('-'):
field_name = field_name[1:]
if opts.order_with_respect_to and field_name == '_order':
continue
# Skip ordering in the format field1__field2 (FIXME: checking
# this format would be nice, but it's a little fiddly).
if '__' in field_name:
continue
# Skip ordering on pk. This is always a valid order_by field
# but is an alias and therefore won't be found by opts.get_field.
if field_name == 'pk':
continue
try:
opts.get_field(field_name, many_to_many=False)
except models.FieldDoesNotExist:
e.add(opts, '"ordering" refers to "%s", a field that doesn\'t exist.' % field_name)
# Check unique_together.
for ut in opts.unique_together:
validate_local_fields(e, opts, "unique_together", ut)
if not isinstance(opts.index_together, collections.Sequence):
e.add(opts, '"index_together" must a sequence')
else:
for it in opts.index_together:
validate_local_fields(e, opts, "index_together", it)
return len(e.errors)
def validate_local_fields(e, opts, field_name, fields):
from django.db import models
if not isinstance(fields, collections.Sequence):
e.add(opts, 'all %s elements must be sequences' % field_name)
else:
for field in fields:
try:
f = opts.get_field(field, many_to_many=True)
except models.FieldDoesNotExist:
e.add(opts, '"%s" refers to %s, a field that doesn\'t exist.' % (field_name, field))
else:
if isinstance(f.rel, models.ManyToManyRel):
e.add(opts, '"%s" refers to %s. ManyToManyFields are not supported in %s.' % (field_name, f.name, field_name))
if f not in opts.local_fields:
e.add(opts, '"%s" refers to %s. This is not in the same model as the %s statement.' % (field_name, f.name, field_name))
|
dantin/scrapy-utils
|
refs/heads/master
|
src/scrapy_utils/middlewares.py
|
1
|
# -*- coding: utf-8 -*-
import logging
import os
import random
from scrapy import signals
from scrapy.downloadermiddlewares.useragent import UserAgentMiddleware
logger = logging.getLogger(__name__)
class RandomUserAgentMiddleware(UserAgentMiddleware):
DEFAULT_USER_AGENT = os.path.normpath(os.path.dirname(__file__) + '/data/user_agent.dat')
def __init__(self, settings, user_agent='Scrapy'):
super(RandomUserAgentMiddleware, self).__init__()
self.user_agent = user_agent
user_agent_list_file = settings.get('USER_AGENT_LIST')
if not user_agent_list_file:
# If the USER_AGENT_LIST setting is not set,
# Use the default UserAgent from 'data/user_agent.dat'
logger.info('use agent file: %s', self.DEFAULT_USER_AGENT)
with open(self.DEFAULT_USER_AGENT, 'r') as f:
self.user_agent_list = [line.strip() for line in f.readlines()]
else:
logger.info('use agent file: %s', user_agent_list_file)
with open(user_agent_list_file, 'r') as f:
self.user_agent_list = [line.strip() for line in f.readlines()]
@classmethod
def from_crawler(cls, crawler):
obj = cls(crawler.settings)
crawler.signals.connect(
obj.spider_opened,
signal=signals.spider_opened)
return obj
def process_request(self, request, spider):
user_agent = random.choice(self.user_agent_list)
if user_agent:
request.headers.setdefault('User-Agent', user_agent)
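# Editor's note: an illustrative settings.py fragment (not part of this module)
# showing one way to enable the middleware in a Scrapy project; the priority
# value and the USER_AGENT_LIST path are assumptions.
#
#   USER_AGENT_LIST = '/path/to/user_agents.txt'   # one User-Agent per line
#   DOWNLOADER_MIDDLEWARES = {
#       'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
#       'scrapy_utils.middlewares.RandomUserAgentMiddleware': 400,
#   }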
|
ruibarreira/linuxtrail
|
refs/heads/master
|
usr/lib/python2.7/dist-packages/zeitgeist/datamodel.py
|
3
|
# -.- coding: utf-8 -.-
# Zeitgeist
#
# Copyright © 2009 Mikkel Kamstrup Erlandsen <mikkel.kamstrup@gmail.com>
# Copyright © 2009 Markus Korn <thekorn@gmx.de>
# Copyright © 2009-2010 Seif Lotfy <seif@lotfy.com>
# Copyright © 2009-2010 Siegfried-Angel Gevatter Pujals <rainct@ubuntu.com>
# Copyright © 2011 Collabora Ltd.
# By Siegfried-Angel Gevatter Pujals <rainct@ubuntu.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2.1 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os.path
import gettext
import time
import sys
gettext.install("zeitgeist", unicode=1)
__all__ = [
'Interpretation',
'Manifestation',
'ResultType',
'RelevantResultType',
'StorageState',
'TimeRange',
'DataSource',
'Event',
'Subject',
'NULL_EVENT',
'NEGATION_OPERATOR',
]
NEGATION_OPERATOR = "!"
WILDCARD = "*"
def EQUAL(x, y):
"""checks if both given arguments are equal"""
return x == y
def STARTSWITH(x, y):
"""checks if 'x' startswith 'y'"""
return x.startswith(y)
NEEDS_CHILD_RESOLUTION = set()
def get_timestamp_for_now():
"""
Return the current time in milliseconds since the Unix Epoch.
"""
return int(time.time() * 1000)
class EnumValue(int):
"""Class which behaves like an int, but has an additional docstring"""
def __new__(cls, value, doc=""):
obj = super(EnumValue, cls).__new__(EnumValue, value)
obj.__doc__ = "%s. ``(Integer value: %i)``" %(doc, obj)
return obj
def isCamelCase(text):
return text and text[0].isupper() and " " not in text
def get_name_or_str(obj):
try:
return str(obj.name)
except AttributeError:
return str(obj)
_SYMBOLS_BY_URI = {}
class Symbol(str):
def __new__(cls, name, parent=None, uri=None, display_name=None, doc=None, auto_resolve=True):
if not isCamelCase(name):
raise ValueError("Naming convention requires symbol name to be CamelCase, got '%s'" %name)
return super(Symbol, cls).__new__(Symbol, uri or name)
def __init__(self, name, parent=None, uri=None, display_name=None, doc=None, auto_resolve=True):
self._children = dict()
self._all_children = None
self._parents = parent or set() # will be bootstrapped to a dict at module load time
assert isinstance(self._parents, set), name
self._name = name
self._uri = uri
self._display_name = display_name
self._doc = doc
_SYMBOLS_BY_URI[uri] = self
def __repr__(self):
return "<%s '%s'>" %(get_name_or_str(self), self.uri)
def __getattr__(self, name):
self._ensure_all_children()
try:
return self._all_children[name]
except KeyError:
for child in self.iter_all_children():
if child == self:
continue
try:
return getattr(child, name)
except AttributeError:
pass
raise AttributeError("'%s' object has no attribute '%s'" %(self.__class__.__name__, name))
def __getitem__ (self, uri):
return _SYMBOLS_BY_URI[uri]
def _ensure_all_children (self):
if self._all_children is not None : return
self._all_children = dict()
for child in self._children.itervalues():
child._visit(self._all_children)
def _visit (self, dikt):
dikt[self.name] = self
for child in self._children.itervalues():
child._visit(dikt)
@staticmethod
def find_child_uris_extended (uri):
"""
Creates a list of all known child Symbols of `uri`, including
`uri` itself in the list. Hence the "extended". If `uri`
is unknown a list containing only `uri` is returned.
"""
try:
symbol = _SYMBOLS_BY_URI[uri]
children = list(symbol.get_all_children())
children.append(uri)
return children
except KeyError, e:
return [uri]
@property
def uri(self):
return self._uri or self.name
@property
def display_name(self):
return self._display_name or ""
@property
def name(self):
return self._name
__name__ = name
def __dir__(self):
self._ensure_all_children()
return self._all_children.keys()
@property
def doc(self):
return self._doc or ""
@property
def __doc__(self):
return "%s\n\n %s. ``(Display name: '%s')``" %(self.uri, self.doc.rstrip("."), self.display_name)
def get_children(self):
"""
Returns a list of immediate child symbols
"""
return frozenset(self._children.itervalues())
def iter_all_children(self):
"""
Returns a generator that recursively iterates over all children
of this symbol
"""
self._ensure_all_children()
return self._all_children.itervalues()
def get_all_children(self):
"""
Return a read-only set containing all children of this symbol
"""
return frozenset(self.iter_all_children())
def get_parents(self):
"""
Returns a list of immediate parent symbols
"""
return frozenset(self._parents.itervalues())
def is_child_of (self, parent):
"""
Returns True if this symbol is a child of `parent`.
"""
if not isinstance (parent, Symbol):
try:
parent = _SYMBOLS_BY_URI[parent]
except KeyError, e:
# Parent is not a known URI
return self.uri == parent
# Invariant: parent is a Symbol
if self.uri == parent.uri : return True
parent._ensure_all_children()
# FIXME: We should really check that child.uri is in there,
# but that is not fast with the current code layout
return self.name in parent._all_children
@staticmethod
def uri_is_child_of (child, parent):
"""
Returns True if `child` is a child of `parent`. Both `child`
and `parent` arguments must be any combination of
:class:`Symbol` and/or string.
"""
if isinstance (child, basestring):
try:
child = _SYMBOLS_BY_URI[child]
except KeyError, e:
# Child is not a known URI
if isinstance (parent, basestring):
return child == parent
elif isinstance (parent, Symbol):
return child == parent.uri
else:
return False
if not isinstance (child, Symbol):
raise ValueError("Child argument must be a Symbol or string. Got %s" % type(child))
return child.is_child_of(parent)
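# Editor's note: an illustrative sketch of the Symbol hierarchy helpers above
# (editorial addition); the URIs are hypothetical stand-ins for the real
# Interpretation/Manifestation symbols registered elsewhere in this module.
#
#   doc_uri = "http://example.org/ontology#Document"
#   text_uri = "http://example.org/ontology#TextDocument"    # a child of Document
#   Symbol.uri_is_child_of(text_uri, doc_uri)     # True once both URIs are registered
#   Symbol.find_child_uris_extended(doc_uri)      # all child URIs plus doc_uri itself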
class TimeRange(list):
"""
A class that represents a time range with a beginning and an end.
The timestamps used are integers representing milliseconds since the
Epoch.
By design this class will be automatically transformed to the DBus
type (xx).
"""
# Maximal value of our timestamps
_max_stamp = 2**63 - 1
def __init__ (self, begin, end):
super(TimeRange, self).__init__((int(begin), int(end)))
def __eq__ (self, other):
return self.begin == other.begin and self.end == other.end
def __str__ (self):
return "(%s, %s)" % (self.begin, self.end)
def get_begin(self):
return self[0]
def set_begin(self, begin):
self[0] = begin
begin = property(get_begin, set_begin,
doc="The begining timestamp of this time range")
def get_end(self):
return self[1]
def set_end(self, end):
self[1] = end
end = property(get_end, set_end,
doc="The end timestamp of this time range")
@classmethod
def until_now(cls):
"""
Return a :class:`TimeRange` from 0 to the instant of invocation
"""
return cls(0, int(time.time() * 1000))
@classmethod
def from_now(cls):
"""
Return a :class:`TimeRange` from the instant of invocation to
the end of time
"""
return cls(int(time.time() * 1000), cls._max_stamp)
@classmethod
def from_seconds_ago(cls, sec):
"""
Return a :class:`TimeRange` ranging from "sec" seconds before
the instant of invocation to the same.
"""
now = int(time.time() * 1000)
return cls(now - (sec * 1000), now)
@classmethod
def from_timestamp(cls, timestamp):
"""
Return a :class:`TimeRange` ranging from the given timestamp until
the end of time.
The given timestamp is expected to be expressed in milliseconds.
"""
return cls(int(timestamp), cls._max_stamp)
@classmethod
def always(cls):
"""
Return a :class:`TimeRange` from 0 (January 1, 1970) to the most
distant future
"""
return cls(0, cls._max_stamp)
def is_always(self):
"""
Returns True if this time range goes from timestamp 0 (January 1, 1970)
-or lower- to the most distant future.
"""
return self.begin <= 0 and self.end >= TimeRange._max_stamp
def intersect(self, time_range):
"""
Return a new :class:`TimeRange` that is the intersection of the
two time range intervals. If the intersection is empty this
method returns :const:`None`.
"""
# Behold the boolean madness!
result = TimeRange(0,0)
if self.begin < time_range.begin:
if self.end < time_range.begin:
return None
else:
result.begin = time_range.begin
else:
if self.begin > time_range.end:
return None
else:
result.begin = self.begin
if self.end < time_range.end:
if self.end < time_range.begin:
return None
else:
result.end = self.end
else:
if self.begin > time_range.end:
return None
else:
result.end = time_range.end
return result
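# Editor's note: a small illustrative sketch of the TimeRange helpers above
# (editorial addition; the values are arbitrary examples).
#
#   last_hour = TimeRange.from_seconds_ago(3600)     # one hour ago .. now
#   everything = TimeRange.always()                  # epoch .. most distant future
#   last_hour.intersect(everything)                  # == last_hour
#   TimeRange.until_now().is_always()                # False: end is "now", not infinity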
class Subject(list):
"""
Represents a subject of an :class:`Event`. This class is both used to
represent actual subjects, but also create subject templates to match
other subjects against.
Applications should normally use the method :meth:`new_for_values` to
create new subjects.
"""
Fields = (Uri,
Interpretation,
Manifestation,
Origin,
Mimetype,
Text,
Storage,
CurrentUri,
CurrentOrigin) = range(9)
SUPPORTS_NEGATION = (Uri, CurrentUri, Interpretation, Manifestation,
Origin, CurrentOrigin, Mimetype)
SUPPORTS_WILDCARDS = (Uri, CurrentUri, Origin, CurrentOrigin, Mimetype)
def __init__(self, data=None):
if data:
if len(data) == len(Subject.Fields) - 2:
# current_uri has been added in Zeitgeist 0.8.0
data.append("")
if len(data) == len(Subject.Fields) - 1:
# current_origin has been added in Zeitgeist 1.0 Beta 1
data.append("")
if len(data) < len(Subject.Fields):
raise ValueError(
"Invalid subject data length %s, expected %s" \
%(len(data), len(Subject.Fields)))
super(Subject, self).__init__(data)
else:
super(Subject, self).__init__([""]*len(Subject.Fields))
def __repr__(self):
return "%s(%s)" %(
self.__class__.__name__, super(Subject, self).__repr__()
)
def __eq__(self, other):
for field in Subject.Fields:
if field in (Subject.CurrentUri, Subject.CurrentOrigin) and \
not self[field] or not other[field]:
continue
if self[field] != other[field]:
return False
return True
@staticmethod
def new_for_values (**values):
"""
Create a new Subject instance and set its properties according
to the keyword arguments passed to this method.
:param uri: The URI of the subject. Eg. *file:///tmp/ratpie.txt*
:param current_uri: The current known URI of the subject (if it was moved or deleted).
:param interpretation: The interpretation type of the subject, given either as a string URI or as a :class:`Interpretation` instance
:param manifestation: The manifestation type of the subject, given either as a string URI or as a :class:`Manifestation` instance
:param origin: The URI of the location where subject resides or can be found
:param current_origin: The URI of the location where subject resides or can be found (if it was moved or deleted).
:param mimetype: The mimetype of the subject encoded as a string, if applicable. Eg. *text/plain*.
:param text: Free form textual annotation of the subject.
:param storage: String identifier for the storage medium of the subject. This should be the UUID of the volume or the string "net" for resources requiring a network interface, and the string "deleted" for subjects that are deleted.
"""
self = Subject()
for key, value in values.iteritems():
if not key in ("uri", "current_uri", "interpretation",
"manifestation", "origin", "current_origin",
"mimetype", "text", "storage"):
raise ValueError("Subject parameter '%s' is not supported" %key)
setattr(self, key, value)
return self
def get_uri(self):
return self[Subject.Uri]
def set_uri(self, value):
self[Subject.Uri] = value
uri = property(get_uri, set_uri,
doc="Read/write property with the URI of the subject encoded as a string")
def get_current_uri(self):
return self[Subject.CurrentUri]
def set_current_uri(self, value):
self[Subject.CurrentUri] = value
current_uri = property(get_current_uri, set_current_uri,
doc="Read/write property with the current URI of the subject encoded as a string")
def get_interpretation(self):
return self[Subject.Interpretation]
def set_interpretation(self, value):
self[Subject.Interpretation] = value
interpretation = property(get_interpretation, set_interpretation,
doc="Read/write property defining the :class:`interpretation type <Interpretation>` of the subject")
def get_manifestation(self):
return self[Subject.Manifestation]
def set_manifestation(self, value):
self[Subject.Manifestation] = value
manifestation = property(get_manifestation, set_manifestation,
doc="Read/write property defining the :class:`manifestation type <Manifestation>` of the subject")
def get_origin(self):
return self[Subject.Origin]
def set_origin(self, value):
self[Subject.Origin] = value
origin = property(get_origin, set_origin,
doc="Read/write property with the URI of the location where the subject can be found. For files this is the parent directory, or for downloaded files it would be the URL of the page where you clicked the download link")
def get_current_origin(self):
return self[Subject.CurrentOrigin]
def set_current_origin(self, value):
self[Subject.CurrentOrigin] = value
current_origin = property(get_current_origin, set_current_origin,
doc="Read/write property with the URI of the location where the subject can be found. For files this is the parent directory, or for downloaded files it would be the URL of the page where you clicked the download link")
def get_mimetype(self):
return self[Subject.Mimetype]
def set_mimetype(self, value):
self[Subject.Mimetype] = value
mimetype = property(get_mimetype, set_mimetype,
doc="Read/write property containing the mimetype of the subject (encoded as a string) if applicable")
def get_text(self):
return self[Subject.Text]
def set_text(self, value):
self[Subject.Text] = value
text = property(get_text, set_text,
doc="Read/write property with a free form textual annotation of the subject")
def get_storage(self):
return self[Subject.Storage]
def set_storage(self, value):
self[Subject.Storage] = value
storage = property(get_storage, set_storage,
doc="Read/write property with a string id of the storage medium where the subject is stored. Fx. the UUID of the disk partition or just the string 'net' for items requiring network interface to be available")
def matches_template (self, subject_template):
"""
Return True if this Subject matches *subject_template*. Empty
fields in the template are treated as wildcards.
Interpretations and manifestations are also matched if they are
children of the types specified in `subject_template`.
See also :meth:`Event.matches_template`
"""
for m in Subject.Fields:
if not subject_template[m]:
# empty fields are handled as wildcards
continue
if m == Subject.Storage:
# we do not support searching by storage field for now
# see LP: #580364
raise ValueError("zeitgeist does not support searching by 'storage' field")
elif m in (Subject.Interpretation, Subject.Manifestation):
# symbols are treated differently
comp = Symbol.uri_is_child_of
else:
comp = EQUAL
if not self._check_field_match(m, subject_template[m], comp):
return False
return True
def _check_field_match(self, field_id, expression, comp):
""" Checks if an expression matches a field given by its `field_id`
using a `comp` comparison function """
if field_id in self.SUPPORTS_NEGATION \
and expression.startswith(NEGATION_OPERATOR):
return not self._check_field_match(field_id, expression[len(NEGATION_OPERATOR):], comp)
elif field_id in self.SUPPORTS_WILDCARDS \
and expression.endswith(WILDCARD):
assert comp == EQUAL, "wildcards only work for pure text fields"
return self._check_field_match(field_id, expression[:-len(WILDCARD)], STARTSWITH)
else:
return comp(self[field_id], expression)
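# Editor's note: an illustrative sketch of building and matching subjects with
# the API above (editorial addition; the URI and mimetype are example values).
#
#   subj = Subject.new_for_values(
#       uri="file:///tmp/example.txt",
#       interpretation=Interpretation.DOCUMENT,
#       mimetype="text/plain")
#   tmpl = Subject.new_for_values(uri="file:///tmp/*")   # trailing * is a wildcard
#   subj.matches_template(tmpl)                          # -> True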
class Event(list):
"""
Core data structure in the Zeitgeist framework. It is an optimized and
convenient representation of an event.
This class is designed so that you can pass it directly over
DBus using the Python DBus bindings. It will automagically be
marshalled with the signature a(asaasay). See also the section
on the :ref:`event serialization format <event_serialization_format>`.
This class does integer-based lookups everywhere and can wrap any
conformant data structure without the need for marshalling back and
forth to the DBus wire format. These two properties make it highly
efficient, and its use is recommended everywhere.
"""
Fields = (Id,
Timestamp,
Interpretation,
Manifestation,
Actor,
Origin) = range(6)
SUPPORTS_NEGATION = (Interpretation, Manifestation, Actor, Origin)
SUPPORTS_WILDCARDS = (Actor, Origin)
_subject_type = Subject
def __init__(self, struct = None):
"""
If 'struct' is set it must be a list containing the event
metadata in the first position, and optionally the list of
subjects in the second position, and again optionally the event
payload in the third position.
Unless the event metadata contains a timestamp the event will
have its timestamp set to "now". Ie. the instant of invocation.
The event metadata (struct[0]) will be used as is, and must
contain the event data on the positions defined by the
Event.Fields enumeration.
Likewise each member of the subjects (struct[1]) must be an
array with subject metadata defined in the positions as laid
out by the Subject.Fields enumeration.
On the third position (struct[2]) the struct may contain the
event payload, which can be an arbitrary binary blob. The payload
will be transferred over DBus with the 'ay' signature (as an
array of bytes).
"""
super(Event, self).__init__()
if struct:
if len(struct) == 1:
self.append(self._check_event_struct(struct[0]))
self.append([])
self.append("")
elif len(struct) == 2:
self.append(self._check_event_struct(struct[0]))
self.append(map(self._subject_type, struct[1]))
self.append("")
elif len(struct) == 3:
self.append(self._check_event_struct(struct[0]))
self.append(map(self._subject_type, struct[1]))
self.append(struct[2])
else:
raise ValueError("Invalid struct length %s" % len(struct))
# If this event is being created from an existing Event instance,
# make a copy of the list holding the event information. This
# enables the idiom "event2 = Event(event1)" to copy an event.
if isinstance(struct, Event):
self[0] = list(self[0])
else:
self.extend(([""]* len(Event.Fields), [], ""))
# If we have no timestamp just set it to now
if not self[0][Event.Timestamp]:
self[0][Event.Timestamp] = str(get_timestamp_for_now())
# If we have no origin for Event then we set None
if len(self[0]) == 5:
self[0].append(None)
@classmethod
def _check_event_struct(cls, event_data):
if len(event_data) == len(cls.Fields) - 1:
# Old versions of Zeitgeist didn't have the event origin field.
event_data.append("")
if len(event_data) < len(cls.Fields):
raise ValueError("event_data must have %s members, found %s" % \
(len(cls.Fields), len(event_data)))
return event_data
@classmethod
def new_for_data(cls, event_data):
"""
Create a new Event setting event_data as the backing array
behind the event metadata. The contents of the array must
contain the event metadata at the positions defined by the
Event.Fields enumeration.
"""
self = cls()
self[0] = self._check_event_struct(event_data)
return self
@classmethod
def new_for_struct(cls, struct):
"""Returns a new Event instance or None if `struct` is a `NULL_EVENT`"""
if struct == NULL_EVENT:
return None
return cls(struct)
@classmethod
def new_for_values(cls, **values):
"""
Create a new Event instance from a collection of keyword
arguments.
:param timestamp: Event timestamp in milliseconds since the Unix Epoch
:param interpretation: The Interpretation type of the event
:param manifestation: Manifestation type of the event
:param actor: The actor (application) that triggered the event
:param origin: The origin (domain) where the event was triggered
:param subjects: A list of :class:`Subject` instances
Instead of setting the *subjects* argument one may use a more
convenient approach for events that have exactly one Subject.
Namely by using the *subject_** keys - mapping directly to their
counterparts in :meth:`Subject.new_for_values`:
:param subject_uri:
:param subject_current_uri:
:param subject_interpretation:
:param subject_manifestation:
:param subject_origin:
:param subject_current_origin:
:param subject_mimetype:
:param subject_text:
:param subject_storage:
"""
self = cls()
for key in values:
if not key in ("timestamp", "interpretation", "manifestation",
"actor", "origin", "subjects", "subject_uri",
"subject_current_uri", "subject_interpretation",
"subject_manifestation", "subject_origin",
"subject_current_origin", "subject_mimetype", "subject_text",
"subject_storage"):
raise ValueError("Event parameter '%s' is not supported" % key)
self.timestamp = values.get("timestamp", self.timestamp)
self.interpretation = values.get("interpretation", "")
self.manifestation = values.get("manifestation", "")
self.actor = values.get("actor", "")
self.origin = values.get("origin", "")
self.subjects = values.get("subjects", self.subjects)
if self._dict_contains_subject_keys(values):
if "subjects" in values:
raise ValueError("Subject keys, subject_*, specified together with full subject list")
subj = self._subject_type()
subj.uri = values.get("subject_uri", "")
subj.current_uri = values.get("subject_current_uri", "")
subj.interpretation = values.get("subject_interpretation", "")
subj.manifestation = values.get("subject_manifestation", "")
subj.origin = values.get("subject_origin", "")
subj.current_origin = values.get("subject_current_origin", "")
subj.mimetype = values.get("subject_mimetype", "")
subj.text = values.get("subject_text", "")
subj.storage = values.get("subject_storage", "")
self.subjects = [subj]
return self
@staticmethod
def _dict_contains_subject_keys (dikt):
if "subject_uri" in dikt: return True
elif "subject_current_uri" in dikt: return True
elif "subject_current_origin" in dikt: return True
elif "subject_interpretation" in dikt: return True
elif "subject_manifestation" in dikt: return True
elif "subject_origin" in dikt: return True
elif "subject_mimetype" in dikt: return True
elif "subject_text" in dikt: return True
elif "subject_storage" in dikt: return True
return False
def __repr__(self):
return "%s(%s)" %(
self.__class__.__name__, super(Event, self).__repr__()
)
def append_subject(self, subject=None):
"""
Append a new empty Subject and return a reference to it
"""
if not subject:
subject = self._subject_type()
self.subjects.append(subject)
return subject
def get_subjects(self):
return self[1]
def set_subjects(self, subjects):
self[1] = subjects
subjects = property(get_subjects, set_subjects,
doc="Read/write property with a list of :class:`Subjects <Subject>`")
def get_id(self):
val = self[0][Event.Id]
return int(val) if val else 0
id = property(get_id,
doc="Read only property containing the the event id if the event has one")
def get_timestamp(self):
return self[0][Event.Timestamp]
def set_timestamp(self, value):
self[0][Event.Timestamp] = str(value)
timestamp = property(get_timestamp, set_timestamp,
doc="Read/write property with the event timestamp defined as milliseconds since the Epoch. By default it is set to the moment of instance creation")
def get_interpretation(self):
return self[0][Event.Interpretation]
def set_interpretation(self, value):
self[0][Event.Interpretation] = value
interpretation = property(get_interpretation, set_interpretation,
doc="Read/write property defining the interpretation type of the event")
def get_manifestation(self):
return self[0][Event.Manifestation]
def set_manifestation(self, value):
self[0][Event.Manifestation] = value
manifestation = property(get_manifestation, set_manifestation,
doc="Read/write property defining the manifestation type of the event")
def get_actor(self):
return self[0][Event.Actor]
def set_actor(self, value):
self[0][Event.Actor] = value
actor = property(get_actor, set_actor,
doc="Read/write property defining the application or entity responsible "
"for emitting the event. For applications, the format of this field is "
"the base filename of the corresponding .desktop file with an "
"`application://` URI scheme. For example, "
"`/usr/share/applications/firefox.desktop` is encoded as "
"`application://firefox.desktop`")
def get_origin(self):
return self[0][Event.Origin]
def set_origin(self, value):
self[0][Event.Origin] = value
origin = property(get_origin, set_origin,
doc="Read/write property defining the origin where the event was emitted.")
def get_payload(self):
return self[2]
def set_payload(self, value):
self[2] = value
payload = property(get_payload, set_payload,
doc="Free form attachment for the event. Transfered over DBus as an array of bytes")
def matches_template(self, event_template):
"""
Return True if this event matches *event_template*. The
matching is done where unset fields in the template is
interpreted as wild cards. Interpretations and manifestations
are also matched if they are children of the types specified
in `event_template`. If the template has more than one
subject, this event matches if at least one of the subjects
on this event matches any single one of the subjects on the
template.
Basically this method mimics the matching behaviour
found in the :meth:`FindEventIds` method on the Zeitgeist engine.
"""
# We use direct member access to speed things up a bit
# First match the raw event data
data = self[0]
tdata = event_template[0]
for m in Event.Fields:
if m == Event.Timestamp or not tdata[m]:
# matching by timestamp is not supported and
# empty template-fields are treated as wildcards
continue
if m in (Event.Manifestation, Event.Interpretation):
# special check for symbols
comp = Symbol.uri_is_child_of
else:
comp = EQUAL
if not self._check_field_match(m, tdata[m], comp):
return False
# If template has no subjects we have a match
if len(event_template[1]) == 0 : return True
# Now we check the subjects
for tsubj in event_template[1]:
for subj in self[1]:
if not subj.matches_template(tsubj) : continue
# We have a matching subject, all good!
return True
# Template has subjects, but we never found a match
return False
def _check_field_match(self, field_id, expression, comp):
""" Checks if an expression matches a field given by its `field_id`
using a `comp` comparison function """
if field_id in self.SUPPORTS_NEGATION \
and expression.startswith(NEGATION_OPERATOR):
return not self._check_field_match(field_id, expression[len(NEGATION_OPERATOR):], comp)
elif field_id in self.SUPPORTS_WILDCARDS \
and expression.endswith(WILDCARD):
assert comp == EQUAL, "wildcards only work for pure text fields"
return self._check_field_match(field_id, expression[:-len(WILDCARD)], STARTSWITH)
else:
return comp(self[0][field_id], expression)
def matches_event (self, event):
"""
Interpret *self* as the template and match *event* against it.
This method is the dual method of :meth:`matches_template`.
"""
return event.matches_template(self)
def in_time_range (self, time_range):
"""
Check if the event timestamp lies within a :class:`TimeRange`
"""
t = int(self.timestamp) # The timestamp may be stored as a string
return (t >= time_range.begin) and (t <= time_range.end)
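# Editor's note: an illustrative sketch of creating an event and matching it
# against a template with the API above (editorial addition; the actor and
# URIs are example values).
#
#   ev = Event.new_for_values(
#       interpretation=Interpretation.ACCESS_EVENT,
#       manifestation=Manifestation.USER_ACTIVITY,
#       actor="application://gedit.desktop",
#       subject_uri="file:///tmp/example.txt")
#   tmpl = Event.new_for_values(actor="application://gedit.desktop")
#   ev.matches_template(tmpl)                          # True (empty fields are wildcards)
#   ev.in_time_range(TimeRange.from_seconds_ago(60))   # True for a freshly created event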
class DataSource(list):
""" Optimized and convenient data structure representing a datasource.
This class is designed so that you can pass it directly over
DBus using the Python DBus bindings. It will automagically be
marshalled with the signature a(asaasay). See also the section
on the :ref:`event serialization format <event_serialization_format>`.
This class does integer-based lookups everywhere and can wrap any
conformant data structure without the need for marshalling back and
forth to the DBus wire format. These two properties make it highly
efficient, and its use is recommended everywhere.
This is part of the :const:`org.gnome.zeitgeist.DataSourceRegistry`
extension.
"""
Fields = (UniqueId,
Name,
Description,
EventTemplates,
Running,
LastSeen, # last time the data-source did something (connected,
# inserted events, disconnected).
Enabled) = range(7)
def get_unique_id(self):
return self[self.UniqueId]
def set_unique_id(self, value):
self[self.UniqueId] = value
def get_name(self):
return self[self.Name]
def set_name(self, value):
self[self.Name] = value
def get_description(self):
return self[self.Description]
def set_description(self, value):
self[self.Description] = value
def get_running(self):
return self[self.Running]
def set_running(self,value):
self[self.Running] = value
def get_last_seen(self):
return self[self.LastSeen]
def set_last_seen(self, value):
self[self.LastSeen] = value
def get_enabled(self):
return self[self.Enabled]
def set_enabled(self, value):
self[self.Enabled] = value
unique_id = property(get_unique_id, set_unique_id)
name = property(get_name, set_name)
description = property(get_description, set_description)
running = property(get_running, set_running)
last_seen = property(get_last_seen, set_last_seen)
enabled = property(get_enabled, set_enabled)
def __init__(self, unique_id, name, description, templates, running=True,
last_seen=None, enabled=True):
"""
Create a new DataSource object using the given parameters.
If you want to instantiate this class from a dbus.Struct, you can
use: DataSource(*data_source), where data_source is the dbus.Struct.
"""
super(DataSource, self).__init__()
self.append(unique_id)
self.append(name)
self.append(description)
self.append(templates)
self.append(bool(running))
self.append(last_seen if last_seen else get_timestamp_for_now())
self.append(enabled)
def __eq__(self, source):
return self[self.UniqueId] == source[self.UniqueId]
def __repr__(self):
return "%s: %s (%s)" % (self.__class__.__name__, self[self.UniqueId],
self[self.Name])
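# Illustrative sketch (added comment): constructing a DataSource directly
# from plain values; the unique id and name below are made up.
#
#     ds = DataSource("org.example.logger", "Example Logger",
#                     "Logs example events", [])
#     print ds.name, ds.running, ds.enabled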
NULL_EVENT = ([], [], [])
"""Minimal Event representation, a tuple containing three empty lists.
This `NULL_EVENT` is used by the API to indicate a queried but not
available (not found or blocked) Event.
"""
class _Enumeration(object):
@classmethod
def iteritems(cls):
"""
Return an iterator yielding (name, value) tuples for all items in
this enumeration.
"""
return iter(map(lambda x: (x, getattr(cls, x)),
filter(lambda x: not x.startswith('__'), sorted(cls.__dict__))))
class RelevantResultType(_Enumeration):
"""
An enumeration class used to define how query results should be returned
from the Zeitgeist engine.
"""
Recent = EnumValue(0, "All uris with the most recent uri first")
Related = EnumValue(1, "All uris with the most related one first")
class StorageState(_Enumeration):
"""
Enumeration class defining the possible values for the storage state
of an event subject.
The StorageState enumeration can be used to control whether or not matched
events must have their subjects available to the user. Fx. not including
deleted files, files on unplugged USB drives, files available only when
a network is available etc.
"""
NotAvailable = EnumValue(0, "The storage medium of the events "
"subjects must not be available to the user")
Available = EnumValue(1, "The storage medium of all event subjects "
"must be immediately available to the user")
Any = EnumValue(2, "The event subjects may or may not be available")
class ResultType(_Enumeration):
"""
An enumeration class used to define how query results should be returned
from the Zeitgeist engine.
"""
MostRecentEvents = EnumValue(0,
"All events with the most recent events first")
LeastRecentEvents = EnumValue(1, "All events with the oldest ones first")
MostRecentSubjects = EnumValue(2, "One event for each subject only, "
"ordered with the most recent events first")
LeastRecentSubjects = EnumValue(3, "One event for each subject only, "
"ordered with oldest events first")
MostPopularSubjects = EnumValue(4, "One event for each subject only, "
"ordered by the popularity of the subject")
LeastPopularSubjects = EnumValue(5, "One event for each subject only, "
"ordered ascendingly by popularity of the subject")
MostPopularActor = EnumValue(6, "The last event of each different actor, "
"ordered by the popularity of the actor")
LeastPopularActor = EnumValue(7, "The last event of each different actor, "
"ordered ascendingly by the popularity of the actor")
MostRecentActor = EnumValue(8,
"The Actor that has been used most recently")
LeastRecentActor = EnumValue(9,
"The Actor that has been used least recently")
MostRecentOrigin = EnumValue(10,
"The last event of each different subject origin")
LeastRecentOrigin = EnumValue(11, "The last event of each different "
"subject origin, ordered by least recently used first")
MostPopularOrigin = EnumValue(12, "The last event of each different "
"subject origin, ordered by the popularity of the origins")
LeastPopularOrigin = EnumValue(13, "The last event of each different "
"subject origin, ordered ascendingly by the popularity of the origin")
OldestActor = EnumValue(14, "The first event of each different actor")
MostRecentSubjectInterpretation = EnumValue(15, "One event for each "
"subject interpretation only, ordered with the most recent "
"events first")
LeastRecentSubjectInterpretation = EnumValue(16, "One event for each "
"subject interpretation only, ordered with the least recent "
"events first")
MostPopularSubjectInterpretation = EnumValue(17, "One event for each "
"subject interpretation only, ordered by the popularity of the "
"subject interpretation")
LeastPopularSubjectInterpretation = EnumValue(18, "One event for each "
"subject interpretation only, ordered ascendingly by popularity of "
"the subject interpretation")
MostRecentMimeType = EnumValue(19, "One event for each mimetype only, "
"ordered with the most recent events first")
LeastRecentMimeType = EnumValue(20, "One event for each mimetype only, "
"ordered with the least recent events first")
MostPopularMimeType = EnumValue(21, "One event for each mimetype only, "
"ordered by the popularity of the mimetype")
LeastPopularMimeType = EnumValue(22, "One event for each mimetype only, "
"ordered ascendingly by popularity of the mimetype")
MostRecentCurrentUri = EnumValue(23, "One event for each subject only "
"(by current_uri instead of uri), "
"ordered with the most recent events first")
LeastRecentCurrentUri = EnumValue(24, "One event for each subject only "
"(by current_uri instead of uri), "
"ordered with oldest events first")
MostPopularCurrentUri = EnumValue(25, "One event for each subject only "
"(by current_uri instead of uri), "
"ordered by the popularity of the subject")
LeastPopularCurrentUri = EnumValue(26, "One event for each subject only "
"(by current_uri instead of uri), "
"ordered ascendingly by popularity of the subject")
MostRecentEventOrigin = EnumValue(27,
"The last event of each different origin")
LeastRecentEventOrigin = EnumValue(28, "The last event of each "
" different origin, ordered by least recently used first")
MostPopularEventOrigin = EnumValue(29, "The last event of each "
"different origin, ordered by the popularity of the origins")
LeastPopularEventOrigin = EnumValue(30, "The last event of each "
"different origin, ordered ascendingly by the popularity of the origin")
MostRecentCurrentOrigin = EnumValue(31,
"The last event of each different subject origin")
LeastRecentCurrentOrigin = EnumValue(32, "The last event of each different "
"subject origin, ordered by least recently used first")
MostPopularCurrentOrigin = EnumValue(33, "The last event of each different "
"subject origin, ordered by the popularity of the origins")
LeastPopularCurrentOrigin = EnumValue(34, "The last event of each different "
"subject origin, ordered ascendingly by the popularity of the origin")
# We should eventually migrate over to those names to disambiguate
# subject origin and event origin:
MostRecentSubjectOrigin = MostRecentOrigin
LeastRecentSubjectOrigin = LeastRecentOrigin
MostPopularSubjectOrigin = MostPopularOrigin
LeastPopularSubjectOrigin = LeastPopularOrigin
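# Illustrative sketch (added comment): enumeration members are plain class
# attributes, so they can be passed straight to queries and enumerated
# generically, e.g.:
#
#     sorting = ResultType.MostRecentEvents
#     for name, value in ResultType.iteritems():
#         print name, value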
INTERPRETATION_DOC = \
"""In general terms the *interpretation* of an event or subject is an abstract
description of *"what happened"* or *"what is this"*.
Each interpretation type is uniquely identified by a URI. This class provides
a list of hard coded URI constants for programming convenience. In addition,
each interpretation instance in this class has a *display_name* property, which
is an internationalized string meant for end user display.
The interpretation types listed here are all subclasses of *str* and may be
used anywhere a string would be used.
Interpretations form a hierarchical type tree. So that fx. Audio, Video, and
Image all are sub types of Media. These types again have their own sub types,
like fx. Image has children Icon, Photo, and VectorImage (among others).
Templates match on all sub types, so that a query on subjects with
interpretation Media also match subjects with interpretations
Audio, Photo, and all other sub types of Media.
"""
MANIFESTATION_DOC = \
"""The manifestation type of an event or subject is an abstract classification
of *"how did this happen"* or *"how does this item exist"*.
Each manifestation type is uniquely identified by a URI. This class provides
a list of hard coded URI constants for programming convenience. In addition,
each manifestation instance in this class has a *display_name* property, which
is an internationalized string meant for end user display.
The manifestation types listed here are all subclasses of *str* and may be
used anywhere a string would be used.
Manifestations form a hierarchical type tree. So that fx. ArchiveItem,
Attachment, and RemoteDataObject all are sub types of FileDataObject.
These types can again have their own sub types.
Templates match on all sub types, so that a query on subjects with manifestation
FileDataObject also match subjects of types Attachment or ArchiveItem and all
other sub types of FileDataObject
"""
start_symbols = time.time()
Interpretation = Symbol("Interpretation", doc=INTERPRETATION_DOC)
Manifestation = Symbol("Manifestation", doc=MANIFESTATION_DOC)
_SYMBOLS_BY_URI["Interpretation"] = Interpretation
_SYMBOLS_BY_URI["Manifestation"] = Manifestation
# Load the ontology definitions
ontology_file = os.path.join(os.path.dirname(__file__), "_ontology.py")
try:
execfile(ontology_file)
except IOError:
raise ImportError("Unable to load Zeitgeist ontology. Did you run `make`?")
#
# Bootstrap the symbol relations. We use a 2-pass strategy:
#
# 1) Make sure that all parents and children are registered on each symbol
for symbol in _SYMBOLS_BY_URI.itervalues():
for parent in symbol._parents:
try:
_SYMBOLS_BY_URI[parent]._children[symbol.uri] = None
except KeyError, e:
print "ERROR", e, parent, symbol.uri
pass
for child in symbol._children:
try:
_SYMBOLS_BY_URI[child]._parents.add(symbol.uri)
except KeyError, e:
print "ERROR", e, child, symbol.uri
pass
# 2) Resolve all child and parent URIs to their actual Symbol instances
for symbol in _SYMBOLS_BY_URI.itervalues():
for child_uri in symbol._children.iterkeys():
symbol._children[child_uri] = _SYMBOLS_BY_URI[child_uri]
parents = {}
for parent_uri in symbol._parents:
parents[parent_uri] = _SYMBOLS_BY_URI[parent_uri]
symbol._parents = parents
if __name__ == "__main__":
print "Success"
end_symbols = time.time()
print >> sys.stderr, "Import time: %s" % (end_symbols - start_symbols)
# vim:noexpandtab:ts=4:sw=4
|
thatchristoph/namebench
|
refs/heads/master
|
nb_third_party/simplejson/__init__.py
|
230
|
r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
:mod:`simplejson` exposes an API familiar to users of the standard library
:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
version of the :mod:`json` library contained in Python 2.6, but maintains
compatibility with Python 2.4 and Python 2.5 and (currently) has
significant performance advantages, even without using the optional C
extension for speedups.
Encoding basic Python object hierarchies::
>>> import simplejson as json
>>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print json.dumps("\"foo\bar")
"\"foo\bar"
>>> print json.dumps(u'\u1234')
"\u1234"
>>> print json.dumps('\\')
"\\"
>>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
{"a": 0, "b": 0, "c": 0}
>>> from StringIO import StringIO
>>> io = StringIO()
>>> json.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson as json
>>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
'[1,2,3,{"4":5,"6":7}]'
Pretty printing::
>>> import simplejson as json
>>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=' ')
>>> print '\n'.join([l.rstrip() for l in s.splitlines()])
{
"4": 5,
"6": 7
}
Decoding JSON::
>>> import simplejson as json
>>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
>>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
True
>>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar'
True
>>> from StringIO import StringIO
>>> io = StringIO('["streaming API"]')
>>> json.load(io)[0] == 'streaming API'
True
Specializing JSON object decoding::
>>> import simplejson as json
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
>>> from decimal import Decimal
>>> json.loads('1.1', parse_float=Decimal) == Decimal('1.1')
True
Specializing JSON object encoding::
>>> import simplejson as json
>>> def encode_complex(obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
... raise TypeError(repr(obj) + " is not JSON serializable")
...
>>> json.dumps(2 + 1j, default=encode_complex)
'[2.0, 1.0]'
>>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
'[2.0, 1.0]'
>>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
'[2.0, 1.0]'
Using simplejson.tool from the shell to validate and pretty-print::
$ echo '{"json":"obj"}' | python -m simplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m simplejson.tool
Expecting property name: line 1 column 2 (char 2)
"""
__version__ = '2.1.0rc3'
__all__ = [
'dump', 'dumps', 'load', 'loads',
'JSONDecoder', 'JSONDecodeError', 'JSONEncoder',
'OrderedDict',
]
__author__ = 'Bob Ippolito <bob@redivi.com>'
from decimal import Decimal
from decoder import JSONDecoder, JSONDecodeError
from encoder import JSONEncoder
def _import_OrderedDict():
import collections
try:
return collections.OrderedDict
except AttributeError:
import ordered_dict
return ordered_dict.OrderedDict
OrderedDict = _import_OrderedDict()
def _import_c_make_encoder():
try:
from simplejson._speedups import make_encoder
return make_encoder
except ImportError:
return None
_default_encoder = JSONEncoder(
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=None,
separators=None,
encoding='utf-8',
default=None,
use_decimal=False,
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, use_decimal=False, **kw):
"""Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
``.write()``-supporting file-like object).
If ``skipkeys`` is true then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is false, then some chunks written to ``fp``
may be ``unicode`` instances, subject to normal Python ``str`` to
``unicode`` coercion rules. Unless ``fp.write()`` explicitly
understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
to cause an error.
If ``check_circular`` is false, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is false, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
in strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If *indent* is a string, then JSON array elements and object members
will be pretty-printed with a newline followed by that string repeated
for each level of nesting. ``None`` (the default) selects the most compact
representation without any newlines. For backwards compatibility with
versions of simplejson earlier than 2.1.0, an integer is also accepted
and is converted to a string with that many spaces.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
If *use_decimal* is true (default: ``False``) then decimal.Decimal
will be natively serialized to JSON with full precision.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (not skipkeys and ensure_ascii and
check_circular and allow_nan and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and not use_decimal and not kw):
iterable = _default_encoder.iterencode(obj)
else:
if cls is None:
cls = JSONEncoder
iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding,
default=default, use_decimal=use_decimal, **kw).iterencode(obj)
# could accelerate with writelines in some versions of Python, at
# a debuggability cost
for chunk in iterable:
fp.write(chunk)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, use_decimal=False, **kw):
"""Serialize ``obj`` to a JSON formatted ``str``.
If ``skipkeys`` is true then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is false, then the return value will be a
``unicode`` instance subject to normal Python ``str`` to ``unicode``
coercion rules instead of being escaped to an ASCII ``str``.
If ``check_circular`` is false, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is false, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a string, then JSON array elements and object members
will be pretty-printed with a newline followed by that string repeated
for each level of nesting. ``None`` (the default) selects the most compact
representation without any newlines. For backwards compatibility with
versions of simplejson earlier than 2.1.0, an integer is also accepted
and is converted to a string with that many spaces.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
If *use_decimal* is true (default: ``False``) then decimal.Decimal
will be natively serialized to JSON with full precision.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (not skipkeys and ensure_ascii and
check_circular and allow_nan and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and not use_decimal
and not kw):
return _default_encoder.encode(obj)
if cls is None:
cls = JSONEncoder
return cls(
skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding, default=default,
use_decimal=use_decimal, **kw).encode(obj)
_default_decoder = JSONDecoder(encoding=None, object_hook=None,
object_pairs_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, object_pairs_hook=None,
use_decimal=False, **kw):
"""Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
a JSON document) to a Python object.
*encoding* determines the encoding used to interpret any
:class:`str` objects decoded by this instance (``'utf-8'`` by
default). It has no effect when decoding :class:`unicode` objects.
Note that currently only encodings that are a superset of ASCII work,
strings of other encodings should be passed in as :class:`unicode`.
*object_hook*, if specified, will be called with the result of every
JSON object decoded and its return value will be used in place of the
given :class:`dict`. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
*object_pairs_hook* is an optional function that will be called with
the result of any object literal decode with an ordered list of pairs.
The return value of *object_pairs_hook* will be used instead of the
:class:`dict`. This feature can be used to implement custom decoders
that rely on the order that the key and value pairs are decoded (for
example, :func:`collections.OrderedDict` will remember the order of
insertion). If *object_hook* is also defined, the *object_pairs_hook*
takes priority.
*parse_float*, if specified, will be called with the string of every
JSON float to be decoded. By default, this is equivalent to
``float(num_str)``. This can be used to use another datatype or parser
for JSON floats (e.g. :class:`decimal.Decimal`).
*parse_int*, if specified, will be called with the string of every
JSON int to be decoded. By default, this is equivalent to
``int(num_str)``. This can be used to use another datatype or parser
for JSON integers (e.g. :class:`float`).
*parse_constant*, if specified, will be called with one of the
following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
can be used to raise an exception if invalid JSON numbers are
encountered.
If *use_decimal* is true (default: ``False``) then it implies
parse_float=decimal.Decimal for parity with ``dump``.
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
return loads(fp.read(),
encoding=encoding, cls=cls, object_hook=object_hook,
parse_float=parse_float, parse_int=parse_int,
parse_constant=parse_constant, object_pairs_hook=object_pairs_hook,
use_decimal=use_decimal, **kw)
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, object_pairs_hook=None,
use_decimal=False, **kw):
"""Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
document) to a Python object.
*encoding* determines the encoding used to interpret any
:class:`str` objects decoded by this instance (``'utf-8'`` by
default). It has no effect when decoding :class:`unicode` objects.
Note that currently only encodings that are a superset of ASCII work,
strings of other encodings should be passed in as :class:`unicode`.
*object_hook*, if specified, will be called with the result of every
JSON object decoded and its return value will be used in place of the
given :class:`dict`. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
*object_pairs_hook* is an optional function that will be called with
the result of any object literal decode with an ordered list of pairs.
The return value of *object_pairs_hook* will be used instead of the
:class:`dict`. This feature can be used to implement custom decoders
that rely on the order that the key and value pairs are decoded (for
example, :func:`collections.OrderedDict` will remember the order of
insertion). If *object_hook* is also defined, the *object_pairs_hook*
takes priority.
*parse_float*, if specified, will be called with the string of every
JSON float to be decoded. By default, this is equivalent to
``float(num_str)``. This can be used to use another datatype or parser
for JSON floats (e.g. :class:`decimal.Decimal`).
*parse_int*, if specified, will be called with the string of every
JSON int to be decoded. By default, this is equivalent to
``int(num_str)``. This can be used to use another datatype or parser
for JSON integers (e.g. :class:`float`).
*parse_constant*, if specified, will be called with one of the
following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
can be used to raise an exception if invalid JSON numbers are
encountered.
If *use_decimal* is true (default: ``False``) then it implies
parse_float=decimal.Decimal for parity with ``dump``.
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
if (cls is None and encoding is None and object_hook is None and
parse_int is None and parse_float is None and
parse_constant is None and object_pairs_hook is None
and not use_decimal and not kw):
return _default_decoder.decode(s)
if cls is None:
cls = JSONDecoder
if object_hook is not None:
kw['object_hook'] = object_hook
if object_pairs_hook is not None:
kw['object_pairs_hook'] = object_pairs_hook
if parse_float is not None:
kw['parse_float'] = parse_float
if parse_int is not None:
kw['parse_int'] = parse_int
if parse_constant is not None:
kw['parse_constant'] = parse_constant
if use_decimal:
if parse_float is not None:
raise TypeError("use_decimal=True implies parse_float=Decimal")
kw['parse_float'] = Decimal
return cls(encoding=encoding, **kw).decode(s)
def _toggle_speedups(enabled):
import simplejson.decoder as dec
import simplejson.encoder as enc
import simplejson.scanner as scan
c_make_encoder = _import_c_make_encoder()
if enabled:
dec.scanstring = dec.c_scanstring or dec.py_scanstring
enc.c_make_encoder = c_make_encoder
enc.encode_basestring_ascii = (enc.c_encode_basestring_ascii or
enc.py_encode_basestring_ascii)
scan.make_scanner = scan.c_make_scanner or scan.py_make_scanner
else:
dec.scanstring = dec.py_scanstring
enc.c_make_encoder = None
enc.encode_basestring_ascii = enc.py_encode_basestring_ascii
scan.make_scanner = scan.py_make_scanner
dec.make_scanner = scan.make_scanner
global _default_decoder
_default_decoder = JSONDecoder(
encoding=None,
object_hook=None,
object_pairs_hook=None,
)
global _default_encoder
_default_encoder = JSONEncoder(
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=None,
separators=None,
encoding='utf-8',
default=None,
)
|
eoghanbkeegan/eoghanbkeegan
|
refs/heads/master
|
eoghanbkeegan/database.py
|
1
|
# -*- coding: utf-8 -*-
"""Database module, including the SQLAlchemy database object and DB-related utilities."""
from .compat import basestring
from .extensions import db
# Alias common SQLAlchemy names
Column = db.Column
relationship = db.relationship
class CRUDMixin(object):
"""Mixin that adds convenience methods for CRUD (create, read, update, delete) operations."""
@classmethod
def create(cls, **kwargs):
"""Create a new record and save it the database."""
instance = cls(**kwargs)
return instance.save()
def update(self, commit=True, **kwargs):
"""Update specific fields of a record."""
for attr, value in kwargs.items():
setattr(self, attr, value)
return commit and self.save() or self
def save(self, commit=True):
"""Save the record."""
db.session.add(self)
if commit:
db.session.commit()
return self
def delete(self, commit=True):
"""Remove the record from the database."""
db.session.delete(self)
return commit and db.session.commit()
class Model(CRUDMixin, db.Model):
"""Base model class that includes CRUD convenience methods."""
__abstract__ = True
# From Mike Bayer's "Building the app" talk
# https://speakerdeck.com/zzzeek/building-the-app
class SurrogatePK(object):
"""A mixin that adds a surrogate integer 'primary key' column named ``id`` to any declarative-mapped class."""
__table_args__ = {"extend_existing": True}
id = Column(db.Integer, primary_key=True)
@classmethod
def get_by_id(cls, record_id):
"""Get record by ID."""
if any(
(
isinstance(record_id, basestring) and record_id.isdigit(),
isinstance(record_id, (int, float)),
)
):
return cls.query.get(int(record_id))
return None
def reference_col(
tablename, nullable=False, pk_name="id", foreign_key_kwargs=None, column_kwargs=None
):
"""Column that adds primary key foreign key reference.
Usage: ::
category_id = reference_col('category')
category = relationship('Category', backref='categories')
"""
foreign_key_kwargs = foreign_key_kwargs or {}
column_kwargs = column_kwargs or {}
return Column(
db.ForeignKey("{0}.{1}".format(tablename, pk_name), **foreign_key_kwargs),
nullable=nullable,
**column_kwargs
)
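# Illustrative sketch (not part of the original module): a hypothetical model
# built from the helpers above. The ``Role``/``User`` names and columns are
# made up for the example.
#
#     class Role(SurrogatePK, Model):
#         __tablename__ = "roles"
#         name = Column(db.String(80), unique=True, nullable=False)
#         user_id = reference_col("users", nullable=True)
#         user = relationship("User", backref="roles")
#
#     role = Role.create(name="admin")        # CRUDMixin shortcut
#     role.update(name="administrator")       # commits by default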
|
wanghe4096/website
|
refs/heads/master
|
aliyun/api/rest/Slb20130221RemoveBackendServersRequest.py
|
1
|
'''
Created by auto_sdk on 2015.04.21
'''
from aliyun.api.base import RestApi
class Slb20130221RemoveBackendServersRequest(RestApi):
def __init__(self,domain='slb.aliyuncs.com',port=80):
RestApi.__init__(self,domain, port)
self.backendServers = None
self.loadBalancerId = None
def getapiname(self):
return 'slb.aliyuncs.com.RemoveBackendServers.2013-02-21'
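# Illustrative sketch (not part of the generated SDK file): populating the
# request. How the call is submitted depends on the RestApi base class, so
# only the attribute assignment is shown; the ids below are made up.
#
#     req = Slb20130221RemoveBackendServersRequest()
#     req.loadBalancerId = "lb-example-id"
#     req.backendServers = '["i-example-instance"]'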
|
zhaochao/fuel-web
|
refs/heads/master
|
nailgun/nailgun/objects/notification.py
|
6
|
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from datetime import datetime
from nailgun import consts
from nailgun.db.sqlalchemy import models
from nailgun.errors import errors
from nailgun.logger import logger
from nailgun.objects import NailgunCollection
from nailgun.objects import NailgunObject
from nailgun.objects import Task
from nailgun.objects.serializers.notification import NotificationSerializer
class Notification(NailgunObject):
model = models.Notification
serializer = NotificationSerializer
schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Notification",
"description": "Serialized Notification object",
"type": "object",
"properties": {
"id": {"type": "number"},
"cluster_id": {"type": "number"},
"node_id": {"type": "number"},
"task_id": {"type": "number"},
"time": {"type": "string"},
"date": {"type": "string"},
"topic": {
"type": "string",
"enum": list(consts.NOTIFICATION_TOPICS)
},
"message": {"type": "string"},
"status": {
"type": "string",
"enum": list(consts.NOTIFICATION_STATUSES)
}
}
}
@classmethod
def create(cls, data):
"""Creates and returns a notification instance.
:param data: a dict with notification data
:returns: a notification instance if an equal notification
doesn't already exist; otherwise None
"""
topic = data.get("topic")
node_id = data.get("node_id")
task_uuid = data.pop("task_uuid", None)
message = data.get("message")
if topic == 'discover' and node_id is None:
raise errors.CannotFindNodeIDForDiscovering(
"No node id in discover notification"
)
if "datetime" not in data:
data["datetime"] = datetime.now()
exist = None
if task_uuid:
task = Task.get_by_uuid(task_uuid)
if task and node_id:
exist = NotificationCollection.count(
NotificationCollection.filter_by(
None,
node_id=node_id,
message=message,
task_id=task.id
)
)
if not exist:
notification = super(Notification, cls).create(data)
logger.info(
u"Notification: topic: {0} message: {1}".format(
data.get("topic"),
data.get("message")
)
)
return notification
return None
@classmethod
def to_dict(cls, instance, fields=None):
notif_dict = cls.serializer.serialize(instance, fields=fields)
notif_dict['time'] = instance.datetime.strftime('%H:%M:%S')
notif_dict['date'] = instance.datetime.strftime('%d-%m-%Y')
return notif_dict
class NotificationCollection(NailgunCollection):
single = Notification
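# Illustrative sketch (added comment): the minimal data accepted by
# Notification.create(); the values are made up, and "datetime" is filled in
# automatically when omitted.
#
#     Notification.create({
#         "topic": "done",
#         "message": "Deployment finished",
#         "node_id": 1,
#     })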
|
detrout/debian-python-dateutil
|
refs/heads/master
|
dateutil/zoneinfo/__init__.py
|
97
|
# -*- coding: utf-8 -*-
import logging
import os
import warnings
import tempfile
import shutil
from subprocess import check_call
from tarfile import TarFile
from pkgutil import get_data
from io import BytesIO
from contextlib import closing
from dateutil.tz import tzfile
__all__ = ["gettz", "rebuild"]
_ZONEFILENAME = "dateutil-zoneinfo.tar.gz"
# python2.6 compatibility. Note that TarFile.__exit__ != TarFile.close, but
# it's close enough for python2.6
_tar_open = TarFile.open
if not hasattr(TarFile, '__exit__'):
def _tar_open(*args, **kwargs):
return closing(TarFile.open(*args, **kwargs))
class tzfile(tzfile):
def __reduce__(self):
return (gettz, (self._filename,))
def getzoneinfofile_stream():
try:
return BytesIO(get_data(__name__, _ZONEFILENAME))
except IOError as e: # TODO switch to FileNotFoundError?
warnings.warn("I/O error({0}): {1}".format(e.errno, e.strerror))
return None
class ZoneInfoFile(object):
def __init__(self, zonefile_stream=None):
if zonefile_stream is not None:
with _tar_open(fileobj=zonefile_stream, mode='r') as tf:
# dict comprehension does not work on python2.6
# TODO: get back to the nicer syntax when we ditch python2.6
# self.zones = {zf.name: tzfile(tf.extractfile(zf),
# filename = zf.name)
# for zf in tf.getmembers() if zf.isfile()}
self.zones = dict((zf.name, tzfile(tf.extractfile(zf),
filename=zf.name))
for zf in tf.getmembers() if zf.isfile())
# deal with links: They'll point to their parent object. Less
# waste of memory
# links = {zl.name: self.zones[zl.linkname]
# for zl in tf.getmembers() if zl.islnk() or zl.issym()}
links = dict((zl.name, self.zones[zl.linkname])
for zl in tf.getmembers() if
zl.islnk() or zl.issym())
self.zones.update(links)
else:
self.zones = dict()
# The current API has gettz as a module function, although in fact it taps into
# a stateful class. So as a workaround for now, without changing the API, we
# will create a new "global" class instance the first time a user requests a
# timezone. Ugly, but adheres to the api.
#
# TODO: deprecate this.
_CLASS_ZONE_INSTANCE = list()
def gettz(name):
if len(_CLASS_ZONE_INSTANCE) == 0:
_CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream()))
return _CLASS_ZONE_INSTANCE[0].zones.get(name)
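# Illustrative usage (added comment): look up a timezone from the bundled
# tarball; unknown names simply return None.
#
#     eastern = gettz("America/New_York")
#     missing = gettz("Not/A_Zone")   # -> None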
def rebuild(filename, tag=None, format="gz", zonegroups=[]):
"""Rebuild the internal timezone info in dateutil/zoneinfo/zoneinfo*tar*
filename is the timezone tarball from ftp.iana.org/tz.
"""
tmpdir = tempfile.mkdtemp()
zonedir = os.path.join(tmpdir, "zoneinfo")
moduledir = os.path.dirname(__file__)
try:
with _tar_open(filename) as tf:
for name in zonegroups:
tf.extract(name, tmpdir)
filepaths = [os.path.join(tmpdir, n) for n in zonegroups]
try:
check_call(["zic", "-d", zonedir] + filepaths)
except OSError as e:
if e.errno == 2:
logging.error(
"Could not find zic. Perhaps you need to install "
"libc-bin or some other package that provides it, "
"or it's not in your PATH?")
raise
target = os.path.join(moduledir, _ZONEFILENAME)
with _tar_open(target, "w:%s" % format) as tf:
for entry in os.listdir(zonedir):
entrypath = os.path.join(zonedir, entry)
tf.add(entrypath, entry)
finally:
shutil.rmtree(tmpdir)
|
amith01994/intellij-community
|
refs/heads/master
|
python/testData/completion/fromImportBinary.py
|
83
|
from root import b<caret>
|
Notgnoshi/clippy
|
refs/heads/master
|
clippy/client.py
|
1
|
from .clipboard import Clipboard
from time import sleep
from socket import *
class Client(object):
# TODO: Be more intelligent with ports and addresses
def __init__(self, server='192.168.0.111', send_port=23456, receive_port=34567, listen_timeout=0.01):
self.send_address = (server, send_port)
# TODO: Avoid hard coding this address
self.receive_address = ('192.168.0.100', receive_port)
self.listen_timeout = listen_timeout
self.clipboard = Clipboard()
# The current system-wide clipboard text
self.master_text = None
# TODO: What's a good max size for updates?
self.BUFFER_SIZE = 4096
def _sendClientUpdate(self):
"""
Send clipboard update to server
"""
# TODO: why the conversion to bytes here? -- because bytes work better with sockets?
data = bytes(str(self.master_text) + '\n', 'utf-8')
s = socket(AF_INET, SOCK_STREAM)
s.connect(self.send_address)
s.send(data)
s.close()
def _grabServerUpdate(self):
"""
Get clipboard update from server
"""
s = socket(AF_INET, SOCK_STREAM)
s.bind(self.receive_address)
s.listen(5)
conn, addr = s.accept()
try:
# TODO: Maybe look for an EOF to signal the end of an update? Or simply give the size in the first few bytes of the update.
buff = conn.recv(self.BUFFER_SIZE)
if buff:
s.close()
return buff.decode('utf-8')
else:
s.close()
return None
except timeout:
s.close()
return None
def _update(self):
"""
Perform one update check and push/pull the clipboard updates if there is one.
"""
client_text = self.clipboard.get()
server_text = self._grabServerUpdate()
# if the server has updated text, update the client clipboard
if server_text is not None:
print("Server has new text. Updating client.")
print(server_text)
self.clipboard.set(server_text)
self.master_text = server_text
# else the server does not have updated text, but check if the client does
elif client_text is not None and client_text != self.master_text:
print("Client has new text.")
self.master_text = client_text
self._sendClientUpdate()
# Otherwise neither the server nor the client have updates to pass around
else:
pass
def monitor(self):
"""
Performs continuous update checks.
"""
while True:
try:
self._update()
sleep(0.01)
except KeyboardInterrupt:
break
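# Illustrative sketch (not part of the original module): driving the client
# loop. The server address relies on the hard-coded defaults above and would
# normally be adjusted for the local network.
if __name__ == '__main__':
    client = Client(server='192.168.0.111')
    client.monitor()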
|
teltek/edx-platform
|
refs/heads/master
|
openedx/core/djangoapps/site_configuration/templatetags/configuration.py
|
46
|
"""
Template tags and helper functions for displaying breadcrumbs in page titles
based on the current site.
"""
from django import template
from django.conf import settings
from django.templatetags.static import static
from django.contrib.staticfiles.storage import staticfiles_storage
from openedx.core.djangoapps.theming import helpers as theming_helpers
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
register = template.Library() # pylint: disable=invalid-name
@register.simple_tag(name="page_title_breadcrumbs", takes_context=True)
def page_title_breadcrumbs_tag(context, *crumbs): # pylint: disable=unused-argument
"""
Django template that creates breadcrumbs for page titles:
{% page_title_breadcrumbs "Specific" "Less Specific" General %}
"""
return configuration_helpers.page_title_breadcrumbs(*crumbs)
@register.simple_tag(name="platform_name")
def platform_name():
"""
Django template tag that outputs the current platform name:
{% platform_name %}
"""
return configuration_helpers.get_value('platform_name', settings.PLATFORM_NAME)
@register.simple_tag(name="favicon_path")
def favicon_path(default=getattr(settings, 'FAVICON_PATH', 'images/favicon.ico')):
"""
Django template tag that outputs the configured favicon:
{% favicon_path %}
"""
return staticfiles_storage.url(configuration_helpers.get_value('favicon_path', default))
@register.simple_tag(name="microsite_css_overrides_file")
def microsite_css_overrides_file():
"""
Django template tag that outputs the css overrides import for a microsite:
{% microsite_css_overrides_file %}
"""
file_path = configuration_helpers.get_value('css_overrides_file', None)
if file_path is not None:
return "<link href='{}' rel='stylesheet' type='text/css'>".format(static(file_path))
else:
return ""
@register.filter
def microsite_template_path(template_name):
"""
Django template filter to apply template overriding to microsites.
The django_templates loader does not support the leading slash, therefore
it is stripped before returning.
"""
template_name = theming_helpers.get_template_path(template_name)
return template_name[1:] if template_name[0] == '/' else template_name
|
apocquet/django
|
refs/heads/master
|
tests/admin_scripts/broken_app/__init__.py
|
12133432
| |
ffmmjj/kairos-face-sdk-python
|
refs/heads/master
|
kairos_face/verify.py
|
1
|
import base64
import requests
from kairos_face import exceptions
from kairos_face import settings
from kairos_face.utils import validate_file_and_url_presence, validate_settings
_verify_base_url = settings.base_url + 'verify'
def verify_face(subject_id, gallery_name, url=None, file=None, additional_arguments={}):
validate_settings()
validate_file_and_url_presence(file, url)
auth_headers = {
'app_id': settings.app_id,
'app_key': settings.app_key
}
payload = _build_payload(subject_id, gallery_name, url, file, additional_arguments)
response = requests.post(_verify_base_url, json=payload, headers=auth_headers)
json_response = response.json()
if response.status_code != 200 or 'Errors' in json_response:
raise exceptions.ServiceRequestError(response.status_code, json_response, payload)
return json_response
def _build_payload(subject_id, gallery_name, url, file, additional_arguments):
if file is not None:
image = _extract_base64_contents(file)
else:
image = url
required_fields = {'image': image, 'subject_id': subject_id,
'gallery_name': gallery_name}
return dict(required_fields, **additional_arguments)
def _extract_base64_contents(file):
with open(file, 'rb') as fp:
image = base64.b64encode(fp.read()).decode('ascii')
return image
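# Illustrative sketch (not part of the original module): a hypothetical call
# against an existing gallery. The subject id, gallery name and image URL are
# made-up values, and settings.app_id / settings.app_key must be configured
# beforehand.
if __name__ == '__main__':
    result = verify_face('subject-42', 'example-gallery',
                         url='https://example.com/face.jpg')
    print(result)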
|
jamesblunt/sympy
|
refs/heads/master
|
sympy/geometry/exceptions.py
|
123
|
"""Geometry Errors."""
from __future__ import print_function, division
class GeometryError(ValueError):
"""An exception raised by classes in the geometry module."""
pass
|
toshywoshy/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/fortios/fortios_firewall_schedule_recurring.py
|
7
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_schedule_recurring
short_description: Recurring schedule configuration in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify firewall_schedule feature and recurring category.
Examples include all parameters, and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
This attribute was already present in previous versions at a deeper level.
It has been moved out to this outer level.
type: str
required: false
choices:
- present
- absent
version_added: 2.9
firewall_schedule_recurring:
description:
- Recurring schedule configuration.
default: null
type: dict
suboptions:
state:
description:
- B(Deprecated)
- Starting with Ansible 2.9 we recommend using the top-level 'state' parameter.
- HORIZONTALLINE
- Indicates whether to create or remove the object.
type: str
required: false
choices:
- present
- absent
color:
description:
- Color of icon on the GUI.
type: int
day:
description:
- One or more days of the week on which the schedule is valid. Separate the names of the days with a space.
type: str
choices:
- sunday
- monday
- tuesday
- wednesday
- thursday
- friday
- saturday
- none
end:
description:
- "Time of day to end the schedule, format hh:mm."
type: str
name:
description:
- Recurring schedule name.
required: true
type: str
start:
description:
- "Time of day to start the schedule, format hh:mm."
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Recurring schedule configuration.
fortios_firewall_schedule_recurring:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
firewall_schedule_recurring:
color: "3"
day: "sunday"
end: "<your_own_value>"
name: "default_name_6"
start: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_firewall_schedule_recurring_data(json):
option_list = ['color', 'day', 'end',
'name', 'start']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for i, elem in enumerate(data):
data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
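# Illustrative example (added comment): keys are rewritten recursively, so
# {'ssl_verify': True, 'day': 'sunday'} becomes {'ssl-verify': True, 'day': 'sunday'}.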
def firewall_schedule_recurring(data, fos):
vdom = data['vdom']
if 'state' in data and data['state']:
state = data['state']
elif 'state' in data['firewall_schedule_recurring'] and data['firewall_schedule_recurring']:
state = data['firewall_schedule_recurring']['state']
else:
state = True
firewall_schedule_recurring_data = data['firewall_schedule_recurring']
filtered_data = underscore_to_hyphen(filter_firewall_schedule_recurring_data(firewall_schedule_recurring_data))
if state == "present":
return fos.set('firewall.schedule',
'recurring',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('firewall.schedule',
'recurring',
mkey=filtered_data['name'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_firewall_schedule(data, fos):
if data['firewall_schedule_recurring']:
resp = firewall_schedule_recurring(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"firewall_schedule_recurring": {
"required": False, "type": "dict", "default": None,
"options": {
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"color": {"required": False, "type": "int"},
"day": {"required": False, "type": "str",
"choices": ["sunday", "monday", "tuesday",
"wednesday", "thursday", "friday",
"saturday", "none"]},
"end": {"required": False, "type": "str"},
"name": {"required": True, "type": "str"},
"start": {"required": False, "type": "str"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_firewall_schedule(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_firewall_schedule(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
loulich/Couchpotato
|
refs/heads/master
|
couchpotato/core/notifications/xmpp_.py
|
96
|
from time import sleep
import traceback
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
import xmpp
log = CPLog(__name__)
autoload = 'Xmpp'
class Xmpp(Notification):
def notify(self, message = '', data = None, listener = None):
if not data: data = {}
try:
jid = xmpp.protocol.JID(self.conf('username'))
client = xmpp.Client(jid.getDomain(), debug = [])
# Connect
if not client.connect(server = (self.conf('hostname'), self.conf('port'))):
log.error('XMPP failed: Connection to server failed.')
return False
# Authenticate
if not client.auth(jid.getNode(), self.conf('password'), resource = jid.getResource()):
log.error('XMPP failed: Failed to authenticate.')
return False
# Send message
client.send(xmpp.protocol.Message(to = self.conf('to'), body = message, typ = 'chat'))
# Disconnect
# some older servers will not send the message if you disconnect immediately after sending
sleep(1)
client.disconnect()
log.info('XMPP notifications sent.')
return True
except:
log.error('XMPP failed: %s', traceback.format_exc())
return False
config = [{
'name': 'xmpp',
'groups': [
{
'tab': 'notifications',
'list': 'notification_providers',
'name': 'xmpp',
'label': 'XMPP',
'description': 'for Jabber, Hangouts (Google Talk), AIM...',
'options': [
{
'name': 'enabled',
'default': 0,
'type': 'enabler',
},
{
'name': 'username',
'description': 'User sending the message. For Hangouts, e-mail of a single-step authentication Google account.',
},
{
'name': 'password',
'type': 'Password',
},
{
'name': 'hostname',
'default': 'talk.google.com',
},
{
'name': 'to',
'description': 'Username (or e-mail for Hangouts) of the person to send the messages to.',
},
{
'name': 'port',
'type': 'int',
'default': 5222,
},
{
'name': 'on_snatch',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Also send message when movie is snatched.',
},
],
}
],
}]
|
Tapisama/SL
|
refs/heads/master
|
pygame/pygame_rect.py
|
1
|
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import pygame,sys
pygame.init();#initialize pygame
screen=pygame.display.set_mode((640,460))#create the screen
pygame.display.set_caption("Pygame tutorial")#sets the screen name
rect=(0,0,32,32)#rect to draw
color=(255,0,0)#color for the rect
screen.fill((255,255,255))#color to clear the screen
pygame.draw.rect(screen,color,rect)#draws the rect
pygame.display.update()#updates the screen
pygame.time.delay(2000) # pauses for 2 seconds
pygame.quit()
|
jroyal/plexpy
|
refs/heads/master
|
lib/concurrent/futures/process.py
|
196
|
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Implements ProcessPoolExecutor.
The following diagram and text describe the data-flow through the system:
|======================= In-process =====================|== Out-of-process ==|
+----------+ +----------+ +--------+ +-----------+ +---------+
| | => | Work Ids | => | | => | Call Q | => | |
| | +----------+ | | +-----------+ | |
| | | ... | | | | ... | | |
| | | 6 | | | | 5, call() | | |
| | | 7 | | | | ... | | |
| Process | | ... | | Local | +-----------+ | Process |
| Pool | +----------+ | Worker | | #1..n |
| Executor | | Thread | | |
| | +----------- + | | +-----------+ | |
| | <=> | Work Items | <=> | | <= | Result Q | <= | |
| | +------------+ | | +-----------+ | |
| | | 6: call() | | | | ... | | |
| | | future | | | | 4, result | | |
| | | ... | | | | 3, except | | |
+----------+ +------------+ +--------+ +-----------+ +---------+
Executor.submit() called:
- creates a uniquely numbered _WorkItem and adds it to the "Work Items" dict
- adds the id of the _WorkItem to the "Work Ids" queue
Local worker thread:
- reads work ids from the "Work Ids" queue and looks up the corresponding
WorkItem from the "Work Items" dict: if the work item has been cancelled then
it is simply removed from the dict, otherwise it is repackaged as a
_CallItem and put in the "Call Q". New _CallItems are put in the "Call Q"
until "Call Q" is full. NOTE: the size of the "Call Q" is kept small because
calls placed in the "Call Q" can no longer be cancelled with Future.cancel().
- reads _ResultItems from "Result Q", updates the future stored in the
"Work Items" dict and deletes the dict entry
Process #1..n:
- reads _CallItems from "Call Q", executes the calls, and puts the resulting
_ResultItems in "Result Q"
"""
from __future__ import with_statement
import atexit
import multiprocessing
import threading
import weakref
import sys
from concurrent.futures import _base
try:
import queue
except ImportError:
import Queue as queue
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
# Workers are created as daemon threads and processes. This is done to allow the
# interpreter to exit when there are still idle processes in a
# ProcessPoolExecutor's process pool (i.e. shutdown() was not called). However,
# allowing workers to die with the interpreter has two undesirable properties:
# - The workers would still be running during interpreter shutdown,
# meaning that they would fail in unpredictable ways.
# - The workers could be killed while evaluating a work item, which could
# be bad if the callable being evaluated has external side-effects e.g.
# writing to a file.
#
# To work around this problem, an exit handler is installed which tells the
# workers to exit when their work queues are empty and then waits until the
# threads/processes finish.
_threads_queues = weakref.WeakKeyDictionary()
_shutdown = False
def _python_exit():
global _shutdown
_shutdown = True
items = list(_threads_queues.items())
for t, q in items:
q.put(None)
for t, q in items:
t.join()
# Controls how many more calls than processes will be queued in the call queue.
# A smaller number will mean that processes spend more time idle waiting for
# work while a larger number will make Future.cancel() succeed less frequently
# (Futures in the call queue cannot be cancelled).
EXTRA_QUEUED_CALLS = 1
class _WorkItem(object):
def __init__(self, future, fn, args, kwargs):
self.future = future
self.fn = fn
self.args = args
self.kwargs = kwargs
class _ResultItem(object):
def __init__(self, work_id, exception=None, result=None):
self.work_id = work_id
self.exception = exception
self.result = result
class _CallItem(object):
def __init__(self, work_id, fn, args, kwargs):
self.work_id = work_id
self.fn = fn
self.args = args
self.kwargs = kwargs
def _process_worker(call_queue, result_queue):
"""Evaluates calls from call_queue and places the results in result_queue.
This worker is run in a separate process.
Args:
call_queue: A multiprocessing.Queue of _CallItems that will be read and
evaluated by the worker.
        result_queue: A multiprocessing.Queue of _ResultItems that will be written
to by the worker.
shutdown: A multiprocessing.Event that will be set as a signal to the
worker that it should exit when call_queue is empty.
"""
while True:
call_item = call_queue.get(block=True)
if call_item is None:
# Wake up queue management thread
result_queue.put(None)
return
try:
r = call_item.fn(*call_item.args, **call_item.kwargs)
except BaseException:
e = sys.exc_info()[1]
result_queue.put(_ResultItem(call_item.work_id,
exception=e))
else:
result_queue.put(_ResultItem(call_item.work_id,
result=r))
def _add_call_item_to_queue(pending_work_items,
work_ids,
call_queue):
"""Fills call_queue with _WorkItems from pending_work_items.
This function never blocks.
Args:
pending_work_items: A dict mapping work ids to _WorkItems e.g.
{5: <_WorkItem...>, 6: <_WorkItem...>, ...}
work_ids: A queue.Queue of work ids e.g. Queue([5, 6, ...]). Work ids
are consumed and the corresponding _WorkItems from
pending_work_items are transformed into _CallItems and put in
call_queue.
call_queue: A multiprocessing.Queue that will be filled with _CallItems
derived from _WorkItems.
"""
while True:
if call_queue.full():
return
try:
work_id = work_ids.get(block=False)
except queue.Empty:
return
else:
work_item = pending_work_items[work_id]
if work_item.future.set_running_or_notify_cancel():
call_queue.put(_CallItem(work_id,
work_item.fn,
work_item.args,
work_item.kwargs),
block=True)
else:
del pending_work_items[work_id]
continue
def _queue_management_worker(executor_reference,
processes,
pending_work_items,
work_ids_queue,
call_queue,
result_queue):
"""Manages the communication between this process and the worker processes.
This function is run in a local thread.
Args:
executor_reference: A weakref.ref to the ProcessPoolExecutor that owns
this thread. Used to determine if the ProcessPoolExecutor has been
garbage collected and that this function can exit.
        processes: A list of the multiprocessing.Process instances used as
workers.
pending_work_items: A dict mapping work ids to _WorkItems e.g.
{5: <_WorkItem...>, 6: <_WorkItem...>, ...}
work_ids_queue: A queue.Queue of work ids e.g. Queue([5, 6, ...]).
call_queue: A multiprocessing.Queue that will be filled with _CallItems
derived from _WorkItems for processing by the process workers.
result_queue: A multiprocessing.Queue of _ResultItems generated by the
process workers.
"""
nb_shutdown_processes = [0]
def shutdown_one_process():
"""Tell a worker to terminate, which will in turn wake us again"""
call_queue.put(None)
nb_shutdown_processes[0] += 1
while True:
_add_call_item_to_queue(pending_work_items,
work_ids_queue,
call_queue)
result_item = result_queue.get(block=True)
if result_item is not None:
work_item = pending_work_items[result_item.work_id]
del pending_work_items[result_item.work_id]
if result_item.exception:
work_item.future.set_exception(result_item.exception)
else:
work_item.future.set_result(result_item.result)
# Check whether we should start shutting down.
executor = executor_reference()
# No more work items can be added if:
# - The interpreter is shutting down OR
# - The executor that owns this worker has been collected OR
# - The executor that owns this worker has been shutdown.
if _shutdown or executor is None or executor._shutdown_thread:
# Since no new work items can be added, it is safe to shutdown
# this thread if there are no pending work items.
if not pending_work_items:
while nb_shutdown_processes[0] < len(processes):
shutdown_one_process()
# If .join() is not called on the created processes then
# some multiprocessing.Queue methods may deadlock on Mac OS
# X.
for p in processes:
p.join()
call_queue.close()
return
del executor
_system_limits_checked = False
_system_limited = None
def _check_system_limits():
global _system_limits_checked, _system_limited
if _system_limits_checked:
if _system_limited:
raise NotImplementedError(_system_limited)
_system_limits_checked = True
try:
import os
nsems_max = os.sysconf("SC_SEM_NSEMS_MAX")
except (AttributeError, ValueError):
# sysconf not available or setting not available
return
if nsems_max == -1:
        # indeterminate limit, assume that limit is determined
# by available memory only
return
if nsems_max >= 256:
# minimum number of semaphores available
# according to POSIX
return
_system_limited = "system provides too few semaphores (%d available, 256 necessary)" % nsems_max
raise NotImplementedError(_system_limited)
class ProcessPoolExecutor(_base.Executor):
def __init__(self, max_workers=None):
"""Initializes a new ProcessPoolExecutor instance.
Args:
max_workers: The maximum number of processes that can be used to
execute the given calls. If None or not given then as many
worker processes will be created as the machine has processors.
"""
_check_system_limits()
if max_workers is None:
self._max_workers = multiprocessing.cpu_count()
else:
self._max_workers = max_workers
# Make the call queue slightly larger than the number of processes to
# prevent the worker processes from idling. But don't make it too big
# because futures in the call queue cannot be cancelled.
self._call_queue = multiprocessing.Queue(self._max_workers +
EXTRA_QUEUED_CALLS)
self._result_queue = multiprocessing.Queue()
self._work_ids = queue.Queue()
self._queue_management_thread = None
self._processes = set()
# Shutdown is a two-step process.
self._shutdown_thread = False
self._shutdown_lock = threading.Lock()
self._queue_count = 0
self._pending_work_items = {}
def _start_queue_management_thread(self):
# When the executor gets lost, the weakref callback will wake up
# the queue management thread.
def weakref_cb(_, q=self._result_queue):
q.put(None)
if self._queue_management_thread is None:
self._queue_management_thread = threading.Thread(
target=_queue_management_worker,
args=(weakref.ref(self, weakref_cb),
self._processes,
self._pending_work_items,
self._work_ids,
self._call_queue,
self._result_queue))
self._queue_management_thread.daemon = True
self._queue_management_thread.start()
_threads_queues[self._queue_management_thread] = self._result_queue
def _adjust_process_count(self):
for _ in range(len(self._processes), self._max_workers):
p = multiprocessing.Process(
target=_process_worker,
args=(self._call_queue,
self._result_queue))
p.start()
self._processes.add(p)
def submit(self, fn, *args, **kwargs):
with self._shutdown_lock:
if self._shutdown_thread:
raise RuntimeError('cannot schedule new futures after shutdown')
f = _base.Future()
w = _WorkItem(f, fn, args, kwargs)
self._pending_work_items[self._queue_count] = w
self._work_ids.put(self._queue_count)
self._queue_count += 1
# Wake up queue management thread
self._result_queue.put(None)
self._start_queue_management_thread()
self._adjust_process_count()
return f
submit.__doc__ = _base.Executor.submit.__doc__
def shutdown(self, wait=True):
with self._shutdown_lock:
self._shutdown_thread = True
if self._queue_management_thread:
# Wake up queue management thread
self._result_queue.put(None)
if wait:
self._queue_management_thread.join()
        # To reduce the risk of opening too many files, remove references to
# objects that use file descriptors.
self._queue_management_thread = None
self._call_queue = None
self._result_queue = None
self._processes = None
shutdown.__doc__ = _base.Executor.shutdown.__doc__
atexit.register(_python_exit)
|
Woomp/fibonacci
|
refs/heads/develop
|
docs/conf.py
|
2
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
thisdir = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.abspath(os.path.join(thisdir, '../src')))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
from src import fibonacci
# -- General configuration ---------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.viewcode',
]
if os.getenv('SPELLCHECK'):
spelling_word_list_filename='spelling_wordlist.txt'
extensions += 'sphinxcontrib.spelling',
spelling_show_suggestions = True
spelling_lang = 'en_US'
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'fibonacci'
copyright = u'2015, Maik Figura'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = fibonacci.__version__
# The full version, including alpha/beta/rc tags.
release = fibonacci.__version__
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# Output file base name for HTML help builder.
htmlhelp_basename = 'fibonaccidoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'fibonacci.tex',
u'fibonacci Documentation',
u'Maik Figura', 'manual'),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'fibonacci',
u'fibonacci Documentation',
[u'Maik Figura'], 1)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'fibonacci',
u'fibonacci Documentation',
u'Maik Figura',
'fibonacci',
'One line description of project.',
'Miscellaneous'),
]
# -- Autodoc Config -------------------------------------------------------
autoclass_content = 'class' # include __init__ docstring
autodoc_member_order = 'bysource'
autodoc_default_flags = ['members', 'undoc-members', 'show-inheritance']
autosummary_generate = True
|
daniele-athome/kontalk-legacy-xmppserver
|
refs/heads/master
|
twisted/plugins/xmppnet.py
|
1
|
# -*- coding: utf-8 -*-
"""twistd plugin for XMPP net."""
"""
Kontalk XMPP server
Copyright (C) 2014 Kontalk Devteam <devteam@kontalk.org>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import demjson
from zope.interface import implements
from twisted.python import usage
from twisted.plugin import IPlugin
from twisted.application.service import IServiceMaker, MultiService
class Options(usage.Options):
optParameters = [["config", "c", "net.conf", "Configuration file."]]
class KontalkNetServiceMaker(object):
implements(IServiceMaker, IPlugin)
tapname = "kontalk-net"
description = "Kontalk XMPP Net component."
options = Options
def makeService(self, options):
from kontalk.xmppserver.component.net import NetComponent
from kontalk.xmppserver import log
# load configuration
fp = open(options['config'], 'r')
config = demjson.decode(fp.read(), allow_comments=True)
fp.close()
log.init(config)
appl = MultiService()
comp = NetComponent(config)
comp.setServiceParent(appl)
comp.setup().setServiceParent(appl)
return appl
serviceMaker = KontalkNetServiceMaker()
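# Illustrative note (not part of the original plugin): with this module placed
# under twisted/plugins/, the tapname above becomes a twistd subcommand, so the
# component would typically be started with something like
#
#     twistd -n kontalk-net -c net.conf
#
# where net.conf is the JSON configuration file read in makeService().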
|
markgrovs/alfred-airmail-to-todoist
|
refs/heads/develop
|
src/lib/requests/packages/urllib3/packages/ssl_match_hostname/__init__.py
|
374
|
import sys
try:
# Our match_hostname function is the same as 3.5's, so we only want to
# import the match_hostname function if it's at least that good.
if sys.version_info < (3, 5):
raise ImportError("Fallback to vendored code")
from ssl import CertificateError, match_hostname
except ImportError:
try:
# Backport of the function from a pypi module
from backports.ssl_match_hostname import CertificateError, match_hostname
except ImportError:
# Our vendored copy
from ._implementation import CertificateError, match_hostname
# Not needed, but documenting what we provide.
__all__ = ('CertificateError', 'match_hostname')
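# Illustrative sketch (not part of the original file): typical use of the
# exported helpers once a peer certificate dict has been obtained, e.g. from
# ssl.SSLSocket.getpeercert(); the certificate below is a hypothetical,
# minimal example.
#
#     cert = {'subject': ((('commonName', 'example.com'),),)}
#     try:
#         match_hostname(cert, 'example.com')  # returns None when it matches
#     except CertificateError:
#         pass  # hostname does not match the certificate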
|
neale/relations
|
refs/heads/master
|
relations-aes/server/wtf.py
|
2
|
import RPi.GPIO as GPIO
import serial as uart
import pymongo
from pymongo import Connection
import datetime
connection = Connection('localhost', 3000)
db = connection.test_database
port = uart.Serial("/dev/ttyAMA0", baudrate=38400, timeout = 3.0)
port.flushInput()
port.flushOutput()
class packet:
expectedPacket = []
packetCount = 0
verified = False
def __init__(self, packet, dataSize):
        self.expectedPacket = [0xFF, dataSize+7, 0xAB, 0xBC, 32, 0, 0xFF]
self.packet = packet
self.dataSize = dataSize
    def check(self):
        for i in range(len(self.expectedPacket)):
            if i != 6:
                if self.expectedPacket[i] == self.packet[i]:
                    self.verified = True
                else:
                    self.verified = False
            if i == 6:
                i += self.dataSize
def fill(self):
self.packet.start = 0
self.packet.length = 0
self.packet.sendAddr = 0x00
self.packet.destAddr = 0x00
self.packet.ID_HC = 0
self.packet.data = [0, 0]
self.packet.CRC = 0
self.packet.stop = 0x00
## set up GPIO TXRX pins for uart line
GPIO.setmode(GPIO.BOARD);
GPIO.setup(8, GPIO.IN, pull_up_down=GPIO.PUD_DOWN);
GPIO.setup(10, GPIO.OUT, pull_up_down=GPIO.PUD_DOWN);
def readLineCR(port):
    person = 0
    while True:
        bytesToRead = port.inWaiting()
        port.read(bytesToRead)
        response = port.readline()
        # assumption: any response on the serial line ends the read loop
        if response:
            return person
while True:
port.write("count received")
person = readLineCR(port)
print("\r\n + person + \r\n")
post = {"author": "Neale",
"text": person,
"date": datetime.datetime.utcnow()
}
posts = db.posts
posts.insert(post)
    # ObjectId('...')
port.close()
GPIO.cleanup()
|
kandle/twitter-grab
|
refs/heads/master
|
grab.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import twitter
import ConfigParser, os
config = ConfigParser.ConfigParser()
config.readfp(open('grabber.conf'))
api = twitter.Api(consumer_key=config.get("twitter", "consumer_key"),
consumer_secret=config.get("twitter", "consumer_secret"),
access_token_key=config.get("twitter", "access_token_key"),
access_token_secret=config.get("twitter", "access_token_secret"))
statuses = api.GetSearch(term=["#kcandle"])
for s in reversed(statuses):
# print s
source = s.user.name
target = ", ".join([ i.name for i in s.user_mentions if i.name != source ])
# topics = " ".join([ "#"+h.text for h in s.hashtags if h.text.lower() != "kcandle"])
topics = " ".join([ "#"+h.text for h in s.hashtags ])
print u"[%s] %s 🔥 → %s : %s" % ( s.created_at, source, target, topics )
print s.text
print ""
|
poljeff/odoo
|
refs/heads/8.0
|
addons/stock_picking_wave/controllers/main.py
|
418
|
from openerp.addons.web import http
from openerp.addons.web.http import request
class picking_wave_report(http.Controller):
@http.route('/report/stock_picking_wave.report_pickingwave/<ids>', type='http', auth='user',
website=True)
def report_picking_wave(self, ids):
self.cr, self.uid, self.pool = request.cr, request.uid, request.registry
ids = [int(i) for i in ids.split(',')]
picking_wave_obj = self.pool["stock.picking.wave"]
wave = picking_wave_obj.browse(self.cr, self.uid, ids[0])
docargs = {
'docs': wave.picking_ids,
}
return request.registry['report'].render(self.cr, self.uid, [], 'stock.report_picking', docargs)
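# Illustrative note (not part of the original controller): given the route
# declared above, the report for picking wave 3 would typically be requested
# by an authenticated user as
#
#     GET /report/stock_picking_wave.report_pickingwave/3
#
# The <ids> fragment is split on commas and cast to int, although only the
# first id is browsed when rendering the report.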
|
andela-sjames/team1
|
refs/heads/master
|
main.py
|
3
|
import urllib2
from BeautifulSoup import BeautifulSoup
from urlparse import urlparse
import re
def analyse_web(root,max_depth):
if type(max_depth) == str:
max_depth = int(max_depth)
print "*** Fetching external links for "+root
page1, stat= get_page(root)
stat = re.sub("[!@#$']", '', stat.encode('utf8'))
external = get_external(page1,root)
crawled = {}
crawldepth = {}
crawled[stat]={'parent':'root'}
print "*** "+`len(external)`+" link(s) found on "+root
for check in external:
if check != "":
domain = get_domain(check)
else:
continue
filter_domain = [domain]
#set domain and depth
tocrawl = [[check,1]]
while tocrawl:
crawl_ele = tocrawl.pop()
link = crawl_ele[0]
depth = crawl_ele[1]
if link not in crawled.keys():
if link is not None and link != '#' and link != '/' and link != '?':
print "*** Fetching data from "+link
content, title = get_page(link)
title = re.sub("[!@#$']", '', title.encode('utf8'))
if content == None:
continue
else:
crawldepth[depth]=title
host = get_domain(link)
if depth < max_depth and host in filter_domain :
outlinks = get_all_links(content,link)
print "*** "+`len(outlinks)`+" link(s) found on "+link
add_to_tocrawl(crawled.keys(),tocrawl, outlinks, depth+1)
if depth == 1:
crawled[title]={'parent':stat}
else:
crawled[title]={'parent':crawldepth[depth-1]}
return crawled
def get_external(soup,url):
return [l.get('href') for l in soup.findAll('a') if is_external(url,l.get('href'))]
def get_domain(url):
hostname = urlparse(url).hostname
if len(re.findall( r'[0-9]+(?:\.[0-9]+){3}', hostname)) > 0:
return hostname
    elif len(hostname.split('.')) == 1:
        return hostname
elif hostname.find('www.') != -1:
return hostname.split('.')[0]
else:
return hostname.split('.')[1]
def is_external(root,host):
    if host is None:
        return False
    if len(host) > 0:
if host[0] == '/' or host[0] == '#' or host[0] == '?':
return False
host = urlparse(host).hostname
hostname = urlparse(root).hostname
if host == None:
return False
return host != hostname and host.find(hostname) == -1
def get_page(url):
try:
response = urllib2.urlopen(url)
soup = BeautifulSoup(response)
return soup, soup.title.string
except urllib2.HTTPError,e:
return None, str(e.code)
except urllib2.URLError,e:
return None, 'Invalid Url'
except:
return None, 'Wrong Url'
def get_all_links(page,parent):
return [l.get('href') for l in page.findAll('a') ]
def add_to_tocrawl(crawled, tocrawl, newlinks, depth):
for link in newlinks:
if link not in tocrawl and link not in crawled:
tocrawl.append([link,depth])
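# Illustrative sketch (not part of the original script): a typical invocation
# of the crawler defined above; the start URL and depth are hypothetical.
#
#     if __name__ == '__main__':
#         tree = analyse_web("http://example.com", 2)
#         for title, info in tree.items():
#             print title, "<-", info['parent']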
|
bolmsten/mxcube3
|
refs/heads/master
|
mxcube3/remote_access.py
|
2
|
import logging
from flask import request, session
from mxcube3 import socketio
from collections import deque
from mxcube3 import app as mxcube
MASTER = None
MASTER_ROOM = None
PENDING_EVENTS = deque()
DISCONNECT_HANDLED = True
OBSERVERS = {}
def set_master(master_sid):
global MASTER
MASTER = master_sid
def is_master(sid):
return MASTER == sid
def flush():
global MASTER
global PENDING_EVENTS
MASTER = None
PENDING_EVENTS = deque()
def _event_callback():
event_id, event, json_dict, kw = PENDING_EVENTS.popleft()
emit_pending_events()
def emit_pending_events():
try:
event_id, event, json_dict, kwargs = PENDING_EVENTS[0]
except IndexError:
pass
else:
return _emit(event, json_dict, **kwargs)
def _emit(event, json_dict, **kwargs):
kw = dict(kwargs)
kw['callback'] = _event_callback
kw['room'] = MASTER_ROOM
socketio.emit(event, json_dict, **kw)
def safe_emit(event, json_dict, **kwargs):
PENDING_EVENTS.append((id(json_dict), event, json_dict, kwargs))
if len(PENDING_EVENTS) == 1:
emit_pending_events()
@socketio.on('connect', namespace='/hwr')
def connect():
global MASTER_ROOM, DISCONNECT_HANDLED
if is_master(session.sid):
MASTER_ROOM = request.sid
emit_pending_events()
if not mxcube.queue.queue_hwobj.is_executing() and not DISCONNECT_HANDLED:
DISCONNECT_HANDLED = True
socketio.emit("resumeQueueDialog", namespace='/hwr')
msg = 'Client reconnected, Queue was previously stopped, asking '
msg += 'client for action'
logging.getLogger('HWR').info(msg)
@socketio.on('disconnect', namespace='/hwr')
def disconnect():
global DISCONNECT_HANDLED, MASTER_ROOM
if is_master(session.sid) and MASTER_ROOM == request.sid and \
mxcube.queue.queue_hwobj.is_executing():
DISCONNECT_HANDLED = False
mxcube.queue.queue_hwobj.stop()
logging.getLogger('HWR').info('Client disconnected, stopping queue')
@socketio.on('setRaMaster', namespace='/hwr')
def set_master_id(data):
global MASTER_ROOM, OBSERVERS
if data['master']:
MASTER_ROOM = request.sid
emit_pending_events()
else:
OBSERVERS[remote_addr()] = {"host": remote_addr(),
"name": data["name"],
"requestsControl": False,
"message": '',
"sid": session.sid}
socketio.emit("observersChanged", OBSERVERS.values(), namespace='/hwr')
return session.sid
def observer_name():
global OBSERVERS
observer_name = ''
try:
observer_name = OBSERVERS[remote_addr()]['name']
except KeyError:
pass
return observer_name
def remote_addr():
return str(request.headers.get('x-forwarded-for', request.remote_addr))
|
steven-martins/ramassage.epitech.eu
|
refs/heads/master
|
actions/pickup.py
|
1
|
__author__ = 'steven'
from mixins.scm import GitMixin
from exceptions import RepositoryNameMissing
import logging
import os
import config
from api_tools import Mapping
mapping = Mapping()
class Pickup(GitMixin):
def __init__(self, task_id, project):
self._project = project
self._task_id = task_id
def one(self, login):
if not self._project["template"]["repository_name"] or len(self._project["template"]["repository_name"]) == 0:
raise RepositoryNameMissing()
old_login = mapping.users[login] if login in mapping.users else None
logging.info("Pickup.one(%s)" % (login))
succeed, repo = self._retrieve_repository(login, self._project["template"]["repository_name"],
self._task_id, self._project["city"], old_login)
return succeed, repo
def clean_all(self):
if not self._project["template"]["repository_name"] or len(self._project["template"]["repository_name"]) == 0:
raise RepositoryNameMissing()
self._remove_all_repository(self._task_id, self._project["template"]["repository_name"])
def archive(self):
repos_uri = os.path.join(config.REPOS_DIR, "%(task_id)s/%(repos)s" %
{"task_id": self._task_id, "repos": self._project["template"]["repository_name"]})
arch = self._cleanfilename("%s-%s" % (self._project["title"], self._project["instance_code"]))
self._archive(config.ARCHIVE_DIR, arch,
repos_uri, versioned=True)
return arch
def distribute(self):
archive_name = os.path.join(config.ARCHIVE_DIR, self._cleanfilename("%s-%s" % (
self._project["title"], self._project["instance_code"])))
filename = self._last_version("%s.zip" % (archive_name), with_extension=True)
filename = os.path.basename(filename)
filepath = config.ARCHIVE_DIR
if "resp" not in self._project:
self._project["resp"] = []
if "template_resp" not in self._project:
self._project["template_resp"] = []
if "assistants" not in self._project:
self._project["assistants"] = []
if "template_resp" in self._project:
self._project["template_resp"].append({'lastname': None, 'login': config.TRICHE_LOGIN, 'id': None,
'firstname': None})
if "resp" in self._project:
self._distribute(self._project["resp"], filename, self._project["scolaryear"],
self._cleanfilename(self._project["module_title"]),
self._cleanfilename(self._project["title"]), filepath)
if "template_resp" in self._project:
self._distribute(self._project["template_resp"], filename, self._project["scolaryear"],
self._cleanfilename(self._project["module_title"]),
self._cleanfilename(self._project["title"]), filepath)
if "assistants" in self._project:
self._distribute(self._project["assistants"], filename, self._project["scolaryear"],
self._cleanfilename(self._project["module_title"]),
self._cleanfilename(self._project["title"]), filepath)
|
synweap15/pyload
|
refs/heads/stable
|
pyLoadCore.py
|
34
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
@author: spoob
@author: sebnapi
@author: RaNaN
@author: mkaay
@version: v0.4.9
"""
CURRENT_VERSION = '0.4.9'
import __builtin__
from getopt import getopt, GetoptError
import module.common.pylgettext as gettext
from imp import find_module
import logging
import logging.handlers
import os
from os import _exit, execl, getcwd, makedirs, remove, sep, walk, chdir, close
from os.path import exists, join
import signal
import subprocess
import sys
from sys import argv, executable, exit
from time import time, sleep
from traceback import print_exc
from module import InitHomeDir
from module.plugins.AccountManager import AccountManager
from module.CaptchaManager import CaptchaManager
from module.ConfigParser import ConfigParser
from module.plugins.PluginManager import PluginManager
from module.PullEvents import PullManager
from module.network.RequestFactory import RequestFactory
from module.web.ServerThread import WebServer
from module.Scheduler import Scheduler
from module.common.JsEngine import JsEngine
from module import remote
from module.remote.RemoteManager import RemoteManager
from module.database import DatabaseBackend, FileHandler
from module.utils import freeSpace, formatSize, get_console_encoding
from codecs import getwriter
enc = get_console_encoding(sys.stdout.encoding)
sys.stdout = getwriter(enc)(sys.stdout, errors="replace")
# TODO List
# - configurable auth system ldap/mysql
# - cron job like scheduler
class Core(object):
"""pyLoad Core, one tool to rule them all... (the filehosters) :D"""
def __init__(self):
self.doDebug = False
self.startedInGui = False
self.running = False
self.daemon = False
self.remote = True
self.arg_links = []
self.pidfile = "pyload.pid"
self.deleteLinks = False # will delete links on startup
if len(argv) > 1:
try:
options, args = getopt(argv[1:], 'vchdusqp:',
["version", "clear", "clean", "help", "debug", "user",
"setup", "configdir=", "changedir", "daemon",
"quit", "status", "no-remote","pidfile="])
for option, argument in options:
if option in ("-v", "--version"):
print "pyLoad", CURRENT_VERSION
exit()
elif option in ("-p", "--pidfile"):
self.pidfile = argument
elif option == "--daemon":
self.daemon = True
elif option in ("-c", "--clear"):
self.deleteLinks = True
elif option in ("-h", "--help"):
self.print_help()
exit()
elif option in ("-d", "--debug"):
self.doDebug = True
elif option in ("-u", "--user"):
from module.setup import Setup
self.config = ConfigParser()
s = Setup(pypath, self.config)
s.set_user()
exit()
elif option in ("-s", "--setup"):
from module.setup import Setup
self.config = ConfigParser()
s = Setup(pypath, self.config)
s.start()
exit()
elif option == "--changedir":
from module.setup import Setup
self.config = ConfigParser()
s = Setup(pypath, self.config)
s.conf_path(True)
exit()
elif option in ("-q", "--quit"):
self.quitInstance()
exit()
elif option == "--status":
pid = self.isAlreadyRunning()
if self.isAlreadyRunning():
print pid
exit(0)
else:
print "false"
exit(1)
elif option == "--clean":
self.cleanTree()
exit()
elif option == "--no-remote":
self.remote = False
except GetoptError:
print 'Unknown Argument(s) "%s"' % " ".join(argv[1:])
self.print_help()
exit()
def print_help(self):
print ""
print "pyLoad v%s 2008-2011 the pyLoad Team" % CURRENT_VERSION
print ""
if sys.argv[0].endswith(".py"):
print "Usage: python pyLoadCore.py [options]"
else:
print "Usage: pyLoadCore [options]"
print ""
print "<Options>"
print " -v, --version", " " * 10, "Print version to terminal"
print " -c, --clear", " " * 12, "Delete all saved packages/links"
#print " -a, --add=<link/list>", " " * 2, "Add the specified links"
print " -u, --user", " " * 13, "Manages users"
print " -d, --debug", " " * 12, "Enable debug mode"
print " -s, --setup", " " * 12, "Run Setup Assistent"
print " --configdir=<dir>", " " * 6, "Run with <dir> as config directory"
print " -p, --pidfile=<file>", " " * 3, "Set pidfile to <file>"
print " --changedir", " " * 12, "Change config dir permanently"
print " --daemon", " " * 15, "Daemonmize after start"
print " --no-remote", " " * 12, "Disable remote access (saves RAM)"
print " --status", " " * 15, "Display pid if running or False"
print " --clean", " " * 16, "Remove .pyc/.pyo files"
print " -q, --quit", " " * 13, "Quit running pyLoad instance"
print " -h, --help", " " * 13, "Display this help screen"
print ""
def toggle_pause(self):
if self.threadManager.pause:
self.threadManager.pause = False
return False
elif not self.threadManager.pause:
self.threadManager.pause = True
return True
def quit(self, a, b):
self.shutdown()
self.log.info(_("Received Quit signal"))
_exit(1)
def writePidFile(self):
self.deletePidFile()
pid = os.getpid()
f = open(self.pidfile, "wb")
f.write(str(pid))
f.close()
def deletePidFile(self):
if self.checkPidFile():
self.log.debug("Deleting old pidfile %s" % self.pidfile)
os.remove(self.pidfile)
def checkPidFile(self):
""" return pid as int or 0"""
if os.path.isfile(self.pidfile):
f = open(self.pidfile, "rb")
pid = f.read().strip()
f.close()
if pid:
pid = int(pid)
return pid
return 0
def isAlreadyRunning(self):
pid = self.checkPidFile()
if not pid or os.name == "nt": return False
try:
os.kill(pid, 0) # 0 - default signal (does nothing)
except:
return 0
return pid
def quitInstance(self):
if os.name == "nt":
print "Not supported on windows."
return
pid = self.isAlreadyRunning()
if not pid:
print "No pyLoad running."
return
try:
            os.kill(pid, 3) #SIGQUIT
t = time()
print "waiting for pyLoad to quit"
while exists(self.pidfile) and t + 10 > time():
sleep(0.25)
if not exists(self.pidfile):
print "pyLoad successfully stopped"
else:
os.kill(pid, 9) #SIGKILL
print "pyLoad did not respond"
print "Kill signal was send to process with id %s" % pid
except:
print "Error quitting pyLoad"
def cleanTree(self):
for path, dirs, files in walk(self.path("")):
for f in files:
if not f.endswith(".pyo") and not f.endswith(".pyc"):
continue
if "_25" in f or "_26" in f or "_27" in f:
continue
print join(path, f)
remove(join(path, f))
def start(self, rpc=True, web=True):
""" starts the fun :D """
self.version = CURRENT_VERSION
if not exists("pyload.conf"):
from module.setup import Setup
print "This is your first start, running configuration assistent now."
self.config = ConfigParser()
s = Setup(pypath, self.config)
res = False
try:
res = s.start()
except SystemExit:
pass
except KeyboardInterrupt:
print "\nSetup interrupted"
except:
res = False
print_exc()
print "Setup failed"
if not res:
remove("pyload.conf")
exit()
try: signal.signal(signal.SIGQUIT, self.quit)
except: pass
self.config = ConfigParser()
gettext.setpaths([join(os.sep, "usr", "share", "pyload", "locale"), None])
translation = gettext.translation("pyLoad", self.path("locale"),
languages=[self.config['general']['language'],"en"],fallback=True)
translation.install(True)
self.debug = self.doDebug or self.config['general']['debug_mode']
self.remote &= self.config['remote']['activated']
pid = self.isAlreadyRunning()
if pid:
print _("pyLoad already running with pid %s") % pid
exit()
if os.name != "nt" and self.config["general"]["renice"]:
os.system("renice %d %d" % (self.config["general"]["renice"], os.getpid()))
if self.config["permission"]["change_group"]:
if os.name != "nt":
try:
from grp import getgrnam
group = getgrnam(self.config["permission"]["group"])
os.setgid(group[2])
except Exception, e:
print _("Failed changing group: %s") % e
if self.config["permission"]["change_user"]:
if os.name != "nt":
try:
from pwd import getpwnam
user = getpwnam(self.config["permission"]["user"])
os.setuid(user[2])
except Exception, e:
print _("Failed changing user: %s") % e
self.check_file(self.config['log']['log_folder'], _("folder for logs"), True)
if self.debug:
self.init_logger(logging.DEBUG) # logging level
else:
self.init_logger(logging.INFO) # logging level
self.do_kill = False
self.do_restart = False
self.shuttedDown = False
self.log.info(_("Starting") + " pyLoad %s" % CURRENT_VERSION)
self.log.info(_("Using home directory: %s") % getcwd())
self.writePidFile()
        #@TODO refactor
remote.activated = self.remote
self.log.debug("Remote activated: %s" % self.remote)
self.check_install("Crypto", _("pycrypto to decode container files"))
#img = self.check_install("Image", _("Python Image Libary (PIL) for captcha reading"))
#self.check_install("pycurl", _("pycurl to download any files"), True, True)
self.check_file("tmp", _("folder for temporary files"), True)
#tesser = self.check_install("tesseract", _("tesseract for captcha reading"), False) if os.name != "nt" else True
        self.captcha = True # checks seem to fail, although tesseract is available
self.check_file(self.config['general']['download_folder'], _("folder for downloads"), True)
if self.config['ssl']['activated']:
self.check_install("OpenSSL", _("OpenSSL for secure connection"))
self.setupDB()
if self.config.oldRemoteData:
self.log.info(_("Moving old user config to DB"))
self.db.addUser(self.config.oldRemoteData["username"], self.config.oldRemoteData["password"])
self.log.info(_("Please check your logindata with ./pyLoadCore.py -u"))
if self.deleteLinks:
self.log.info(_("All links removed"))
self.db.purgeLinks()
self.requestFactory = RequestFactory(self)
__builtin__.pyreq = self.requestFactory
self.lastClientConnected = 0
# later imported because they would trigger api import, and remote value not set correctly
from module import Api
from module.HookManager import HookManager
from module.ThreadManager import ThreadManager
if Api.activated != self.remote:
self.log.warning("Import error: API remote status not correct.")
self.api = Api.Api(self)
self.scheduler = Scheduler(self)
#hell yeah, so many important managers :D
self.pluginManager = PluginManager(self)
self.pullManager = PullManager(self)
self.accountManager = AccountManager(self)
self.threadManager = ThreadManager(self)
self.captchaManager = CaptchaManager(self)
self.hookManager = HookManager(self)
self.remoteManager = RemoteManager(self)
self.js = JsEngine()
self.log.info(_("Downloadtime: %s") % self.api.isTimeDownload())
if rpc:
self.remoteManager.startBackends()
if web:
self.init_webserver()
spaceLeft = freeSpace(self.config["general"]["download_folder"])
self.log.info(_("Free space: %s") % formatSize(spaceLeft))
        self.config.save() #save so config files get filled
link_file = join(pypath, "links.txt")
if exists(link_file):
f = open(link_file, "rb")
if f.read().strip():
self.api.addPackage("links.txt", [link_file], 1)
f.close()
link_file = "links.txt"
if exists(link_file):
f = open(link_file, "rb")
if f.read().strip():
self.api.addPackage("links.txt", [link_file], 1)
f.close()
#self.scheduler.addJob(0, self.accountManager.getAccountInfos)
self.log.info(_("Activating Accounts..."))
self.accountManager.getAccountInfos()
self.threadManager.pause = False
self.running = True
self.log.info(_("Activating Plugins..."))
self.hookManager.coreReady()
self.log.info(_("pyLoad is up and running"))
#test api
# from module.common.APIExerciser import startApiExerciser
# startApiExerciser(self, 3)
#some memory stats
# from guppy import hpy
# hp=hpy()
# import objgraph
# objgraph.show_most_common_types(limit=20)
# import memdebug
# memdebug.start(8002)
locals().clear()
while True:
sleep(2)
if self.do_restart:
self.log.info(_("restarting pyLoad"))
self.restart()
if self.do_kill:
self.shutdown()
self.log.info(_("pyLoad quits"))
self.removeLogger()
_exit(0) #@TODO thrift blocks shutdown
self.threadManager.work()
self.scheduler.work()
def setupDB(self):
self.db = DatabaseBackend(self) # the backend
self.db.setup()
self.files = FileHandler(self)
self.db.manager = self.files #ugly?
def init_webserver(self):
if self.config['webinterface']['activated']:
self.webserver = WebServer(self)
self.webserver.start()
def init_logger(self, level):
console = logging.StreamHandler(sys.stdout)
frm = logging.Formatter("%(asctime)s %(levelname)-8s %(message)s", "%d.%m.%Y %H:%M:%S")
console.setFormatter(frm)
self.log = logging.getLogger("log") # settable in config
if self.config['log']['file_log']:
if self.config['log']['log_rotate']:
file_handler = logging.handlers.RotatingFileHandler(join(self.config['log']['log_folder'], 'log.txt'),
maxBytes=self.config['log']['log_size'] * 1024,
backupCount=int(self.config['log']['log_count']),
encoding="utf8")
else:
file_handler = logging.FileHandler(join(self.config['log']['log_folder'], 'log.txt'), encoding="utf8")
file_handler.setFormatter(frm)
self.log.addHandler(file_handler)
self.log.addHandler(console) #if console logging
self.log.setLevel(level)
def removeLogger(self):
for h in list(self.log.handlers):
self.log.removeHandler(h)
h.close()
def check_install(self, check_name, legend, python=True, essential=False):
"""check wether needed tools are installed"""
try:
if python:
find_module(check_name)
else:
pipe = subprocess.PIPE
subprocess.Popen(check_name, stdout=pipe, stderr=pipe)
return True
except:
if essential:
self.log.info(_("Install %s") % legend)
exit()
return False
def check_file(self, check_names, description="", folder=False, empty=True, essential=False, quiet=False):
"""check wether needed files exists"""
tmp_names = []
if not type(check_names) == list:
tmp_names.append(check_names)
else:
tmp_names.extend(check_names)
file_created = True
file_exists = True
for tmp_name in tmp_names:
if not exists(tmp_name):
file_exists = False
if empty:
try:
if folder:
tmp_name = tmp_name.replace("/", sep)
makedirs(tmp_name)
else:
open(tmp_name, "w")
except:
file_created = False
else:
file_created = False
if not file_exists and not quiet:
if file_created:
#self.log.info( _("%s created") % description )
pass
else:
if not empty:
self.log.warning(
_("could not find %(desc)s: %(name)s") % {"desc": description, "name": tmp_name})
else:
print _("could not create %(desc)s: %(name)s") % {"desc": description, "name": tmp_name}
if essential:
exit()
def isClientConnected(self):
return (self.lastClientConnected + 30) > time()
def restart(self):
self.shutdown()
chdir(owd)
# close some open fds
for i in range(3,50):
try:
close(i)
except :
pass
execl(executable, executable, *sys.argv)
_exit(0)
def shutdown(self):
self.log.info(_("shutting down..."))
try:
if self.config['webinterface']['activated'] and hasattr(self, "webserver"):
self.webserver.quit()
for thread in self.threadManager.threads:
thread.put("quit")
pyfiles = self.files.cache.values()
for pyfile in pyfiles:
pyfile.abortDownload()
self.hookManager.coreExiting()
except:
if self.debug:
print_exc()
self.log.info(_("error while shutting down"))
finally:
self.files.syncSave()
self.shuttedDown = True
self.deletePidFile()
def path(self, *args):
return join(pypath, *args)
def deamon():
try:
pid = os.fork()
if pid > 0:
sys.exit(0)
except OSError, e:
print >> sys.stderr, "fork #1 failed: %d (%s)" % (e.errno, e.strerror)
sys.exit(1)
# decouple from parent environment
os.setsid()
os.umask(0)
# do second fork
try:
pid = os.fork()
if pid > 0:
# exit from second parent, print eventual PID before
print "Daemon PID %d" % pid
sys.exit(0)
except OSError, e:
print >> sys.stderr, "fork #2 failed: %d (%s)" % (e.errno, e.strerror)
sys.exit(1)
# Iterate through and close some file descriptors.
for fd in range(0, 3):
try:
os.close(fd)
except OSError: # ERROR, fd wasn't open to begin with (ignored)
pass
os.open(os.devnull, os.O_RDWR) # standard input (0)
os.dup2(0, 1) # standard output (1)
os.dup2(0, 2)
pyload_core = Core()
pyload_core.start()
def main():
#change name to 'pyLoadCore'
#from module.lib.rename_process import renameProcess
#renameProcess('pyLoadCore')
if "--daemon" in sys.argv:
deamon()
else:
pyload_core = Core()
try:
pyload_core.start()
except KeyboardInterrupt:
pyload_core.shutdown()
pyload_core.log.info(_("killed pyLoad from Terminal"))
pyload_core.removeLogger()
_exit(1)
# And so it begins...
if __name__ == "__main__":
main()
|
storborg/manhattan
|
refs/heads/master
|
manhattan/log/memory.py
|
1
|
from __future__ import absolute_import, division, print_function
import logging
from collections import deque
log = logging.getLogger(__name__)
class MemoryLog(object):
"""
An in-memory log, intended for testing only. This log does not support
resuming (server crash recovery).
"""
def __init__(self):
self.q = deque()
def write(self, *records):
log.info('Writing records: %r', records)
self.q.extend(records)
def process(self):
log.info('Swapping out log.')
to_process = self.q
log.info('Creating new log.')
self.q = deque()
log.info('Playing back old log.')
for record in to_process:
log.info('Playing record: %r', record)
yield record, None
def purge(self):
log.info('Purging log.')
self.q = deque()
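# A minimal, illustrative exercise of the log above (not part of the original
# module); the record tuples are hypothetical test data.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    mem_log = MemoryLog()
    mem_log.write(('pageview', '/home'), ('click', '/buy'))
    for record, fpos in mem_log.process():
        print(record)   # replays each written record; fpos is always None
    mem_log.purge()     # discards anything written since process() swapped the queue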
|